diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 776b1ab944f69..bb3c75f10aaea 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index e9c743885d78d..12729a9b6ebda 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -322,8 +322,8 @@ steps: env: BWC_VERSION: 7.16.3 - - label: "{{matrix.image}} / 7.17.23 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.23 + - label: "{{matrix.image}} / 7.17.24 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.24 timeout_in_minutes: 300 matrix: setup: @@ -337,7 +337,7 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 7.17.23 + BWC_VERSION: 7.17.24 - label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1 @@ -577,8 +577,8 @@ steps: env: BWC_VERSION: 8.13.4 - - label: "{{matrix.image}} / 8.14.4 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.4 + - label: "{{matrix.image}} / 8.14.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.3 timeout_in_minutes: 300 matrix: setup: @@ -592,10 +592,10 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 8.14.4 + BWC_VERSION: 8.14.3 - - label: "{{matrix.image}} / 8.15.0 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.0 + - label: "{{matrix.image}} / 8.15.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.1 timeout_in_minutes: 300 matrix: setup: @@ -609,7 +609,7 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 8.15.0 + BWC_VERSION: 8.15.1 - label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index f908b946bb523..740fec13d1790 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -342,8 +342,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 7.17.23 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.23#bwcTest + - label: 7.17.24 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.24#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -353,7 +353,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 7.17.23 + BWC_VERSION: 7.17.24 retry: automatic: - exit_status: "-1" @@ -642,8 +642,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.14.4 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.4#bwcTest + - label: 8.14.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true 
v8.14.3#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -653,7 +653,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 8.14.4 + BWC_VERSION: 8.14.3 retry: automatic: - exit_status: "-1" @@ -662,8 +662,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.15.0 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.0#bwcTest + - label: 8.15.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.1#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -673,7 +673,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 8.15.0 + BWC_VERSION: 8.15.1 retry: automatic: - exit_status: "-1" @@ -771,7 +771,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -821,7 +821,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.23", "8.14.4", "8.15.0", "8.16.0"] + BWC_VERSION: ["7.17.24", "8.15.1", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/scripts/gradle-build-cache-validation.sh b/.buildkite/scripts/gradle-build-cache-validation.sh new file mode 100755 index 0000000000000..75dc9b264b8bc --- /dev/null +++ b/.buildkite/scripts/gradle-build-cache-validation.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +set -euo pipefail + +VALIDATION_SCRIPTS_VERSION=2.5.1 +GRADLE_ENTERPRISE_ACCESS_KEY=$(vault kv get -field=value secret/ci/elastic-elasticsearch/gradle-enterprise-api-key) +export GRADLE_ENTERPRISE_ACCESS_KEY + +curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scripts/releases/download/v$VALIDATION_SCRIPTS_VERSION/gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip && unzip -q -o gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip + +# Create a temporary file +tmpOutputFile=$(mktemp) +trap "rm $tmpOutputFile" EXIT + +set +e +gradle-enterprise-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b $BUILDKITE_BRANCH --gradle-enterprise-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee $tmpOutputFile +# Capture the return value +retval=$? +set -e + +# Now read the content from the temporary file into a variable +perfOutput=$(cat $tmpOutputFile | sed -n '/Performance Characteristics/,/See https:\/\/gradle.com\/bvs\/main\/Gradle.md#performance-characteristics for details./p' | sed '$d' | sed 's/\x1b\[[0-9;]*m//g') +investigationOutput=$(cat $tmpOutputFile | sed -n '/Investigation Quick Links/,$p' | sed 's/\x1b\[[0-9;]*m//g') + +# Initialize HTML output variable +summaryHtml="
Build Cache Performance Characteristics
" +summaryHtml+="" + +# generate html for links +summaryHtml+="
Investigation Links
" +summaryHtml+="" + +cat << EOF | buildkite-agent annotate --context "ctx-validation-summary" --style "info" +$summaryHtml +EOF + +# Check if the command was successful +if [ $retval -eq 0 ]; then + echo "Experiment completed successfully" +elif [ $retval -eq 1 ]; then + echo "An invalid input was provided while attempting to run the experiment" +elif [ $retval -eq 2 ]; then + echo "One of the builds that is part of the experiment failed" +elif [ $retval -eq 3 ]; then + echo "The build was not fully cacheable for the given task graph" +elif [ $retval -eq 3 ]; then + echo "An unclassified, fatal error happened while running the experiment" +fi + +exit $retval + diff --git a/.buildkite/scripts/gradle-cache-validation.sh b/.buildkite/scripts/gradle-cache-validation.sh deleted file mode 100755 index fbb957bc3b26b..0000000000000 --- a/.buildkite/scripts/gradle-cache-validation.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -VALIDATION_SCRIPTS_VERSION=2.5.1 -GRADLE_ENTERPRISE_ACCESS_KEY=$(vault kv get -field=value secret/ci/elastic-elasticsearch/gradle-enterprise-api-key) -export GRADLE_ENTERPRISE_ACCESS_KEY - -curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scripts/releases/download/v$VALIDATION_SCRIPTS_VERSION/gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip && unzip -q -o gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip - -# Create a temporary file -tmpOutputFile=$(mktemp) -trap "rm $tmpOutputFile" EXIT - -gradle-enterprise-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b $BUILDKITE_BRANCH --gradle-enterprise-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee $tmpOutputFile - -# Capture the return value -retval=$? - -# Now read the content from the temporary file into a variable -perfOutput=$(cat $tmpOutputFile | sed -n '/Performance Characteristics/,/See https:\/\/gradle.com\/bvs\/main\/Gradle.md#performance-characteristics for details./p' | sed '$d' | sed 's/\x1b\[[0-9;]*m//g') -investigationOutput=$(cat $tmpOutputFile | sed -n '/Investigation Quick Links/,$p' | sed 's/\x1b\[[0-9;]*m//g') - -# Initialize HTML output variable -summaryHtml="
Performance Characteristics
" -summaryHtml+="" - -# generate html for links -summaryHtml+="
Investigation Links
" -summaryHtml+="" - -cat << EOF | buildkite-agent annotate --context "ctx-validation-summary" --style "info" -$summaryHtml -EOF - -# Check if the command was successful -if [ $retval -eq 0 ]; then - echo "Experiment completed successfully" -elif [ $retval -eq 1 ]; then - echo "An invalid input was provided while attempting to run the experiment" -elif [ $retval -eq 2 ]; then - echo "One of the builds that is part of the experiment failed" -elif [ $retval -eq 3 ]; then - echo "The build was not fully cacheable for the given task graph" -elif [ $retval -eq 3 ]; then - echo "An unclassified, fatal error happened while running the experiment" -fi - -exit $retval - diff --git a/.buildkite/scripts/gradle-configuration-cache-validation.sh b/.buildkite/scripts/gradle-configuration-cache-validation.sh new file mode 100755 index 0000000000000..8249155c5ffc5 --- /dev/null +++ b/.buildkite/scripts/gradle-configuration-cache-validation.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -euo pipefail + +# TODO/ FIXIT without a full resolved gradle home, we see issues configuration cache reuse +./gradlew --max-workers=8 --parallel --scan --no-daemon precommit + +./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 + +# Create a temporary file +tmpOutputFile=$(mktemp) +trap "rm $tmpOutputFile" EXIT + +echo "2nd run" +# TODO run-gradle.sh script causes issues because of init script handling +./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 | tee $tmpOutputFile + +# Check if the command was successful +if grep -q "Configuration cache entry reused." $tmpOutputFile; then + echo "Gradle configuration cache reused" + exit 0 +else + echo "Failed to reuse Gradle configuration cache." 
+ exit 1 +fi + + diff --git a/.buildkite/scripts/lucene-snapshot/update-branch.sh b/.buildkite/scripts/lucene-snapshot/update-branch.sh index d02123f3236e7..6a2d1e3df05f7 100755 --- a/.buildkite/scripts/lucene-snapshot/update-branch.sh +++ b/.buildkite/scripts/lucene-snapshot/update-branch.sh @@ -2,17 +2,17 @@ set -euo pipefail -if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot" ]]; then - echo "Error: This script should only be run on the lucene_snapshot branch" +if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot"* ]]; then + echo "Error: This script should only be run on lucene_snapshot branches" exit 1 fi -echo --- Updating lucene_snapshot branch with main +echo --- Updating "$BUILDKITE_BRANCH" branch with main git config --global user.name elasticsearchmachine git config --global user.email 'infra-root+elasticsearchmachine@elastic.co' -git checkout lucene_snapshot +git checkout "$BUILDKITE_BRANCH" git fetch origin main git merge --no-edit origin/main -git push origin lucene_snapshot +git push origin "$BUILDKITE_BRANCH" diff --git a/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh b/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh index 75f42a32cb590..7bec83d055139 100755 --- a/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh +++ b/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh @@ -2,8 +2,8 @@ set -euo pipefail -if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot" ]]; then - echo "Error: This script should only be run on the lucene_snapshot branch" +if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot"* ]]; then + echo "Error: This script should only be run on the lucene_snapshot branches" exit 1 fi diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 776be80e0d291..e43b3333dd755 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -16,7 +16,7 @@ BWC_VERSION: - "7.14.2" - "7.15.2" - "7.16.3" - - "7.17.23" + - "7.17.24" - "8.0.1" - "8.1.3" - "8.2.3" @@ -31,6 +31,6 @@ BWC_VERSION: - "8.11.4" - "8.12.2" - "8.13.4" - - "8.14.4" - - "8.15.0" + - "8.14.3" + - "8.15.1" - "8.16.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index f5f7f7a7d4ecb..2eea118e57e2a 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - - "7.17.23" - - "8.14.4" - - "8.15.0" + - "7.17.24" + - "8.15.1" - "8.16.0" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0f7e3073ed022..5b98444c044d2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -27,8 +27,12 @@ libs/logstash-bridge @elastic/logstash x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @elastic/kibana-security # APM Data index templates, etc. -x-pack/plugin/apm-data/src/main/resources @elastic/apm-server -x-pack/plugin/apm-data/src/yamlRestTest/resources @elastic/apm-server +x-pack/plugin/apm-data/src/main/resources @elastic/obs-ds-intake-services +x-pack/plugin/apm-data/src/yamlRestTest/resources @elastic/obs-ds-intake-services + +# OTel +x-pack/plugin/otel-data/src/main/resources @elastic/obs-ds-intake-services +x-pack/plugin/otel-data/src/yamlRestTest/resources @elastic/obs-ds-intake-services # Delivery gradle @elastic/es-delivery diff --git a/README.asciidoc b/README.asciidoc index dc27735d3c015..c1945e56b025b 100644 --- a/README.asciidoc +++ b/README.asciidoc @@ -1,6 +1,6 @@ = Elasticsearch -Elasticsearch is a distributed search and analytics engine optimized for speed and relevance on production-scale workloads. Elasticsearch is the foundation of Elastic's open Stack platform. 
Search in near real-time over massive datasets, perform vector searches, integrate with generative AI applications, and much more. +Elasticsearch is a distributed search and analytics engine, scalable data store and vector database optimized for speed and relevance on production-scale workloads. Elasticsearch is the foundation of Elastic's open Stack platform. Search in near real-time over massive datasets, perform vector searches, integrate with generative AI applications, and much more. Use cases enabled by Elasticsearch include: @@ -33,76 +33,144 @@ https://www.elastic.co/downloads/elasticsearch[elastic.co/downloads/elasticsearc === Run Elasticsearch locally //// -IMPORTANT: This content is replicated in the Elasticsearch guide. -If you make changes, you must also update setup/set-up-local-dev-deployment.asciidoc. +IMPORTANT: This content is replicated in the Elasticsearch guide. See `run-elasticsearch-locally.asciidoc`. +Both will soon be replaced by a quickstart script. //// -To try out Elasticsearch on your own machine, we recommend using Docker -and running both Elasticsearch and Kibana. -Docker images are available from the https://www.docker.elastic.co[Elastic Docker registry]. +[WARNING] +==== +DO NOT USE THESE INSTRUCTIONS FOR PRODUCTION DEPLOYMENTS. -NOTE: Starting in Elasticsearch 8.0, security is enabled by default. -The first time you start Elasticsearch, TLS encryption is configured automatically, -a password is generated for the `elastic` user, -and a Kibana enrollment token is created so you can connect Kibana to your secured cluster. +This setup is intended for local development and testing only. +==== -For other installation options, see the -https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html[Elasticsearch installation documentation]. +The following commands help you very quickly spin up a single-node Elasticsearch cluster, together with Kibana in Docker. +Use this setup for local development or testing. -**Start Elasticsearch** +==== Prerequisites -. Install and start https://www.docker.com/products/docker-desktop[Docker -Desktop]. Go to **Preferences > Resources > Advanced** and set Memory to at least 4GB. +If you don't have Docker installed, https://www.docker.com/products/docker-desktop[download and install Docker Desktop] for your operating system. -. Start an Elasticsearch container: -+ +==== Set environment variables + +Configure the following environment variables. + +[source,sh] +---- +export ELASTIC_PASSWORD="" # password for "elastic" username +export KIBANA_PASSWORD="" # Used internally by Kibana, must be at least 6 characters long +---- + +==== Create a Docker network + +To run both Elasticsearch and Kibana, you'll need to create a Docker network: + +[source,sh] ---- -docker network create elastic -docker pull docker.elastic.co/elasticsearch/elasticsearch:{version} <1> -docker run --name elasticsearch --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t docker.elastic.co/elasticsearch/elasticsearch:{version} +docker network create elastic-net ---- -<1> Replace {version} with the version of Elasticsearch you want to run. -+ -When you start Elasticsearch for the first time, the generated `elastic` user password and -Kibana enrollment token are output to the terminal. -+ -NOTE: You might need to scroll back a bit in the terminal to view the password -and enrollment token. -. Copy the generated password and enrollment token and save them in a secure -location. 
These values are shown only when you start Elasticsearch for the first time. -You'll use these to enroll Kibana with your Elasticsearch cluster and log in. +==== Run Elasticsearch + +Start the Elasticsearch container with the following command: -**Start Kibana** +[source,sh] +---- +docker run -p 127.0.0.1:9200:9200 -d --name elasticsearch --network elastic-net \ + -e ELASTIC_PASSWORD=$ELASTIC_PASSWORD \ + -e "discovery.type=single-node" \ + -e "xpack.security.http.ssl.enabled=false" \ + -e "xpack.license.self_generated.type=trial" \ + docker.elastic.co/elasticsearch/elasticsearch:{version} +---- -Kibana enables you to easily send requests to Elasticsearch and analyze, visualize, and manage data interactively. +==== Run Kibana (optional) -. In a new terminal session, start Kibana and connect it to your Elasticsearch container: -+ +To run Kibana, you must first set the `kibana_system` password in the Elasticsearch container. + +[source,sh] ---- -docker pull docker.elastic.co/kibana/kibana:{version} <1> -docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version} +# configure the Kibana password in the ES container +curl -u elastic:$ELASTIC_PASSWORD \ + -X POST \ + http://localhost:9200/_security/user/kibana_system/_password \ + -d '{"password":"'"$KIBANA_PASSWORD"'"}' \ + -H 'Content-Type: application/json' ---- -<1> Replace {version} with the version of Kibana you want to run. -+ -When you start Kibana, a unique URL is output to your terminal. +// NOTCONSOLE -. To access Kibana, open the generated URL in your browser. +Start the Kibana container with the following command: - .. Paste the enrollment token that you copied when starting - Elasticsearch and click the button to connect your Kibana instance with Elasticsearch. +[source,sh] +---- +docker run -p 127.0.0.1:5601:5601 -d --name kibana --network elastic-net \ + -e ELASTICSEARCH_URL=http://elasticsearch:9200 \ + -e ELASTICSEARCH_HOSTS=http://elasticsearch:9200 \ + -e ELASTICSEARCH_USERNAME=kibana_system \ + -e ELASTICSEARCH_PASSWORD=$KIBANA_PASSWORD \ + -e "xpack.security.enabled=false" \ + -e "xpack.license.self_generated.type=trial" \ + docker.elastic.co/kibana/kibana:{version} +---- - .. Log in to Kibana as the `elastic` user with the password that was generated - when you started Elasticsearch. +.Trial license +[%collapsible] +==== +The service is started with a trial license. The trial license enables all features of Elasticsearch for a trial period of 30 days. After the trial period expires, the license is downgraded to a basic license, which is free forever. If you prefer to skip the trial and use the basic license, set the value of the `xpack.license.self_generated.type` variable to basic instead. For a detailed feature comparison between the different licenses, refer to our https://www.elastic.co/subscriptions[subscriptions page]. +==== -**Send requests to Elasticsearch** +==== Send requests to Elasticsearch You send data and other requests to Elasticsearch through REST APIs. You can interact with Elasticsearch using any client that sends HTTP requests, such as the https://www.elastic.co/guide/en/elasticsearch/client/index.html[Elasticsearch language clients] and https://curl.se[curl]. 
+ +===== Using curl + +Here's an example curl command to create a new Elasticsearch index, using basic auth: + +[source,sh] +---- +curl -u elastic:$ELASTIC_PASSWORD \ + -X PUT \ + http://localhost:9200/my-new-index \ + -H 'Content-Type: application/json' +---- +// NOTCONSOLE + +===== Using a language client + +To connect to your local dev Elasticsearch cluster with a language client, you can use basic authentication with the `elastic` username and the password you set in the environment variable. + +You'll use the following connection details: + +* **Elasticsearch endpoint**: `http://localhost:9200` +* **Username**: `elastic` +* **Password**: `$ELASTIC_PASSWORD` (Value you set in the environment variable) + +For example, to connect with the Python `elasticsearch` client: + +[source,python] +---- +import os +from elasticsearch import Elasticsearch + +username = 'elastic' +password = os.getenv('ELASTIC_PASSWORD') # Value you set in the environment variable + +client = Elasticsearch( + "http://localhost:9200", + basic_auth=(username, password) +) + +print(client.info()) +---- + +===== Using the Dev Tools Console + Kibana's developer console provides an easy way to experiment and test requests. -To access the console, go to **Management > Dev Tools**. +To access the console, open Kibana, then go to **Management** > **Dev Tools**. **Add data** diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 49e81a67e85f9..b16621aaaa471 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -1,4 +1,5 @@ import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.TestUtil /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one @@ -29,6 +30,7 @@ tasks.named("javadoc").configure { enabled = false } configurations { expression painless + nativeLib } dependencies { @@ -37,6 +39,7 @@ dependencies { // us to invoke the JMH uberjar as usual. 
exclude group: 'net.sf.jopt-simple', module: 'jopt-simple' } + api(project(':libs:elasticsearch-h3')) api(project(':modules:aggregations')) api(project(':x-pack:plugin:esql-core')) api(project(':x-pack:plugin:esql')) @@ -44,6 +47,7 @@ dependencies { implementation project(path: ':libs:elasticsearch-simdvec') expression(project(path: ':modules:lang-expression', configuration: 'zip')) painless(project(path: ':modules:lang-painless', configuration: 'zip')) + nativeLib(project(':libs:elasticsearch-native')) api "org.openjdk.jmh:jmh-core:$versions.jmh" annotationProcessor "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh" // Dependencies of JMH @@ -75,17 +79,8 @@ tasks.register("copyPainless", Copy) { tasks.named("run").configure { executable = "${BuildParams.runtimeJavaHome}/bin/java" args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index" - dependsOn "copyExpression", "copyPainless" - systemProperty 'java.library.path', file("../libs/native/libraries/build/platform/${platformName()}-${os.arch}") -} - -String platformName() { - String name = System.getProperty("os.name"); - if (name.startsWith("Mac")) { - return "darwin"; - } else { - return name.toLowerCase(Locale.ROOT); - } + dependsOn "copyExpression", "copyPainless", configurations.nativeLib + systemProperty 'es.nativelibs.path', TestUtil.getTestLibraryPath(file("../libs/native/libraries/build/platform/").toString()) } spotless { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java index 49603043e7bcc..59fdfff3025a1 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java @@ -20,13 +20,10 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBigArrayBlock; import org.elasticsearch.compute.data.BooleanBigArrayVector; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; -import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.DoubleBigArrayBlock; import org.elasticsearch.compute.data.DoubleBigArrayVector; -import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; import org.elasticsearch.compute.data.IntBigArrayBlock; import org.elasticsearch.compute.data.IntBigArrayVector; @@ -34,39 +31,13 @@ import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBigArrayBlock; import org.elasticsearch.compute.data.LongBigArrayVector; -import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Level; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OperationsPerInvocation; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Param; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; -import org.openjdk.jmh.annotations.Warmup; import 
java.util.ArrayList; import java.util.BitSet; -import java.util.Collections; -import java.util.List; import java.util.Random; -import java.util.concurrent.TimeUnit; -import java.util.stream.IntStream; - -@Warmup(iterations = 5) -@Measurement(iterations = 7) -@BenchmarkMode(Mode.AverageTime) -@OutputTimeUnit(TimeUnit.NANOSECONDS) -@State(Scope.Thread) -@Fork(1) -public class BlockBenchmark { +public class BlockBenchmark { /** * All data type/block kind combinations to be loaded before the benchmark. * It is important to be exhaustive here so that all implementers of {@link IntBlock#getInt(int)} are actually loaded when we benchmark @@ -114,35 +85,12 @@ public class BlockBenchmark { private static final int MAX_MV_ELEMENTS = 100; private static final int MAX_BYTES_REF_LENGTH = 255; - private static final Random random = new Random(); - - private static final BlockFactory blockFactory = BlockFactory.getInstance( - new NoopCircuitBreaker("noop"), - BigArrays.NON_RECYCLING_INSTANCE - ); - - static { - // Smoke test all the expected values and force loading subclasses more like prod - int totalPositions = 10; - long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION]; - - for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) { - String[] params = paramString.split("/"); - String dataType = params[0]; - String blockKind = params[1]; - - BenchmarkBlocks data = buildBlocks(dataType, blockKind, totalPositions); - int[][] traversalOrders = createTraversalOrders(data.blocks, false); - run(dataType, data, traversalOrders, actualCheckSums); - assertCheckSums(data, actualCheckSums); - } - } + static final Random random = new Random(); - private record BenchmarkBlocks(Block[] blocks, long[] checkSums) {}; + static final BlockFactory blockFactory = BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE); - private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, int totalPositions) { + static Block[] buildBlocks(String dataType, String blockKind, int totalPositions) { Block[] blocks = new Block[NUM_BLOCKS_PER_ITERATION]; - long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION]; switch (dataType) { case "boolean" -> { @@ -237,11 +185,6 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in } } } - - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - BooleanBlock block = (BooleanBlock) blocks[blockIndex]; - checkSums[blockIndex] = computeBooleanCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); - } } case "BytesRef" -> { for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { @@ -294,11 +237,6 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in } } } - - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - BytesRefBlock block = (BytesRefBlock) blocks[blockIndex]; - checkSums[blockIndex] = computeBytesRefCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); - } } case "double" -> { for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { @@ -386,11 +324,6 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in } } } - - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - DoubleBlock block = (DoubleBlock) blocks[blockIndex]; - checkSums[blockIndex] = computeDoubleCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); - } } case "int" -> { for (int blockIndex = 0; 
blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { @@ -478,11 +411,6 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in } } } - - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - IntBlock block = (IntBlock) blocks[blockIndex]; - checkSums[blockIndex] = computeIntCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); - } } case "long" -> { for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { @@ -570,36 +498,12 @@ private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, in } } } - - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - LongBlock block = (LongBlock) blocks[blockIndex]; - checkSums[blockIndex] = computeLongCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); - } } default -> { throw new IllegalStateException("illegal data type [" + dataType + "]"); } } - - return new BenchmarkBlocks(blocks, checkSums); - } - - private static int[][] createTraversalOrders(Block[] blocks, boolean randomized) { - int[][] orders = new int[blocks.length][]; - - for (int i = 0; i < blocks.length; i++) { - IntStream positionsStream = IntStream.range(0, blocks[i].getPositionCount()); - - if (randomized) { - List positions = new java.util.ArrayList<>(positionsStream.boxed().toList()); - Collections.shuffle(positions, random); - orders[i] = positions.stream().mapToInt(x -> x).toArray(); - } else { - orders[i] = positionsStream.toArray(); - } - } - - return orders; + return blocks; } private static int[] randomFirstValueIndexes(int totalPositions) { @@ -631,220 +535,4 @@ private static BitSet randomNulls(int positionCount) { return nulls; } - - private static void run(String dataType, BenchmarkBlocks data, int[][] traversalOrders, long[] resultCheckSums) { - switch (dataType) { - case "boolean" -> { - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - BooleanBlock block = (BooleanBlock) data.blocks[blockIndex]; - - resultCheckSums[blockIndex] = computeBooleanCheckSum(block, traversalOrders[blockIndex]); - } - } - case "BytesRef" -> { - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - BytesRefBlock block = (BytesRefBlock) data.blocks[blockIndex]; - - resultCheckSums[blockIndex] = computeBytesRefCheckSum(block, traversalOrders[blockIndex]); - } - } - case "double" -> { - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - DoubleBlock block = (DoubleBlock) data.blocks[blockIndex]; - - resultCheckSums[blockIndex] = computeDoubleCheckSum(block, traversalOrders[blockIndex]); - } - } - case "int" -> { - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - IntBlock block = (IntBlock) data.blocks[blockIndex]; - - resultCheckSums[blockIndex] = computeIntCheckSum(block, traversalOrders[blockIndex]); - } - } - case "long" -> { - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - LongBlock block = (LongBlock) data.blocks[blockIndex]; - - resultCheckSums[blockIndex] = computeLongCheckSum(block, traversalOrders[blockIndex]); - } - } - default -> { - throw new IllegalStateException(); - } - } - } - - private static void assertCheckSums(BenchmarkBlocks data, long[] actualCheckSums) { - for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { - if (actualCheckSums[blockIndex] != data.checkSums[blockIndex]) { - throw new AssertionError("checksums do not match for block 
[" + blockIndex + "]"); - } - } - } - - private static long computeBooleanCheckSum(BooleanBlock block, int[] traversalOrder) { - long sum = 0; - - for (int position : traversalOrder) { - if (block.isNull(position)) { - continue; - } - int start = block.getFirstValueIndex(position); - int end = start + block.getValueCount(position); - for (int i = start; i < end; i++) { - sum += block.getBoolean(i) ? 1 : 0; - } - } - - return sum; - } - - private static long computeBytesRefCheckSum(BytesRefBlock block, int[] traversalOrder) { - long sum = 0; - BytesRef currentValue = new BytesRef(); - - for (int position : traversalOrder) { - if (block.isNull(position)) { - continue; - } - int start = block.getFirstValueIndex(position); - int end = start + block.getValueCount(position); - for (int i = start; i < end; i++) { - block.getBytesRef(i, currentValue); - sum += currentValue.length > 0 ? currentValue.bytes[0] : 0; - } - } - - return sum; - } - - private static long computeDoubleCheckSum(DoubleBlock block, int[] traversalOrder) { - long sum = 0; - - for (int position : traversalOrder) { - if (block.isNull(position)) { - continue; - } - int start = block.getFirstValueIndex(position); - int end = start + block.getValueCount(position); - for (int i = start; i < end; i++) { - // Use an operation that is not affected by rounding errors. Otherwise, the result may depend on the traversalOrder. - sum += (long) block.getDouble(i); - } - } - - return sum; - } - - private static long computeIntCheckSum(IntBlock block, int[] traversalOrder) { - int sum = 0; - - for (int position : traversalOrder) { - if (block.isNull(position)) { - continue; - } - int start = block.getFirstValueIndex(position); - int end = start + block.getValueCount(position); - for (int i = start; i < end; i++) { - sum += block.getInt(i); - } - } - - return sum; - } - - private static long computeLongCheckSum(LongBlock block, int[] traversalOrder) { - long sum = 0; - - for (int position : traversalOrder) { - if (block.isNull(position)) { - continue; - } - int start = block.getFirstValueIndex(position); - int end = start + block.getValueCount(position); - for (int i = start; i < end; i++) { - sum += block.getLong(i); - } - } - - return sum; - } - - private static boolean isRandom(String accessType) { - return accessType.equalsIgnoreCase("random"); - } - - /** - * Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS} - */ - @Param( - { - "boolean/array", - "boolean/array-multivalue-null", - "boolean/big-array", - "boolean/big-array-multivalue-null", - "boolean/vector", - "boolean/vector-big-array", - "boolean/vector-const", - "BytesRef/array", - "BytesRef/array-multivalue-null", - "BytesRef/vector", - "BytesRef/vector-const", - "double/array", - "double/array-multivalue-null", - "double/big-array", - "double/big-array-multivalue-null", - "double/vector", - "double/vector-big-array", - "double/vector-const", - "int/array", - "int/array-multivalue-null", - "int/big-array", - "int/big-array-multivalue-null", - "int/vector", - "int/vector-big-array", - "int/vector-const", - "long/array", - "long/array-multivalue-null", - "long/big-array", - "long/big-array-multivalue-null", - "long/vector", - "long/vector-big-array", - "long/vector-const" } - ) - public String dataTypeAndBlockKind; - - @Param({ "sequential", "random" }) - public String accessType; - - private BenchmarkBlocks data; - - private int[][] traversalOrders; - - private final long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION]; - - @Setup - public void setup() 
{ - String[] params = dataTypeAndBlockKind.split("/"); - String dataType = params[0]; - String blockKind = params[1]; - - data = buildBlocks(dataType, blockKind, BLOCK_TOTAL_POSITIONS); - traversalOrders = createTraversalOrders(data.blocks, isRandom(accessType)); - } - - @Benchmark - @OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS) - public void run() { - String[] params = dataTypeAndBlockKind.split("/"); - String dataType = params[0]; - - run(dataType, data, traversalOrders, actualCheckSums); - } - - @TearDown(Level.Iteration) - public void assertCheckSums() { - assertCheckSums(data, actualCheckSums); - } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockKeepMaskBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockKeepMaskBenchmark.java new file mode 100644 index 0000000000000..23048ad188a37 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockKeepMaskBenchmark.java @@ -0,0 +1,295 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.benchmark.compute.operator; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OperationsPerInvocation; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.concurrent.TimeUnit; + +@Warmup(iterations = 5) +@Measurement(iterations = 7) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Thread) +@Fork(1) +public class BlockKeepMaskBenchmark extends BlockBenchmark { + static { + // Smoke test all the expected values and force loading subclasses more like prod + int totalPositions = 10; + for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) { + String[] params = paramString.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + BooleanVector mask = buildMask(totalPositions); + + BenchmarkBlocks data = buildBenchmarkBlocks(dataType, blockKind, mask, totalPositions); + Block[] results = new Block[NUM_BLOCKS_PER_ITERATION]; + run(data, mask, results); + assertCheckSums(dataType, blockKind, data, results, totalPositions); + } + } + + record BenchmarkBlocks(Block[] blocks, long[] checkSums) {}; + + static BenchmarkBlocks buildBenchmarkBlocks(String dataType, String blockKind, BooleanVector mask, int 
totalPositions) { + Block[] blocks = BlockBenchmark.buildBlocks(dataType, blockKind, totalPositions); + return new BenchmarkBlocks(blocks, checksumsFor(dataType, blocks, mask)); + } + + static long[] checksumsFor(String dataType, Block[] blocks, BooleanVector mask) { + long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION]; + switch (dataType) { + case "boolean" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BooleanBlock block = (BooleanBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeBooleanCheckSum(block, mask); + } + } + case "BytesRef" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BytesRefBlock block = (BytesRefBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeBytesRefCheckSum(block, mask); + } + } + case "double" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + DoubleBlock block = (DoubleBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeDoubleCheckSum(block, mask); + } + } + case "int" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + IntBlock block = (IntBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeIntCheckSum(block, mask); + } + } + case "long" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + LongBlock block = (LongBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeLongCheckSum(block, mask); + } + } + // TODO float + default -> throw new IllegalStateException("illegal data type [" + dataType + "]"); + } + return checkSums; + } + + static BooleanVector buildMask(int totalPositions) { + try (BooleanVector.FixedBuilder builder = blockFactory.newBooleanVectorFixedBuilder(totalPositions)) { + for (int p = 0; p < totalPositions; p++) { + builder.appendBoolean(p % 2 == 0); + } + return builder.build(); + } + } + + private static void run(BenchmarkBlocks data, BooleanVector mask, Block[] results) { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + results[blockIndex] = data.blocks[blockIndex].keepMask(mask); + } + } + + private static void assertCheckSums(String dataType, String blockKind, BenchmarkBlocks data, Block[] results, int positionCount) { + long[] checkSums = checksumsFor(dataType, results, blockFactory.newConstantBooleanVector(true, positionCount)); + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (checkSums[blockIndex] != data.checkSums[blockIndex]) { + throw new AssertionError( + "checksums do not match for block [" + + blockIndex + + "][" + + dataType + + "][" + + blockKind + + "]: " + + checkSums[blockIndex] + + " vs " + + data.checkSums[blockIndex] + ); + } + } + } + + private static long computeBooleanCheckSum(BooleanBlock block, BooleanVector mask) { + long sum = 0; + + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p) || mask.getBoolean(p) == false) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + sum += block.getBoolean(i) ? 
1 : 0; + } + } + + return sum; + } + + private static long computeBytesRefCheckSum(BytesRefBlock block, BooleanVector mask) { + long sum = 0; + BytesRef scratch = new BytesRef(); + + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p) || mask.getBoolean(p) == false) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + BytesRef v = block.getBytesRef(i, scratch); + sum += v.length > 0 ? v.bytes[v.offset] : 0; + } + } + + return sum; + } + + private static long computeDoubleCheckSum(DoubleBlock block, BooleanVector mask) { + long sum = 0; + + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p) || mask.getBoolean(p) == false) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + sum += (long) block.getDouble(i); + } + } + + return sum; + } + + private static long computeIntCheckSum(IntBlock block, BooleanVector mask) { + int sum = 0; + + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p) || mask.getBoolean(p) == false) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + sum += block.getInt(i); + } + } + + return sum; + } + + private static long computeLongCheckSum(LongBlock block, BooleanVector mask) { + long sum = 0; + + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p) || mask.getBoolean(p) == false) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + sum += block.getLong(i); + } + } + + return sum; + } + + /** + * Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS} + */ + @Param( + { + "boolean/array", + "boolean/array-multivalue-null", + "boolean/big-array", + "boolean/big-array-multivalue-null", + "boolean/vector", + "boolean/vector-big-array", + "boolean/vector-const", + "BytesRef/array", + "BytesRef/array-multivalue-null", + "BytesRef/vector", + "BytesRef/vector-const", + "double/array", + "double/array-multivalue-null", + "double/big-array", + "double/big-array-multivalue-null", + "double/vector", + "double/vector-big-array", + "double/vector-const", + "int/array", + "int/array-multivalue-null", + "int/big-array", + "int/big-array-multivalue-null", + "int/vector", + "int/vector-big-array", + "int/vector-const", + "long/array", + "long/array-multivalue-null", + "long/big-array", + "long/big-array-multivalue-null", + "long/vector", + "long/vector-big-array", + "long/vector-const" } + ) + public String dataTypeAndBlockKind; + + private BenchmarkBlocks data; + + private final BooleanVector mask = buildMask(BLOCK_TOTAL_POSITIONS); + + private final Block[] results = new Block[NUM_BLOCKS_PER_ITERATION]; + + @Setup + public void setup() { + String[] params = dataTypeAndBlockKind.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + + data = buildBenchmarkBlocks(dataType, blockKind, mask, BLOCK_TOTAL_POSITIONS); + } + + @Benchmark + @OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS) + public void run() { + run(data, mask, results); + } + + @TearDown(Level.Iteration) + public void assertCheckSums() { + String[] params = dataTypeAndBlockKind.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + assertCheckSums(dataType, blockKind, data, results, 
BLOCK_TOTAL_POSITIONS); + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockReadBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockReadBenchmark.java new file mode 100644 index 0000000000000..327dcfcff3a28 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockReadBenchmark.java @@ -0,0 +1,319 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.benchmark.compute.operator; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.util.*; +import org.elasticsearch.compute.data.*; +import org.openjdk.jmh.annotations.*; + +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; + +@Warmup(iterations = 5) +@Measurement(iterations = 7) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Thread) +@Fork(1) +public class BlockReadBenchmark extends BlockBenchmark { + static { + // Smoke test all the expected values and force loading subclasses more like prod + int totalPositions = 10; + long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION]; + + for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) { + String[] params = paramString.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + + BenchmarkBlocks data = buildBenchmarkBlocks(dataType, blockKind, totalPositions); + int[][] traversalOrders = createTraversalOrders(data.blocks(), false); + run(dataType, data, traversalOrders, actualCheckSums); + assertCheckSums(data, actualCheckSums); + } + } + + private static int[][] createTraversalOrders(Block[] blocks, boolean randomized) { + int[][] orders = new int[blocks.length][]; + + for (int i = 0; i < blocks.length; i++) { + IntStream positionsStream = IntStream.range(0, blocks[i].getPositionCount()); + + if (randomized) { + List positions = new ArrayList<>(positionsStream.boxed().toList()); + Collections.shuffle(positions, random); + orders[i] = positions.stream().mapToInt(x -> x).toArray(); + } else { + orders[i] = positionsStream.toArray(); + } + } + + return orders; + } + + record BenchmarkBlocks(Block[] blocks, long[] checkSums) {}; + + static BenchmarkBlocks buildBenchmarkBlocks(String dataType, String blockKind, int totalPositions) { + Block[] blocks = BlockBenchmark.buildBlocks(dataType, blockKind, totalPositions); + long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION]; + switch (dataType) { + case "boolean" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BooleanBlock block = (BooleanBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeBooleanCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "BytesRef" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BytesRefBlock block = (BytesRefBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeBytesRefCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "double" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + DoubleBlock block = (DoubleBlock) blocks[blockIndex]; + 
checkSums[blockIndex] = computeDoubleCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "int" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + IntBlock block = (IntBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeIntCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "long" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + LongBlock block = (LongBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeLongCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + // TODO float + default -> throw new IllegalStateException("illegal data type [" + dataType + "]"); + } + return new BenchmarkBlocks(blocks, checkSums); + } + + private static void run(String dataType, BenchmarkBlocks data, int[][] traversalOrders, long[] resultCheckSums) { + switch (dataType) { + case "boolean" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BooleanBlock block = (BooleanBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeBooleanCheckSum(block, traversalOrders[blockIndex]); + } + } + case "BytesRef" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BytesRefBlock block = (BytesRefBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeBytesRefCheckSum(block, traversalOrders[blockIndex]); + } + } + case "double" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + DoubleBlock block = (DoubleBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeDoubleCheckSum(block, traversalOrders[blockIndex]); + } + } + case "int" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + IntBlock block = (IntBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeIntCheckSum(block, traversalOrders[blockIndex]); + } + } + case "long" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + LongBlock block = (LongBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeLongCheckSum(block, traversalOrders[blockIndex]); + } + } + default -> { + throw new IllegalStateException(); + } + } + } + + private static void assertCheckSums(BenchmarkBlocks data, long[] actualCheckSums) { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (actualCheckSums[blockIndex] != data.checkSums[blockIndex]) { + throw new AssertionError("checksums do not match for block [" + blockIndex + "]"); + } + } + } + + private static long computeBooleanCheckSum(BooleanBlock block, int[] traversalOrder) { + long sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getBoolean(i) ? 1 : 0; + } + } + + return sum; + } + + private static long computeBytesRefCheckSum(BytesRefBlock block, int[] traversalOrder) { + long sum = 0; + BytesRef scratch = new BytesRef(); + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + BytesRef v = block.getBytesRef(i, scratch); + sum += v.length > 0 ? 
v.bytes[v.offset] : 0; + } + } + + return sum; + } + + private static long computeDoubleCheckSum(DoubleBlock block, int[] traversalOrder) { + long sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + // Use an operation that is not affected by rounding errors. Otherwise, the result may depend on the traversalOrder. + sum += (long) block.getDouble(i); + } + } + + return sum; + } + + private static long computeIntCheckSum(IntBlock block, int[] traversalOrder) { + int sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getInt(i); + } + } + + return sum; + } + + private static long computeLongCheckSum(LongBlock block, int[] traversalOrder) { + long sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getLong(i); + } + } + + return sum; + } + + private static boolean isRandom(String accessType) { + return accessType.equalsIgnoreCase("random"); + } + + /** + * Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS} + */ + @Param( + { + "boolean/array", + "boolean/array-multivalue-null", + "boolean/big-array", + "boolean/big-array-multivalue-null", + "boolean/vector", + "boolean/vector-big-array", + "boolean/vector-const", + "BytesRef/array", + "BytesRef/array-multivalue-null", + "BytesRef/vector", + "BytesRef/vector-const", + "double/array", + "double/array-multivalue-null", + "double/big-array", + "double/big-array-multivalue-null", + "double/vector", + "double/vector-big-array", + "double/vector-const", + "int/array", + "int/array-multivalue-null", + "int/big-array", + "int/big-array-multivalue-null", + "int/vector", + "int/vector-big-array", + "int/vector-const", + "long/array", + "long/array-multivalue-null", + "long/big-array", + "long/big-array-multivalue-null", + "long/vector", + "long/vector-big-array", + "long/vector-const" } + ) + public String dataTypeAndBlockKind; + + @Param({ "sequential", "random" }) + public String accessType; + + private BenchmarkBlocks data; + + private int[][] traversalOrders; + + private final long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION]; + + @Setup + public void setup() { + String[] params = dataTypeAndBlockKind.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + + data = buildBenchmarkBlocks(dataType, blockKind, BLOCK_TOTAL_POSITIONS); + traversalOrders = createTraversalOrders(data.blocks(), isRandom(accessType)); + } + + @Benchmark + @OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS) + public void run() { + String[] params = dataTypeAndBlockKind.split("/"); + String dataType = params[0]; + + run(dataType, data, traversalOrders, actualCheckSums); + } + + @TearDown(Level.Iteration) + public void assertCheckSums() { + assertCheckSums(data, actualCheckSums); + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java new file mode 100644 index 0000000000000..2441acab7d405 --- /dev/null +++ 
b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.benchmark.h3; + +import org.elasticsearch.h3.H3; +import org.openjdk.jmh.Main; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +@OutputTimeUnit(TimeUnit.SECONDS) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 25, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(1) +public class H3Benchmark { + + @Benchmark + public void pointToH3(H3State state, Blackhole bh) { + for (int i = 0; i < state.points.length; i++) { + for (int res = 0; res <= 15; res++) { + bh.consume(H3.geoToH3(state.points[i][0], state.points[i][1], res)); + } + } + } + + @Benchmark + public void h3Boundary(H3State state, Blackhole bh) { + for (int i = 0; i < state.h3.length; i++) { + bh.consume(H3.h3ToGeoBoundary(state.h3[i])); + } + } + + public static void main(String[] args) throws Exception { + Main.main(args); + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java new file mode 100644 index 0000000000000..5707e692a0750 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.benchmark.h3; + +import org.elasticsearch.h3.H3; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; + +import java.io.IOException; +import java.util.Random; + +@State(Scope.Benchmark) +public class H3State { + + double[][] points = new double[1000][2]; + long[] h3 = new long[1000]; + + @Setup(Level.Trial) + public void setupTrial() throws IOException { + Random random = new Random(1234); + for (int i = 0; i < points.length; i++) { + points[i][0] = random.nextDouble() * 180 - 90; // lat + points[i][1] = random.nextDouble() * 360 - 180; // lon + int res = random.nextInt(16); // resolution + h3[i] = H3.geoToH3(points[i][0], points[i][1], res); + } + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java index 8c5de05a01648..d7a72615f4b93 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java @@ -32,6 +32,7 @@ import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.threadpool.DefaultBuiltInExecutorBuilders; import org.elasticsearch.threadpool.ThreadPool; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -167,7 +168,7 @@ public void setUp() throws Exception { .build(); Settings settings = Settings.builder().put("node.name", ShardsAvailabilityHealthIndicatorBenchmark.class.getSimpleName()).build(); - ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP, new DefaultBuiltInExecutorBuilders()); ClusterService clusterService = new ClusterService( Settings.EMPTY, diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java index 5a27abe8be2a4..fe221ec980dc3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java @@ -186,6 +186,11 @@ public void setDocument(int docid) { public boolean needs_score() { return false; } + + @Override + public boolean needs_termStats() { + return false; + } }; }; } diff --git a/branches.json b/branches.json index b852cd1fa5dbd..1d860501cbc87 100644 --- a/branches.json +++ b/branches.json @@ -7,9 +7,6 @@ { "branch": "8.15" }, - { - "branch": "8.14" - }, { "branch": "7.17" } diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index efe2ff3449216..9036682bf0f0c 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip +distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleInternalPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleInternalPluginFuncTest.groovy index 172bf9115d152..a89a26d2800d4 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleInternalPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleInternalPluginFuncTest.groovy @@ -15,17 +15,23 @@ abstract class AbstractGradleInternalPluginFuncTest extends AbstractJavaGradleFu abstract Class getPluginClassUnderTest(); def setup() { + settingsFile.text = """ + plugins { + id 'elasticsearch.java-toolchain' + } + """ + settingsFile.text + buildFile << """ import ${getPluginClassUnderTest().getName()} - + plugins { // bring in build-tools-internal onto the classpath id 'elasticsearch.global-build-info' } // internally used plugins do not have a plugin id as they are - // not intended to be used directly from build scripts + // not intended to be used directly from build scripts plugins.apply(${getPluginClassUnderTest().getSimpleName()}) - + """ } } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy index d74dce6924e32..c63eca8680179 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy @@ -19,22 +19,22 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest { given: someLibProject() subProject("some-depending-lib") { - buildFile << """ + buildFile << """ plugins { id 'elasticsearch.java-doc' id 'java' } group = 'org.acme.depending' - + dependencies { implementation project(':some-lib') } """ classFile('org.acme.depending.SomeDepending') << """ package org.acme.depending; - + import org.acme.Something; - + public class SomeDepending { public Something createSomething() { return new Something(); @@ -66,16 +66,17 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest { def "sources of shadowed dependencies are added to projects javadoc"() { given: + settingsFile.text = "" someLibProject() << """version = 1.0""" subProject("some-depending-lib") { - buildFile << """ + buildFile << """ plugins { id 'elasticsearch.java-doc' id 'com.github.johnrengelman.shadow' version '7.1.2' id 'java' } group = 'org.acme.depending' - + dependencies { implementation project(':some-lib') shadow project(':some-shadowed-lib') @@ -83,9 +84,9 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest { """ classFile('org.acme.depending.SomeDepending') << """ package org.acme.depending; - + import org.acme.Something; - + public class SomeDepending { public Something createSomething() { return new Something(); @@ -94,9 +95,9 @@ class ElasticsearchJavadocPluginFuncTest extends 
AbstractGradleFuncTest { """ classFile('org.acme.depending.SomeShadowedDepending') << """ package org.acme.depending; - + import org.acme.shadowed.Shadowed; - + public class SomeShadowedDepending { public Shadowed createShadowed() { return new Shadowed(); @@ -114,7 +115,7 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest { """ classFile('org.acme.shadowed.Shadowed') << """ package org.acme.shadowed; - + public class Shadowed { } """ @@ -145,22 +146,22 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest { tasks.named("javadoc").configure { enabled = false } """ subProject("some-depending-lib") { - buildFile << """ + buildFile << """ plugins { id 'elasticsearch.java-doc' id 'java' } group = 'org.acme.depending' - + dependencies { implementation project(':some-lib') } """ classFile('org.acme.depending.SomeDepending') << """ package org.acme.depending; - + import org.acme.Something; - + public class SomeDepending { public Something createSomething() { return new Something(); @@ -264,7 +265,7 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest { classFile('org.acme.Something') << """ package org.acme; - + public class Something { } """ diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy index b365624b5749a..c6a4572cb8a86 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTaskFuncTest.groovy @@ -18,6 +18,7 @@ import org.elasticsearch.gradle.fixtures.AbstractGradleFuncTest import org.elasticsearch.gradle.fixtures.AbstractGradleInternalPluginFuncTest import org.elasticsearch.gradle.internal.conventions.precommit.LicenseHeadersPrecommitPlugin import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin +import org.gradle.testkit.runner.GradleRunner import org.gradle.testkit.runner.TaskOutcome @@ -211,6 +212,10 @@ class ThirdPartyAuditTaskFuncTest extends AbstractGradleInternalPluginFuncTest { loggingDynamicType.toJar(targetFile(dir("${baseGroupFolderPath}/broken-log4j/0.0.1/"), "broken-log4j-0.0.1.jar")) } + GradleRunner gradleRunner(Object... 
arguments) { + return super.gradleRunner(arguments).withEnvironment([RUNTIME_JAVA_HOME: System.getProperty("java.home")]) + } + static File targetFile(File dir, String fileName) { new File(dir, fileName) } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy index 94fa329af1715..5e96fa524268a 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy @@ -12,6 +12,7 @@ import spock.lang.IgnoreIf import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest +import org.gradle.testkit.runner.GradleRunner import org.gradle.testkit.runner.TaskOutcome @IgnoreIf({ os.isWindows() }) @@ -205,4 +206,8 @@ echo "Running elasticsearch \$0" } """ } + + GradleRunner gradleRunner(Object... arguments) { + return super.gradleRunner(arguments).withEnvironment([RUNTIME_JAVA_HOME: System.getProperty("java.home")]) + } } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 6cb22dad9bc79..285c3a61b08c2 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -168,8 +168,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { '-ea', '-Djava.security.manager=allow', '-Djava.locale.providers=SPI,COMPAT', - '-Djava.library.path=' + testLibraryPath, - '-Djna.library.path=' + testLibraryPath, + '-Des.nativelibs.path=' + testLibraryPath, // TODO: only open these for mockito when it is modularized '--add-opens=java.base/java.security.cert=ALL-UNNAMED', '--add-opens=java.base/java.nio.channels=ALL-UNNAMED', diff --git a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle index f85ceed18604b..3400be77a588d 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle @@ -25,7 +25,6 @@ configure(allprojects) { JvmVendorSpec.ORACLE : JvmVendorSpec.matching(VersionProperties.bundledJdkVendor) } - project.tasks.withType(Test).configureEach { Test test -> if (BuildParams.getIsRuntimeJavaHomeSet()) { test.executable = "${BuildParams.runtimeJavaHome}/bin/java" + @@ -47,12 +46,4 @@ configure(allprojects) { } } } - project.plugins.withType(ThirdPartyAuditPrecommitPlugin) { - project.getTasks().withType(ThirdPartyAuditTask.class).configureEach { - if (BuildParams.getIsRuntimeJavaHomeSet() == false) { - javaHome.set(launcher.map { it.metadata.installationPath.asFile.path }) - targetCompatibility.set(providers.provider(() -> JavaVersion.toVersion(launcher.map { it.metadata.javaRuntimeVersion }.get()))) - } - } - } } diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy index e454d2ee38fff..658e2623cbbd7 100644 --- 
a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy @@ -13,6 +13,7 @@ import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.internal.test.AntFixture import org.gradle.api.file.FileSystemOperations import org.gradle.api.file.ProjectLayout +import org.gradle.api.provider.ProviderFactory import org.gradle.api.tasks.Internal import org.gradle.process.ExecOperations @@ -24,14 +25,17 @@ abstract class AntFixtureStop extends LoggedExec implements FixtureStop { AntFixture fixture @Inject - AntFixtureStop(ProjectLayout projectLayout, ExecOperations execOperations, FileSystemOperations fileSystemOperations) { - super(projectLayout, execOperations, fileSystemOperations) + AntFixtureStop(ProjectLayout projectLayout, + ExecOperations execOperations, + FileSystemOperations fileSystemOperations, + ProviderFactory providerFactory) { + super(projectLayout, execOperations, fileSystemOperations, providerFactory) } void setFixture(AntFixture fixture) { assert this.fixture == null this.fixture = fixture; - final Object pid = "${ -> this.fixture.pid }" + final Object pid = "${-> this.fixture.pid}" onlyIf("pidFile exists") { fixture.pidFile.exists() } doFirst { logger.info("Shutting down ${fixture.name} with pid ${pid}") diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index b513fd7b93631..b27f480df4e63 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -61,7 +61,7 @@ public void apply(Project target) { : System.getenv("BUILDKITE_BUILD_NUMBER"); String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST"); if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) { - File targetFile = target.file("build/" + buildNumber + ".tar.bz2"); + File targetFile = calculateTargetFile(target, buildNumber); File projectDir = target.getProjectDir(); File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); DevelocityConfiguration extension = target.getExtensions().getByType(DevelocityConfiguration.class); @@ -86,9 +86,19 @@ public void apply(Project target) { } } + private File calculateTargetFile(Project target, String buildNumber) { + File uploadFile = target.file("build/" + buildNumber + ".tar.bz2"); + int artifactIndex = 1; + while (uploadFile.exists()) { + uploadFile = target.file("build/" + buildNumber + "-" + artifactIndex++ + ".tar.bz2"); + } + return uploadFile; + } + private List resolveProjectLogs(File projectDir) { var projectDirFiles = getFileOperations().fileTree(projectDir); projectDirFiles.include("**/*.hprof"); + projectDirFiles.include("**/build/reports/configuration-cache/**"); projectDirFiles.include("**/build/test-results/**/*.xml"); projectDirFiles.include("**/build/testclusters/**"); projectDirFiles.include("**/build/testrun/*/temp/**"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index f95d9d72a473f..a3b1dd9731591 100644 --- 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -189,9 +189,7 @@ private static void configureNativeLibraryPath(Project project) { var libraryPath = (Supplier) () -> TestUtil.getTestLibraryPath(nativeConfigFiles.getAsPath()); test.dependsOn(nativeConfigFiles); - // we may use JNA or the JDK's foreign function api to load libraries, so we set both sysprops - systemProperties.systemProperty("java.library.path", libraryPath); - systemProperties.systemProperty("jna.library.path", libraryPath); + systemProperties.systemProperty("es.nativelibs.path", libraryPath); }); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java index e9e75a711a8ff..03b8f19d10b13 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmbeddedProviderExtension.java @@ -43,17 +43,16 @@ void impl(String implName, Project implProject) { }); String manifestTaskName = "generate" + capitalName + "ProviderManifest"; - Provider generatedResourcesDir = project.getLayout().getBuildDirectory().dir("generated-resources"); + Provider generatedResourcesRoot = project.getLayout().getBuildDirectory().dir("generated-resources"); var generateProviderManifest = project.getTasks().register(manifestTaskName, GenerateProviderManifest.class); generateProviderManifest.configure(t -> { - t.getManifestFile().set(generatedResourcesDir.map(d -> d.file("LISTING.TXT"))); + t.getManifestFile().set(generatedResourcesRoot.map(d -> d.dir(manifestTaskName).file("LISTING.TXT"))); t.getProviderImplClasspath().from(implConfig); }); - String implTaskName = "generate" + capitalName + "ProviderImpl"; var generateProviderImpl = project.getTasks().register(implTaskName, Sync.class); generateProviderImpl.configure(t -> { - t.into(generatedResourcesDir); + t.into(generatedResourcesRoot.map(d -> d.dir(implTaskName))); t.into("IMPL-JARS/" + implName, spec -> { spec.from(implConfig); spec.from(generateProviderManifest); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java index b090f05c14c83..964784643936b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java @@ -26,15 +26,15 @@ import static java.util.Objects.requireNonNull; public class BuildParams { - private static File runtimeJavaHome; + private static Provider runtimeJavaHome; private static Boolean isRuntimeJavaHomeSet; private static List javaVersions; private static JavaVersion minimumCompilerVersion; private static JavaVersion minimumRuntimeVersion; private static JavaVersion gradleJavaVersion; - private static JavaVersion runtimeJavaVersion; + private static Provider runtimeJavaVersion; private static Provider> javaToolChainSpec; - private static String runtimeJavaDetails; + private static Provider runtimeJavaDetails; private static Boolean inFipsJvm; private static String gitRevision; private static String gitOrigin; @@ -58,7 +58,7 @@ public static void init(Consumer 
initializer) { } public static File getRuntimeJavaHome() { - return value(runtimeJavaHome); + return value(runtimeJavaHome).get(); } public static Boolean getIsRuntimeJavaHomeSet() { @@ -82,11 +82,11 @@ public static JavaVersion getGradleJavaVersion() { } public static JavaVersion getRuntimeJavaVersion() { - return value(runtimeJavaVersion); + return value(runtimeJavaVersion.get()); } public static String getRuntimeJavaDetails() { - return value(runtimeJavaDetails); + return value(runtimeJavaDetails.get()); } public static Boolean isInFipsJvm() { @@ -126,7 +126,7 @@ public static Boolean isCi() { } public static Boolean isGraalVmRuntime() { - return value(runtimeJavaDetails.toLowerCase().contains("graalvm")); + return value(runtimeJavaDetails.get().toLowerCase().contains("graalvm")); } public static Integer getDefaultParallel() { @@ -182,16 +182,18 @@ public void reset() { }); } - public void setRuntimeJavaHome(File runtimeJavaHome) { - try { - BuildParams.runtimeJavaHome = requireNonNull(runtimeJavaHome).getCanonicalFile(); - } catch (IOException e) { - throw new RuntimeException(e); - } + public void setRuntimeJavaHome(Provider runtimeJavaHome) { + BuildParams.runtimeJavaHome = runtimeJavaHome.map(javaHome -> { + try { + return javaHome.getCanonicalFile(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); } - public void setIsRuntimeJavaHomeSet(boolean isRutimeJavaHomeSet) { - BuildParams.isRuntimeJavaHomeSet = isRutimeJavaHomeSet; + public void setIsRuntimeJavaHomeSet(boolean isRuntimeJavaHomeSet) { + BuildParams.isRuntimeJavaHomeSet = isRuntimeJavaHomeSet; } public void setJavaVersions(List javaVersions) { @@ -210,11 +212,11 @@ public void setGradleJavaVersion(JavaVersion gradleJavaVersion) { BuildParams.gradleJavaVersion = requireNonNull(gradleJavaVersion); } - public void setRuntimeJavaVersion(JavaVersion runtimeJavaVersion) { + public void setRuntimeJavaVersion(Provider runtimeJavaVersion) { BuildParams.runtimeJavaVersion = requireNonNull(runtimeJavaVersion); } - public void setRuntimeJavaDetails(String runtimeJavaDetails) { + public void setRuntimeJavaDetails(Provider runtimeJavaDetails) { BuildParams.runtimeJavaDetails = runtimeJavaDetails; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index e61bbefc9a973..b287815854098 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -8,6 +8,7 @@ package org.elasticsearch.gradle.internal.info; import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.BwcVersions; import org.elasticsearch.gradle.internal.conventions.info.GitInfo; import org.elasticsearch.gradle.internal.conventions.info.ParallelDetector; @@ -31,7 +32,6 @@ import org.gradle.internal.jvm.inspection.JvmMetadataDetector; import org.gradle.internal.jvm.inspection.JvmVendor; import org.gradle.jvm.toolchain.JavaLanguageVersion; -import org.gradle.jvm.toolchain.JavaLauncher; import org.gradle.jvm.toolchain.JavaToolchainService; import org.gradle.jvm.toolchain.JavaToolchainSpec; import org.gradle.jvm.toolchain.JvmVendorSpec; @@ -48,10 +48,8 @@ import java.nio.file.Files; import java.time.ZoneOffset; import java.time.ZonedDateTime; -import java.util.Arrays; 
import java.util.List; import java.util.Locale; -import java.util.Optional; import java.util.Random; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -98,9 +96,11 @@ public void apply(Project project) { JavaVersion minimumCompilerVersion = JavaVersion.toVersion(getResourceContents("/minimumCompilerVersion")); JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(getResourceContents("/minimumRuntimeVersion")); - Optional selectedRuntimeJavaHome = findRuntimeJavaHome(); - File actualRuntimeJavaHome = selectedRuntimeJavaHome.orElse(Jvm.current().getJavaHome()); - boolean isRuntimeJavaHomeSet = selectedRuntimeJavaHome.isPresent(); + Provider explicitRuntimeJavaHome = findRuntimeJavaHome(); + boolean isExplicitRuntimeJavaHomeSet = explicitRuntimeJavaHome.isPresent(); + Provider actualRuntimeJavaHome = isExplicitRuntimeJavaHomeSet + ? explicitRuntimeJavaHome + : resolveJavaHomeFromToolChainService(VersionProperties.getBundledJdkMajorVersion()); GitInfo gitInfo = GitInfo.gitInfo(project.getRootDir()); @@ -108,16 +108,22 @@ public void apply(Project project) { params.reset(); params.setRuntimeJavaHome(actualRuntimeJavaHome); params.setJavaToolChainSpec(resolveToolchainSpecFromEnv()); + Provider runtimeJdkMetaData = actualRuntimeJavaHome.map( + runtimeJavaHome -> metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome)) + ); params.setRuntimeJavaVersion( - determineJavaVersion( - "runtime java.home", - actualRuntimeJavaHome, - isRuntimeJavaHomeSet ? minimumRuntimeVersion : Jvm.current().getJavaVersion() + actualRuntimeJavaHome.map( + javaHome -> determineJavaVersion( + "runtime java.home", + javaHome, + isExplicitRuntimeJavaHomeSet + ? minimumRuntimeVersion + : JavaVersion.toVersion(VersionProperties.getBundledJdkMajorVersion()) + ) ) ); - params.setIsRuntimeJavaHomeSet(isRuntimeJavaHomeSet); - JvmInstallationMetadata runtimeJdkMetaData = metadataDetector.getMetadata(getJavaInstallation(actualRuntimeJavaHome)); - params.setRuntimeJavaDetails(formatJavaVendorDetails(runtimeJdkMetaData)); + params.setIsRuntimeJavaHomeSet(isExplicitRuntimeJavaHomeSet); + params.setRuntimeJavaDetails(runtimeJdkMetaData.map(m -> formatJavaVendorDetails(m))); params.setJavaVersions(getAvailableJavaVersions()); params.setMinimumCompilerVersion(minimumCompilerVersion); params.setMinimumRuntimeVersion(minimumRuntimeVersion); @@ -300,62 +306,30 @@ private static void assertMinimumCompilerVersion(JavaVersion minimumCompilerVers } } - private Optional findRuntimeJavaHome() { + private Provider findRuntimeJavaHome() { String runtimeJavaProperty = System.getProperty("runtime.java"); if (runtimeJavaProperty != null) { - return Optional.of(resolveJavaHomeFromToolChainService(runtimeJavaProperty)); + return resolveJavaHomeFromToolChainService(runtimeJavaProperty); } - String env = System.getenv("RUNTIME_JAVA_HOME"); - if (env != null) { - return Optional.of(new File(env)); + if (System.getenv("RUNTIME_JAVA_HOME") != null) { + return providers.provider(() -> new File(System.getenv("RUNTIME_JAVA_HOME"))); } // fall back to tool chain if set. - env = System.getenv("JAVA_TOOLCHAIN_HOME"); - return env == null ? 
Optional.empty() : Optional.of(new File(env)); - } - - @NotNull - private String resolveJavaHomeFromEnvVariable(String javaHomeEnvVar) { - Provider javaHomeNames = providers.gradleProperty("org.gradle.java.installations.fromEnv"); - // Provide a useful error if we're looking for a Java home version that we haven't told Gradle about yet - Arrays.stream(javaHomeNames.get().split(",")) - .filter(s -> s.equals(javaHomeEnvVar)) - .findFirst() - .orElseThrow( - () -> new GradleException( - "Environment variable '" - + javaHomeEnvVar - + "' is not registered with Gradle installation supplier. Ensure 'org.gradle.java.installations.fromEnv' is " - + "updated in gradle.properties file." - ) - ); - String versionedJavaHome = System.getenv(javaHomeEnvVar); - if (versionedJavaHome == null) { - final String exceptionMessage = String.format( - Locale.ROOT, - "$%s must be set to build Elasticsearch. " - + "Note that if the variable was just set you " - + "might have to run `./gradlew --stop` for " - + "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 details.", - javaHomeEnvVar - ); - throw new GradleException(exceptionMessage); - } - return versionedJavaHome; + String env = System.getenv("JAVA_TOOLCHAIN_HOME"); + return providers.provider(() -> { + if (env == null) { + return null; + } + return new File(env); + }); } @NotNull - private File resolveJavaHomeFromToolChainService(String version) { + private Provider resolveJavaHomeFromToolChainService(String version) { Property value = objectFactory.property(JavaLanguageVersion.class).value(JavaLanguageVersion.of(version)); - Provider javaLauncherProvider = toolChainService.launcherFor(javaToolchainSpec -> { - javaToolchainSpec.getLanguageVersion().value(value); - }); - return javaLauncherProvider.get().getMetadata().getInstallationPath().getAsFile(); - } - - private static String getJavaHomeEnvVarName(String version) { - return "JAVA" + version + "_HOME"; + return toolChainService.launcherFor(javaToolchainSpec -> javaToolchainSpec.getLanguageVersion().value(value)) + .map(launcher -> launcher.getMetadata().getInstallationPath().getAsFile()); } public static String getResourceContents(String resourcePath) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java index c52ea9aaeb6f5..533066168c604 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LicenseAnalyzer.java @@ -102,7 +102,7 @@ public class LicenseAnalyzer { AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE\\. + SOFTWARE\\.? 
""").replaceAll("\\s+", "\\\\s*"), Pattern.DOTALL)), new LicenseMatcher( "MIT-0", diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java index 1fc030be42480..9e40d96438e48 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -66,10 +66,8 @@ public TaskProvider createTask(Project project) { ) ); t.dependsOn(resourcesTask); - if (BuildParams.getIsRuntimeJavaHomeSet()) { - t.getJavaHome().set(project.provider(BuildParams::getRuntimeJavaHome).map(File::getPath)); - } t.getTargetCompatibility().set(project.provider(BuildParams::getRuntimeJavaVersion)); + t.getJavaHome().set(project.provider(BuildParams::getRuntimeJavaHome).map(File::getPath)); t.setSignatureFile(resourcesDir.resolve("forbidden/third-party-audit.txt").toFile()); t.getJdkJarHellClasspath().from(jdkJarHellConfig); t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnly)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index 08abb02ea831e..ec79fe20492e1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -52,7 +52,7 @@ public void apply(Project project) { project.getTasks().register("extractCurrentVersions", ExtractCurrentVersionsTask.class); project.getTasks().register("tagVersions", TagVersionsTask.class); - project.getTasks().register("setCompatibleVersions", SetCompatibleVersionsTask.class); + project.getTasks().register("setCompatibleVersions", SetCompatibleVersionsTask.class, t -> t.setThisVersion(version)); final FileTree yamlFiles = projectDirectory.dir("docs/changelog") .getAsFileTree() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java index 15e0a0cc345d5..17761e5183b31 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/SetCompatibleVersionsTask.java @@ -14,6 +14,7 @@ import com.github.javaparser.ast.expr.NameExpr; import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; +import org.elasticsearch.gradle.Version; import org.gradle.api.tasks.TaskAction; import org.gradle.api.tasks.options.Option; import org.gradle.initialization.layout.BuildLayout; @@ -28,6 +29,8 @@ public class SetCompatibleVersionsTask extends AbstractVersionsTask { + private Version thisVersion; + private Version releaseVersion; private Map versionIds = Map.of(); @Inject @@ -35,21 +38,35 @@ public SetCompatibleVersionsTask(BuildLayout layout) { super(layout); } + public void setThisVersion(Version version) { + thisVersion = version; + } + @Option(option = "version-id", description = "Version id used for the release. 
Of the form :.") public void versionIds(List version) { this.versionIds = splitVersionIds(version); } + @Option(option = "release", description = "The version being released") + public void releaseVersion(String version) { + releaseVersion = Version.fromString(version); + } + @TaskAction public void executeTask() throws IOException { if (versionIds.isEmpty()) { throw new IllegalArgumentException("No version ids specified"); } + + if (releaseVersion.getMajor() < thisVersion.getMajor()) { + // don't need to update CCS version - this is for a different major + return; + } + Integer transportVersion = versionIds.get(TRANSPORT_VERSION_TYPE); if (transportVersion == null) { throw new IllegalArgumentException("TransportVersion id not specified"); } - Path versionJava = rootDir.resolve(TRANSPORT_VERSIONS_FILE_PATH); CompilationUnit file = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionJava)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java index 96fde95d0dd17..965f3964c9a38 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java @@ -11,7 +11,6 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.ElasticsearchDistribution; -import java.io.File; import java.util.Locale; public class TestUtil { @@ -19,8 +18,7 @@ public class TestUtil { public static String getTestLibraryPath(String nativeLibsDir) { String arch = Architecture.current().toString().toLowerCase(Locale.ROOT); String platform = String.format(Locale.ROOT, "%s-%s", ElasticsearchDistribution.CURRENT_PLATFORM, arch); - String existingLibraryPath = System.getProperty("java.library.path"); - return String.format(Locale.ROOT, "%s/%s%c%s", nativeLibsDir, platform, File.pathSeparatorChar, existingLibraryPath); + return String.format(Locale.ROOT, "%s/%s", nativeLibsDir, platform); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/SourceDirectoryCommandLineArgumentProvider.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/SourceDirectoryCommandLineArgumentProvider.java new file mode 100644 index 0000000000000..30141f021935b --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/SourceDirectoryCommandLineArgumentProvider.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.util; + +import org.gradle.api.file.Directory; +import org.gradle.api.tasks.InputDirectory; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; +import org.gradle.process.CommandLineArgumentProvider; + +import java.util.Arrays; + +public class SourceDirectoryCommandLineArgumentProvider implements CommandLineArgumentProvider { + + private final Directory sourceDirectory; + + public SourceDirectoryCommandLineArgumentProvider(Directory sourceDirectory) { + this.sourceDirectory = sourceDirectory; + } + + public Iterable asArguments() { + return Arrays.asList("-s", sourceDirectory.getAsFile().getAbsolutePath()); + } + + @InputDirectory + @PathSensitive(PathSensitivity.RELATIVE) + public Directory getSourceDirectory() { + return sourceDirectory; + } +} diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index f7b1c8ff61774..8d04a0f38fab0 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -8.9 \ No newline at end of file +8.10 \ No newline at end of file diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java b/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java index 6087482db278d..3a425d11ccf17 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java @@ -17,6 +17,8 @@ import org.gradle.api.provider.ListProperty; import org.gradle.api.provider.MapProperty; import org.gradle.api.provider.Property; +import org.gradle.api.provider.Provider; +import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Optional; @@ -92,17 +94,45 @@ public abstract class LoggedExec extends DefaultTask implements FileSystemOperat private String output; @Inject - public LoggedExec(ProjectLayout projectLayout, ExecOperations execOperations, FileSystemOperations fileSystemOperations) { + public LoggedExec( + ProjectLayout projectLayout, + ExecOperations execOperations, + FileSystemOperations fileSystemOperations, + ProviderFactory providerFactory + ) { this.projectLayout = projectLayout; this.execOperations = execOperations; this.fileSystemOperations = fileSystemOperations; getWorkingDir().convention(projectLayout.getProjectDirectory().getAsFile()); // For now mimic default behaviour of Gradle Exec task here - getEnvironment().putAll(System.getenv()); + setupDefaultEnvironment(providerFactory); getCaptureOutput().convention(false); getSpoolOutput().convention(false); } + /** + * We explicitly configure the environment variables that are passed to the executed process. + * This is required to make sure that the build cache and Gradle configuration cache is correctly configured + * can be reused across different build invocations. 
+ * */ + private void setupDefaultEnvironment(ProviderFactory providerFactory) { + getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("BUILDKITE")); + getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("GRADLE_BUILD_CACHE")); + getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("VAULT")); + Provider javaToolchainHome = providerFactory.environmentVariable("JAVA_TOOLCHAIN_HOME"); + if (javaToolchainHome.isPresent()) { + getEnvironment().put("JAVA_TOOLCHAIN_HOME", javaToolchainHome); + } + Provider javaRuntimeHome = providerFactory.environmentVariable("RUNTIME_JAVA_HOME"); + if (javaRuntimeHome.isPresent()) { + getEnvironment().put("RUNTIME_JAVA_HOME", javaRuntimeHome); + } + Provider path = providerFactory.environmentVariable("PATH"); + if (path.isPresent()) { + getEnvironment().put("PATH", path); + } + } + @TaskAction public void run() { boolean spoolOutput = getSpoolOutput().get(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java index a1da860abe26a..9593a281686e7 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java @@ -22,6 +22,9 @@ public void apply(Project project) { test.systemProperty("tests.gradle", true); test.systemProperty("tests.task", test.getPath()); + // Flag is required for later Java versions since our tests use a custom security manager + test.jvmArgs("-Djava.security.manager=allow"); + SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider(); // don't track these as inputs since they contain absolute paths and break cache relocatability nonInputProperties.systemProperty("gradle.dist.lib", gradle.getGradleHomeDir().getAbsolutePath() + "/lib"); diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index 41f6f445f58ec..5ea970c533474 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -13,6 +13,7 @@ import org.elasticsearch.gradle.internal.test.BuildConfigurationAwareGradleRunne import org.elasticsearch.gradle.internal.test.InternalAwareGradleRunner import org.elasticsearch.gradle.internal.test.NormalizeOutputGradleRunner import org.elasticsearch.gradle.internal.test.TestResultExtension +import org.gradle.internal.component.external.model.ComponentVariant import org.gradle.testkit.runner.BuildResult import org.gradle.testkit.runner.GradleRunner import org.junit.Rule @@ -22,6 +23,7 @@ import spock.lang.TempDir import java.lang.management.ManagementFactory import java.nio.file.Files +import java.io.File import java.nio.file.Path import java.util.jar.JarEntry import java.util.jar.JarOutputStream diff --git a/catalog-info.yaml b/catalog-info.yaml index dfeeae51c1b3a..e57841c9de268 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -125,7 +125,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot 
lucene_snapshot_10 default_branch: lucene_snapshot teams: elasticsearch-team: {} @@ -142,6 +142,10 @@ spec: branch: lucene_snapshot cronline: "0 2 * * * America/New_York" message: "Builds a new lucene snapshot 1x per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 2 * * * America/New_York" + message: "Builds a new lucene snapshot 1x per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -169,7 +173,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot lucene_snapshot_10 default_branch: lucene_snapshot teams: elasticsearch-team: {} @@ -186,6 +190,10 @@ spec: branch: lucene_snapshot cronline: "0 6 * * * America/New_York" message: "Merges main into lucene_snapshot branch 1x per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 6 * * * America/New_York" + message: "Merges main into lucene_snapshot_10 branch 1x per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -213,7 +221,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot lucene_snapshot_10 default_branch: lucene_snapshot teams: elasticsearch-team: {} @@ -230,6 +238,10 @@ spec: branch: lucene_snapshot cronline: "0 9,12,15,18 * * * America/New_York" message: "Runs tests against lucene_snapshot branch several times per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 9,12,15,18 * * * America/New_York" + message: "Runs tests against lucene_snapshot_10 branch several times per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index 9896fd6c84599..01ec6c118bf24 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -16,9 +16,9 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.plugin.noop.NoopPlugin; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git 
a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java index 790b6bfd6deca..871cdb860a9a9 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java @@ -13,9 +13,9 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.plugin.noop.NoopPlugin; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index 32f35b05015b9..2a2a77a6df820 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -22,7 +22,7 @@ <% if (docker_base == 'iron_bank') { %> ARG BASE_REGISTRY=registry1.dso.mil ARG BASE_IMAGE=ironbank/redhat/ubi/ubi9 -ARG BASE_TAG=9.3 +ARG BASE_TAG=9.4 <% } %> ################################################################################ diff --git a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml index 38ce16a413af2..f4364c5008c09 100644 --- a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml +++ b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml @@ -14,7 +14,7 @@ tags: # Build args passed to Dockerfile ARGs args: BASE_IMAGE: "redhat/ubi/ubi9" - BASE_TAG: "9.3" + BASE_TAG: "9.4" # Docker image labels labels: diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java index 7b904d4cb5a89..bea7fbb7f63e8 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java @@ -32,6 +32,7 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Locale; +import java.util.concurrent.atomic.AtomicBoolean; /** * The main CLI for running Elasticsearch. @@ -44,6 +45,8 @@ class ServerCli extends EnvironmentAwareCommand { private final OptionSpecBuilder quietOption; private final OptionSpec enrollmentTokenOption; + // flag for indicating shutdown has begun. 
we use an AtomicBoolean to double as a synchronization object + private final AtomicBoolean shuttingDown = new AtomicBoolean(false); private volatile ServerProcess server; // visible for testing @@ -98,7 +101,14 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce syncPlugins(terminal, env, processInfo); ServerArgs args = createArgs(options, env, secrets, processInfo); - this.server = startServer(terminal, processInfo, args); + synchronized (shuttingDown) { + // if we are shutting down there is no reason to start the server + if (shuttingDown.get()) { + terminal.println("CLI is shutting down, skipping starting server process"); + return; + } + this.server = startServer(terminal, processInfo, args); + } } if (options.has(daemonizeOption)) { @@ -233,8 +243,11 @@ private ServerArgs createArgs(OptionSet options, Environment env, SecureSettings @Override public void close() throws IOException { - if (server != null) { - server.stop(); + synchronized (shuttingDown) { + shuttingDown.set(true); + if (server != null) { + server.stop(); + } } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 2a89f18209d11..94e2d538c0ad0 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -10,11 +10,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.core.SuppressForbidden; -import java.io.File; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -25,7 +21,6 @@ final class SystemJvmOptions { static List systemJvmOptions(Settings nodeSettings, final Map sysprops) { String distroType = sysprops.get("es.distribution.type"); boolean isHotspot = sysprops.getOrDefault("sun.management.compiler", "").contains("HotSpot"); - String libraryPath = findLibraryPath(sysprops); return Stream.concat( Stream.of( @@ -73,8 +68,6 @@ static List systemJvmOptions(Settings nodeSettings, final Map TEST_SYSPROPS = Map.of( - "os.name", - "Linux", - "os.arch", - "aarch64", - "java.library.path", - "/usr/lib" - ); + private static final Map TEST_SYSPROPS = Map.of("os.name", "Linux", "os.arch", "aarch64"); public void testSubstitution() { final List jvmOptions = JvmOptionsParser.substitutePlaceholders( @@ -390,40 +380,4 @@ public void testCommandLineDistributionType() { final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops); assertThat(jvmOptions, hasItem("-Des.distribution.type=testdistro")); } - - public void testLibraryPath() { - assertLibraryPath("Mac OS", "aarch64", "darwin-aarch64"); - assertLibraryPath("Mac OS", "amd64", "darwin-x64"); - assertLibraryPath("Mac OS", "x86_64", "darwin-x64"); - assertLibraryPath("Linux", "aarch64", "linux-aarch64"); - assertLibraryPath("Linux", "amd64", "linux-x64"); - assertLibraryPath("Linux", "x86_64", "linux-x64"); - assertLibraryPath("Windows", "amd64", "windows-x64"); - assertLibraryPath("Windows", "x86_64", "windows-x64"); - assertLibraryPath("Unknown", "aarch64", "unsupported_os[Unknown]-aarch64"); - assertLibraryPath("Mac OS", "Unknown", "darwin-unsupported_arch[Unknown]"); - } - - private void assertLibraryPath(String os, String arch, String 
expected) { - String existingPath = "/usr/lib"; - var sysprops = Map.of("os.name", os, "os.arch", arch, "java.library.path", existingPath); - final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops); - Map options = new HashMap<>(); - for (var jvmOption : jvmOptions) { - if (jvmOption.startsWith("-D")) { - String[] parts = jvmOption.substring(2).split("="); - assert parts.length == 2; - options.put(parts[0], parts[1]); - } - } - String separator = FileSystems.getDefault().getSeparator(); - assertThat( - options, - hasEntry(equalTo("java.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath))) - ); - assertThat( - options, - hasEntry(equalTo("jna.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath))) - ); - } } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java index 38a64a778fc27..e603790051c0c 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java @@ -36,6 +36,8 @@ import java.util.List; import java.util.Locale; import java.util.Optional; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -50,6 +52,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.matchesRegex; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; public class ServerCliTests extends CommandTestCase { @@ -383,6 +386,52 @@ public void testSecureSettingsLoaderWithNullPassword() throws Exception { assertEquals("", loader.password); } + public void testProcessCreationRace() throws Exception { + for (int i = 0; i < 10; ++i) { + CyclicBarrier raceStart = new CyclicBarrier(2); + TestServerCli cli = new TestServerCli() { + @Override + void syncPlugins(Terminal terminal, Environment env, ProcessInfo processInfo) throws Exception { + super.syncPlugins(terminal, env, processInfo); + raceStart.await(); + } + + @Override + public void close() throws IOException { + try { + raceStart.await(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new AssertionError(ie); + } catch (BrokenBarrierException e) { + throw new AssertionError(e); + } + super.close(); + } + }; + Thread closeThread = new Thread(() -> { + try { + cli.close(); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + closeThread.start(); + cli.main(new String[] {}, terminal, new ProcessInfo(sysprops, envVars, esHomeDir)); + closeThread.join(); + + if (cli.getServer() == null) { + // close won the race, so server should never have been started + assertThat(cli.startServerCalled, is(false)); + } else { + // creation won the race, so check we correctly waited on it and stopped + assertThat(cli.getServer(), sameInstance(mockServer)); + assertThat(mockServer.waitForCalled, is(true)); + assertThat(mockServer.stopCalled, is(true)); + } + } + } + private MockSecureSettingsLoader loadWithMockSecureSettingsLoader() throws Exception { var loader = new MockSecureSettingsLoader(); this.mockSecureSettingsLoader = loader; @@ -465,9 +514,9 @@ public void execute(Terminal 
terminal, OptionSet options, Environment env, Proce } private class MockServerProcess extends ServerProcess { - boolean detachCalled = false; - boolean waitForCalled = false; - boolean stopCalled = false; + volatile boolean detachCalled = false; + volatile boolean waitForCalled = false; + volatile boolean stopCalled = false; MockServerProcess() { super(null, null); @@ -505,6 +554,8 @@ void reset() { } private class TestServerCli extends ServerCli { + boolean startServerCalled = false; + @Override protected Command loadTool(String toolname, String libs) { if (toolname.equals("auto-configure-node")) { @@ -551,20 +602,21 @@ protected SecureSettingsLoader secureSettingsLoader(Environment env) { return new KeystoreSecureSettingsLoader(); } + + @Override + protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws Exception { + startServerCalled = true; + if (argsValidator != null) { + argsValidator.accept(args); + } + mockServer.reset(); + return mockServer; + } } @Override protected Command newCommand() { - return new TestServerCli() { - @Override - protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) { - if (argsValidator != null) { - argsValidator.accept(args); - } - mockServer.reset(); - return mockServer; - } - }; + return new TestServerCli(); } static class MockSecureSettingsLoader implements SecureSettingsLoader { diff --git a/docs/changelog/101373.yaml b/docs/changelog/101373.yaml deleted file mode 100644 index 53b5680301c79..0000000000000 --- a/docs/changelog/101373.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101373 -summary: Adding aggregations support for the `_ignored` field -area: Search -type: feature -issues: - - 59946 diff --git a/docs/changelog/103374.yaml b/docs/changelog/103374.yaml deleted file mode 100644 index fcdee9185eb92..0000000000000 --- a/docs/changelog/103374.yaml +++ /dev/null @@ -1,16 +0,0 @@ -pr: 103374 -summary: Cut over stored fields to ZSTD for compression -area: Search -type: enhancement -issues: [] -highlight: - title: Stored fields are now compressed with ZStandard instead of LZ4/DEFLATE - body: |- - Stored fields are now compressed by splitting documents into blocks, which - are then compressed independently with ZStandard. `index.codec: default` - (default) uses blocks of at most 14kB or 128 documents compressed with level - 0, while `index.codec: best_compression` uses blocks of at most 240kB or - 2048 documents compressed at level 3. On most datasets that we tested - against, this yielded storage improvements in the order of 10%, slightly - faster indexing and similar retrieval latencies. - notable: true diff --git a/docs/changelog/105792.yaml b/docs/changelog/105792.yaml deleted file mode 100644 index b9190e60cc96d..0000000000000 --- a/docs/changelog/105792.yaml +++ /dev/null @@ -1,18 +0,0 @@ -pr: 105792 -summary: "Change `skip_unavailable` remote cluster setting default value to true" -area: Search -type: breaking -issues: [] -breaking: - title: "Change `skip_unavailable` remote cluster setting default value to true" - area: Cluster and node setting - details: The default value of the `skip_unavailable` setting is now set to true. - All existing and future remote clusters that do not define this setting will use the new default. - This setting only affects cross-cluster searches using the _search or _async_search API. 
- impact: Unavailable remote clusters in a cross-cluster search will no longer cause the search to fail unless - skip_unavailable is configured to be `false` in elasticsearch.yml or via the `_cluster/settings` API. - Unavailable clusters with `skip_unavailable`=`true` (either explicitly or by using the new default) are marked - as SKIPPED in the search response metadata section and do not fail the entire search. If users want to ensure that a - search returns a failure when a particular remote cluster is not available, `skip_unavailable` must be now be - set explicitly. - notable: true diff --git a/docs/changelog/105829.yaml b/docs/changelog/105829.yaml deleted file mode 100644 index d9f8439e4b887..0000000000000 --- a/docs/changelog/105829.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105829 -summary: Log shard movements -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/106252.yaml b/docs/changelog/106252.yaml deleted file mode 100644 index 5e3f084632b9d..0000000000000 --- a/docs/changelog/106252.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106252 -summary: Add min/max range of the `event.ingested` field to cluster state for searchable - snapshots -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/106486.yaml b/docs/changelog/106486.yaml deleted file mode 100644 index b33df50780e02..0000000000000 --- a/docs/changelog/106486.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 106486 -summary: Create custom parser for ISO-8601 datetimes -area: Infra/Core -type: enhancement -issues: - - 102063 -highlight: - title: New custom parser for ISO-8601 datetimes - body: |- - This introduces a new custom parser for ISO-8601 datetimes, for the `iso8601`, `strict_date_optional_time`, and - `strict_date_optional_time_nanos` built-in date formats. This provides a performance improvement over the - default Java date-time parsing. Whilst it maintains much of the same behaviour, - the new parser does not accept nonsensical date-time strings that have multiple fractional seconds fields - or multiple timezone specifiers. If the new parser fails to parse a string, it will then use the previous parser - to parse it. If a large proportion of the input data consists of these invalid strings, this may cause - a small performance degradation. If you wish to force the use of the old parsers regardless, - set the JVM property `es.datetime.java_time_parsers=true` on all ES nodes. 
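The `es.datetime.java_time_parsers=true` escape hatch referenced in the 106486 entry above is an ordinary JVM system property. As a minimal sketch of how an operator might apply it on every node (the `config/jvm.options.d/` drop-in directory is the standard Elasticsearch mechanism for extra JVM flags, but the file name below is illustrative and not part of this change):

    # config/jvm.options.d/legacy-datetime-parsers.options  (hypothetical file name)
    # Force the pre-existing java.time parsers instead of the new ISO-8601 parser
    -Des.datetime.java_time_parsers=true

The same flag can also be passed ad hoc through the ES_JAVA_OPTS environment variable when starting a node, which may be more convenient for a quick test than editing the options files.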
diff --git a/docs/changelog/106553.yaml b/docs/changelog/106553.yaml deleted file mode 100644 index 0ec5b1bb02da8..0000000000000 --- a/docs/changelog/106553.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106553 -summary: Add support for hiragana_uppercase & katakana_uppercase token filters in kuromoji analysis plugin -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/106591.yaml b/docs/changelog/106591.yaml deleted file mode 100644 index 6a7814cb9cede..0000000000000 --- a/docs/changelog/106591.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106591 -summary: Make dense vector field type updatable -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/106820.yaml b/docs/changelog/106820.yaml deleted file mode 100644 index d854e3984c13d..0000000000000 --- a/docs/changelog/106820.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106820 -summary: Add a capabilities API to check node and cluster capabilities -area: Infra/REST API -type: feature -issues: [] diff --git a/docs/changelog/107081.yaml b/docs/changelog/107081.yaml deleted file mode 100644 index 2acd2f919b476..0000000000000 --- a/docs/changelog/107081.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107081 -summary: Implement synthetic source support for range fields -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/107088.yaml b/docs/changelog/107088.yaml deleted file mode 100644 index 01a926f185eea..0000000000000 --- a/docs/changelog/107088.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107088 -summary: Introduce role description field -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/107191.yaml b/docs/changelog/107191.yaml deleted file mode 100644 index 5ef6297c0f3f1..0000000000000 --- a/docs/changelog/107191.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 107191 -summary: Stricter failure handling in multi-repo get-snapshots request handling -area: Snapshot/Restore -type: bug -issues: [] -highlight: - title: Stricter failure handling in multi-repo get-snapshots request handling - body: | - If a multi-repo get-snapshots request encounters a failure in one of the - targeted repositories then earlier versions of Elasticsearch would proceed - as if the faulty repository did not exist, except for a per-repository - failure report in a separate section of the response body. This makes it - impossible to paginate the results properly in the presence of failures. In - versions 8.15.0 and later this API's failure handling behaviour has been - made stricter, reporting an overall failure if any targeted repository's - contents cannot be listed. 
- notable: true diff --git a/docs/changelog/107216.yaml b/docs/changelog/107216.yaml deleted file mode 100644 index 7144eedf9bea4..0000000000000 --- a/docs/changelog/107216.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107216 -summary: Add per-field KNN vector format to Index Segments API -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/107240.yaml b/docs/changelog/107240.yaml deleted file mode 100644 index baf4c222a9a27..0000000000000 --- a/docs/changelog/107240.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107240 -summary: Include doc size info in ingest stats -area: Ingest Node -type: enhancement -issues: - - 106386 diff --git a/docs/changelog/107244.yaml b/docs/changelog/107244.yaml deleted file mode 100644 index f805796674f93..0000000000000 --- a/docs/changelog/107244.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107244 -summary: Support effective watermark thresholds in node stats API -area: Allocation -type: enhancement -issues: [106676] diff --git a/docs/changelog/107279.yaml b/docs/changelog/107279.yaml deleted file mode 100644 index a2940ecc9ba2d..0000000000000 --- a/docs/changelog/107279.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107279 -summary: Introduce _transform/_node_stats API -area: Transform -type: feature -issues: [] diff --git a/docs/changelog/107409.yaml b/docs/changelog/107409.yaml deleted file mode 100644 index 6f2350239772f..0000000000000 --- a/docs/changelog/107409.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107409 -summary: "ESQL: Introduce a casting operator, `::`" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/107410.yaml b/docs/changelog/107410.yaml deleted file mode 100644 index 5026e88cfa762..0000000000000 --- a/docs/changelog/107410.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107410 -summary: Cluster-state based Security role mapper -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/107415.yaml b/docs/changelog/107415.yaml deleted file mode 100644 index 8877d0426c60d..0000000000000 --- a/docs/changelog/107415.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107415 -summary: Fix `DecayFunctions'` `toString` -area: Search -type: bug -issues: - - 100870 diff --git a/docs/changelog/107426.yaml b/docs/changelog/107426.yaml deleted file mode 100644 index 2feed3df56108..0000000000000 --- a/docs/changelog/107426.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107426 -summary: Support wait indefinitely for search tasks to complete on node shutdown -area: Infra/Node Lifecycle -type: bug -issues: [] diff --git a/docs/changelog/107435.yaml b/docs/changelog/107435.yaml deleted file mode 100644 index ae5d2215419c4..0000000000000 --- a/docs/changelog/107435.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107435 -summary: '`NoSuchRemoteClusterException` should not be thrown when a remote is configured' -area: Network -type: bug -issues: - - 107381 diff --git a/docs/changelog/107493.yaml b/docs/changelog/107493.yaml deleted file mode 100644 index dfd45e1493c95..0000000000000 --- a/docs/changelog/107493.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107493 -summary: Remote cluster - API key security model - cluster privileges -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/107545.yaml b/docs/changelog/107545.yaml deleted file mode 100644 index ad457cc5a533f..0000000000000 --- a/docs/changelog/107545.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107545 -summary: "ESQL: Union Types Support" -area: ES|QL -type: enhancement -issues: - - 100603 diff --git a/docs/changelog/107549.yaml b/docs/changelog/107549.yaml deleted file mode 100644 index 
36250cf65b4d9..0000000000000 --- a/docs/changelog/107549.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107549 -summary: Add synthetic source support for binary fields -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/107567.yaml b/docs/changelog/107567.yaml deleted file mode 100644 index 558b5b570b1fb..0000000000000 --- a/docs/changelog/107567.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107567 -summary: Add ignored field values to synthetic source -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/107579.yaml b/docs/changelog/107579.yaml deleted file mode 100644 index fdee59424b8de..0000000000000 --- a/docs/changelog/107579.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107579 -summary: Adding `hits_time_in_millis` and `misses_time_in_millis` to enrich cache - stats -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/107593.yaml b/docs/changelog/107593.yaml deleted file mode 100644 index 2e3d2cbc80119..0000000000000 --- a/docs/changelog/107593.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107593 -summary: Add auto-sharding APM metrics -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/107640.yaml b/docs/changelog/107640.yaml deleted file mode 100644 index 9871943481f20..0000000000000 --- a/docs/changelog/107640.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107640 -summary: "Unified Highlighter to support matched_fields " -area: Highlighting -type: enhancement -issues: - - 5172 diff --git a/docs/changelog/107645.yaml b/docs/changelog/107645.yaml deleted file mode 100644 index 93fc0f2a89b3a..0000000000000 --- a/docs/changelog/107645.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 107645 -summary: Add `_name` support for top level `knn` clauses -area: Search -type: enhancement -issues: - - 106254 - - 107448 diff --git a/docs/changelog/107647.yaml b/docs/changelog/107647.yaml deleted file mode 100644 index 97d98a7c91079..0000000000000 --- a/docs/changelog/107647.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107647 -summary: Adding human readable times to geoip stats -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/107663.yaml b/docs/changelog/107663.yaml deleted file mode 100644 index a7c3dc185425a..0000000000000 --- a/docs/changelog/107663.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107663 -summary: Optimize `GeoBounds` and `GeoCentroid` aggregations for single value fields -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/107675.yaml b/docs/changelog/107675.yaml deleted file mode 100644 index b1d51cd3f8538..0000000000000 --- a/docs/changelog/107675.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 107675 -summary: Interpret `?timeout=-1` as infinite ack timeout -area: Cluster Coordination -type: breaking -issues: [] -breaking: - title: Interpret `?timeout=-1` as infinite ack timeout - area: REST API - details: | - Today {es} accepts the parameter `?timeout=-1` in many APIs, but interprets - this to mean the same as `?timeout=0`. From 8.15 onwards `?timeout=-1` will - mean to wait indefinitely, aligning the behaviour of this parameter with - other similar parameters such as `?master_timeout`. 
- impact: | - Use `?timeout=0` to force relevant operations to time out immediately - instead of `?timeout=-1` - notable: false diff --git a/docs/changelog/107676.yaml b/docs/changelog/107676.yaml deleted file mode 100644 index b14bc29e66efd..0000000000000 --- a/docs/changelog/107676.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107676 -summary: Add model download progress to the download task status -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/107706.yaml b/docs/changelog/107706.yaml deleted file mode 100644 index 76b7f662bf0e0..0000000000000 --- a/docs/changelog/107706.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107706 -summary: Add rate limiting support for the Inference API -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/107735.yaml b/docs/changelog/107735.yaml deleted file mode 100644 index 372cb59ba8b1f..0000000000000 --- a/docs/changelog/107735.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107735 -summary: Implement synthetic source support for annotated text field -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/107739.yaml b/docs/changelog/107739.yaml deleted file mode 100644 index c55a0e332b4f6..0000000000000 --- a/docs/changelog/107739.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107739 -summary: Binary field enables doc values by default for index mode with synthe… -area: Mapping -type: enhancement -issues: - - 107554 diff --git a/docs/changelog/107764.yaml b/docs/changelog/107764.yaml deleted file mode 100644 index 3f83efc789014..0000000000000 --- a/docs/changelog/107764.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107764 -summary: Increase size of big arrays only when there is an actual value in the aggregators -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/107779.yaml b/docs/changelog/107779.yaml deleted file mode 100644 index a41c19a2329e0..0000000000000 --- a/docs/changelog/107779.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107779 -summary: Allow rescorer with field collapsing -area: Search -type: enhancement -issues: - - 27243 \ No newline at end of file diff --git a/docs/changelog/107792.yaml b/docs/changelog/107792.yaml deleted file mode 100644 index bd9730d49d5d6..0000000000000 --- a/docs/changelog/107792.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107792 -summary: Halt Indexer on Stop/Abort API -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/107813.yaml b/docs/changelog/107813.yaml deleted file mode 100644 index 1cbb518a8be5b..0000000000000 --- a/docs/changelog/107813.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107813 -summary: Increase size of big arrays only when there is an actual value in the aggregators - (Analytics module) -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/107827.yaml b/docs/changelog/107827.yaml deleted file mode 100644 index 7cf217567b745..0000000000000 --- a/docs/changelog/107827.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107827 -summary: Add permission to secure access to certain config files -area: Security -type: bug -issues: [] diff --git a/docs/changelog/107832.yaml b/docs/changelog/107832.yaml deleted file mode 100644 index 491c491736005..0000000000000 --- a/docs/changelog/107832.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107832 -summary: Optimise few metric aggregations for single value fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/107862.yaml b/docs/changelog/107862.yaml deleted file mode 100644 index 77f7a8c9fb02a..0000000000000 --- a/docs/changelog/107862.yaml +++ 
/dev/null @@ -1,6 +0,0 @@ -pr: 107862 -summary: Fix serialization of put-shutdown request -area: Infra/Node Lifecycle -type: bug -issues: - - 107857 diff --git a/docs/changelog/107876.yaml b/docs/changelog/107876.yaml deleted file mode 100644 index 21624cacf7e1d..0000000000000 --- a/docs/changelog/107876.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107876 -summary: "ESQL: Add aggregates node level reduction" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/107877.yaml b/docs/changelog/107877.yaml deleted file mode 100644 index cf458b3aa3a42..0000000000000 --- a/docs/changelog/107877.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107877 -summary: Support metrics counter types in ESQL -area: "ES|QL" -type: enhancement -issues: [] diff --git a/docs/changelog/107886.yaml b/docs/changelog/107886.yaml deleted file mode 100644 index a328bc2a2a208..0000000000000 --- a/docs/changelog/107886.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107886 -summary: Cluster state role mapper file settings service -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/107892.yaml b/docs/changelog/107892.yaml deleted file mode 100644 index 5fd5404c48d02..0000000000000 --- a/docs/changelog/107892.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107892 -summary: Optimise cardinality aggregations for single value fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/107893.yaml b/docs/changelog/107893.yaml deleted file mode 100644 index 61f0f4d76e679..0000000000000 --- a/docs/changelog/107893.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107893 -summary: Optimise histogram aggregations for single value fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/107897.yaml b/docs/changelog/107897.yaml deleted file mode 100644 index e4a2a5270475d..0000000000000 --- a/docs/changelog/107897.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107897 -summary: Optimise composite aggregations for single value fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/107917.yaml b/docs/changelog/107917.yaml deleted file mode 100644 index 18125bf46f4b7..0000000000000 --- a/docs/changelog/107917.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107917 -summary: Exit gracefully when deleted -area: Transform -type: bug -issues: - - 107266 diff --git a/docs/changelog/107922.yaml b/docs/changelog/107922.yaml deleted file mode 100644 index e28d0f6262af4..0000000000000 --- a/docs/changelog/107922.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107922 -summary: Feature/annotated text store defaults -area: Mapping -type: enhancement -issues: - - 107734 diff --git a/docs/changelog/107930.yaml b/docs/changelog/107930.yaml deleted file mode 100644 index 90af5c55b8604..0000000000000 --- a/docs/changelog/107930.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107930 -summary: Optimise terms aggregations for single value fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/107937.yaml b/docs/changelog/107937.yaml deleted file mode 100644 index 5938c8e8b6602..0000000000000 --- a/docs/changelog/107937.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107937 -summary: Optimise multiterms aggregation for single value fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/107947.yaml b/docs/changelog/107947.yaml deleted file mode 100644 index 637ac3c005779..0000000000000 --- a/docs/changelog/107947.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107947 -summary: "ESQL: Fix equals `hashCode` for functions" -area: ES|QL -type: bug -issues: 
- - 104393 diff --git a/docs/changelog/107967.yaml b/docs/changelog/107967.yaml deleted file mode 100644 index 159370e44f236..0000000000000 --- a/docs/changelog/107967.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107967 -summary: Sort time series indices by time range in `GetDataStreams` API -area: TSDB -type: bug -issues: - - 102088 diff --git a/docs/changelog/107972.yaml b/docs/changelog/107972.yaml deleted file mode 100644 index 3ec83d6a56954..0000000000000 --- a/docs/changelog/107972.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107972 -summary: Require question to be non-null in `QuestionAnsweringConfig` -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/107977.yaml b/docs/changelog/107977.yaml deleted file mode 100644 index fdbbb57d7e48f..0000000000000 --- a/docs/changelog/107977.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107977 -summary: Fix off by one error when handling null values in range fields -area: Mapping -type: bug -issues: - - 107282 diff --git a/docs/changelog/107978.yaml b/docs/changelog/107978.yaml deleted file mode 100644 index 50115df9ee092..0000000000000 --- a/docs/changelog/107978.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107978 -summary: Drop shards close timeout when stopping node. -area: Engine -type: enhancement -issues: - - 107938 diff --git a/docs/changelog/107987.yaml b/docs/changelog/107987.yaml deleted file mode 100644 index e8afebde0b190..0000000000000 --- a/docs/changelog/107987.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107987 -summary: "ESQL: Implement LOOKUP, an \"inline\" enrich" -area: ES|QL -type: enhancement -issues: - - 107306 diff --git a/docs/changelog/107990.yaml b/docs/changelog/107990.yaml deleted file mode 100644 index 80cb96aca4426..0000000000000 --- a/docs/changelog/107990.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 107990 -summary: Optimise `time_series` aggregation for single value fields -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/108016.yaml b/docs/changelog/108016.yaml deleted file mode 100644 index 0aa3f86a6f859..0000000000000 --- a/docs/changelog/108016.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108016 -summary: Optimise `BinaryRangeAggregator` for single value fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/108019.yaml b/docs/changelog/108019.yaml deleted file mode 100644 index 69e8e9fd371f8..0000000000000 --- a/docs/changelog/108019.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108019 -summary: Ignore additional cpu.stat fields -area: Infra/Core -type: bug -issues: - - 107983 diff --git a/docs/changelog/108051.yaml b/docs/changelog/108051.yaml deleted file mode 100644 index a47e1192c6090..0000000000000 --- a/docs/changelog/108051.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108051 -summary: Track synthetic source for disabled objects -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/108065.yaml b/docs/changelog/108065.yaml deleted file mode 100644 index 2ec93bf6e6295..0000000000000 --- a/docs/changelog/108065.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108065 -summary: '`DenseVectorFieldMapper` fixed typo' -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/108070.yaml b/docs/changelog/108070.yaml deleted file mode 100644 index cde191aa50804..0000000000000 --- a/docs/changelog/108070.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108070 -summary: Redirect `VersionConflict` to reset code -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/108088.yaml b/docs/changelog/108088.yaml deleted file mode 100644 index 95c58f6dc19f1..0000000000000 
--- a/docs/changelog/108088.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108088 -summary: Add a SIMD (AVX2) optimised vector distance function for int7 on x64 -area: "Search" -type: enhancement -issues: [] diff --git a/docs/changelog/108089.yaml b/docs/changelog/108089.yaml deleted file mode 100644 index 02fb6349185a6..0000000000000 --- a/docs/changelog/108089.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108089 -summary: "ES|QL: limit query depth to 500 levels" -area: ES|QL -type: bug -issues: - - 107752 diff --git a/docs/changelog/108106.yaml b/docs/changelog/108106.yaml deleted file mode 100644 index e9dd438e620c4..0000000000000 --- a/docs/changelog/108106.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108106 -summary: Simulate should succeed if `ignore_missing_pipeline` -area: Ingest Node -type: bug -issues: - - 107314 diff --git a/docs/changelog/108118.yaml b/docs/changelog/108118.yaml deleted file mode 100644 index b9b0f1c1406e0..0000000000000 --- a/docs/changelog/108118.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108118 -summary: Optimize for single value in ordinals grouping -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/108122.yaml b/docs/changelog/108122.yaml deleted file mode 100644 index 981ab39b9dad8..0000000000000 --- a/docs/changelog/108122.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108122 -summary: Correct query profiling for conjunctions -area: Search -type: bug -issues: - - 108116 diff --git a/docs/changelog/108130.yaml b/docs/changelog/108130.yaml deleted file mode 100644 index 5b431bdb0cc1b..0000000000000 --- a/docs/changelog/108130.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108130 -summary: Optimise frequent item sets aggregation for single value fields -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/108131.yaml b/docs/changelog/108131.yaml deleted file mode 100644 index 7a4286c1e44a0..0000000000000 --- a/docs/changelog/108131.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108131 -summary: "Inference Processor: skip inference when all fields are missing" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/108144.yaml b/docs/changelog/108144.yaml deleted file mode 100644 index 6ff5b1d600d0e..0000000000000 --- a/docs/changelog/108144.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108144 -summary: Bump Tika dependency to 2.9.2 -area: Ingest Node -type: upgrade -issues: [] diff --git a/docs/changelog/108145.yaml b/docs/changelog/108145.yaml deleted file mode 100644 index b8c9428c1e3a8..0000000000000 --- a/docs/changelog/108145.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108145 -summary: Async close of `IndexShard` -area: Engine -type: bug -issues: [] diff --git a/docs/changelog/108146.yaml b/docs/changelog/108146.yaml deleted file mode 100644 index 2a4f917134090..0000000000000 --- a/docs/changelog/108146.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108146 -summary: Allow deletion of the ELSER inference service when reference in ingest -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/108155.yaml b/docs/changelog/108155.yaml deleted file mode 100644 index 57db86b4005b9..0000000000000 --- a/docs/changelog/108155.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108155 -summary: Upgrade to Netty 4.1.109 -area: Network -type: upgrade -issues: [] diff --git a/docs/changelog/108161.yaml b/docs/changelog/108161.yaml deleted file mode 100644 index 73fa41e2089d3..0000000000000 --- a/docs/changelog/108161.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108161 -summary: Refactor TextEmbeddingResults to use primitives rather than objects 
-area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/108165.yaml b/docs/changelog/108165.yaml deleted file mode 100644 index b88b0f5e217dd..0000000000000 --- a/docs/changelog/108165.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108165 -summary: Add `BlockHash` for 3 `BytesRefs` -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/108171.yaml b/docs/changelog/108171.yaml deleted file mode 100644 index 1ec17bb3e411d..0000000000000 --- a/docs/changelog/108171.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108171 -summary: "add Elastic-internal stable bridge api for use by Logstash" -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/108222.yaml b/docs/changelog/108222.yaml deleted file mode 100644 index 701b853441e32..0000000000000 --- a/docs/changelog/108222.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108222 -summary: Add generic fallback implementation for synthetic source -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/108223.yaml b/docs/changelog/108223.yaml deleted file mode 100644 index ba8756a8f9c68..0000000000000 --- a/docs/changelog/108223.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108223 -summary: Upgrade bouncy castle (non-fips) to 1.78.1 -area: Security -type: upgrade -issues: [] diff --git a/docs/changelog/108227.yaml b/docs/changelog/108227.yaml deleted file mode 100644 index 79f69bc4aaff6..0000000000000 --- a/docs/changelog/108227.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108227 -summary: "Apm-data: improve indexing resilience" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/108254.yaml b/docs/changelog/108254.yaml deleted file mode 100644 index 3bf08e8b8f5fc..0000000000000 --- a/docs/changelog/108254.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108254 -summary: Add `sparse_vector` query -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/108266.yaml b/docs/changelog/108266.yaml deleted file mode 100644 index 5a189cfcdc258..0000000000000 --- a/docs/changelog/108266.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108266 -summary: Log details of non-green indicators in `HealthPeriodicLogger` -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/108300.yaml b/docs/changelog/108300.yaml deleted file mode 100644 index c4d6e468113a4..0000000000000 --- a/docs/changelog/108300.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108300 -summary: "ESQL: Add more time span units" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/108306.yaml b/docs/changelog/108306.yaml deleted file mode 100644 index 7a104ce880f43..0000000000000 --- a/docs/changelog/108306.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108306 -summary: Enable inter-segment concurrency for low cardinality numeric terms aggs -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/108333.yaml b/docs/changelog/108333.yaml deleted file mode 100644 index c3152500ce1b2..0000000000000 --- a/docs/changelog/108333.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108333 -summary: Allow `read_slm` to call GET /_slm/status -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/108340.yaml b/docs/changelog/108340.yaml deleted file mode 100644 index fb2ea72c0a0f5..0000000000000 --- a/docs/changelog/108340.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108340 -summary: "Apm-data: increase version for templates" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/108349.yaml b/docs/changelog/108349.yaml deleted file mode 100644 index 
6d9ea3d658dca..0000000000000 --- a/docs/changelog/108349.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108349 -summary: "Ecs@mappings: reduce scope for `ecs_geo_point`" -area: Data streams -type: bug -issues: - - 108338 diff --git a/docs/changelog/108379.yaml b/docs/changelog/108379.yaml deleted file mode 100644 index 312856a5db33d..0000000000000 --- a/docs/changelog/108379.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108379 -summary: Create a new `NodeRequest` for every `NodesDataTiersUsageTransport` use -area: Indices APIs -type: bug -issues: [] diff --git a/docs/changelog/108394.yaml b/docs/changelog/108394.yaml deleted file mode 100644 index 58f48fa548c6e..0000000000000 --- a/docs/changelog/108394.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108394 -summary: Handle `IndexNotFoundException` -area: Transform -type: bug -issues: - - 107263 diff --git a/docs/changelog/108395.yaml b/docs/changelog/108395.yaml deleted file mode 100644 index c33cf169a99fa..0000000000000 --- a/docs/changelog/108395.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108395 -summary: "ESQL: change from quoting from backtick to quote" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/108396.yaml b/docs/changelog/108396.yaml deleted file mode 100644 index 63937646b755c..0000000000000 --- a/docs/changelog/108396.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108396 -summary: "Apm-data: improve default pipeline performance" -area: Data streams -type: enhancement -issues: - - 108290 diff --git a/docs/changelog/108409.yaml b/docs/changelog/108409.yaml deleted file mode 100644 index 6cff86cf93930..0000000000000 --- a/docs/changelog/108409.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108409 -summary: Support multiple associated groups for TopN -area: Application -type: enhancement -issues: - - 108018 diff --git a/docs/changelog/108410.yaml b/docs/changelog/108410.yaml deleted file mode 100644 index 5fd831231a3be..0000000000000 --- a/docs/changelog/108410.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108410 -summary: GeoIP tasks should wait longer for master -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/108417.yaml b/docs/changelog/108417.yaml deleted file mode 100644 index bb650922f1be5..0000000000000 --- a/docs/changelog/108417.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108417 -summary: Track source for arrays of objects -area: Mapping -type: enhancement -issues: - - 90708 diff --git a/docs/changelog/108421.yaml b/docs/changelog/108421.yaml deleted file mode 100644 index 1f077a4a2cb7c..0000000000000 --- a/docs/changelog/108421.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108421 -summary: "[ES|QL] Support Named and Positional Parameters in `EsqlQueryRequest`" -area: ES|QL -type: enhancement -issues: - - 107029 diff --git a/docs/changelog/108429.yaml b/docs/changelog/108429.yaml deleted file mode 100644 index 562454a0de256..0000000000000 --- a/docs/changelog/108429.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108429 -summary: Fix `ClassCastException` in Significant Terms -area: Aggregations -type: bug -issues: - - 108427 diff --git a/docs/changelog/108444.yaml b/docs/changelog/108444.yaml deleted file mode 100644 index c946ab24f939a..0000000000000 --- a/docs/changelog/108444.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108444 -summary: "Apm-data: ignore malformed fields, and too many dynamic fields" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/108452.yaml b/docs/changelog/108452.yaml deleted file mode 100644 index fdf531602c806..0000000000000 --- a/docs/changelog/108452.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 
108452 -summary: Add the rerank task to the Elasticsearch internal inference service -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/108455.yaml b/docs/changelog/108455.yaml deleted file mode 100644 index 8397af7b07cf1..0000000000000 --- a/docs/changelog/108455.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108455 -summary: "[ES|QL] Convert string to datetime when the other size of an arithmetic\ - \ operator is `date_period` or `time_duration`" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/108459.yaml b/docs/changelog/108459.yaml deleted file mode 100644 index 5e05797f284be..0000000000000 --- a/docs/changelog/108459.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108459 -summary: Do not use global ordinals strategy if the leaf reader context cannot be - obtained -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/108472.yaml b/docs/changelog/108472.yaml deleted file mode 100644 index 82481e4edec3a..0000000000000 --- a/docs/changelog/108472.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108472 -summary: Add support for Azure AI Studio embeddings and completions to the inference service. -area: Machine Learning -type: feature -issues: [] diff --git a/docs/changelog/108517.yaml b/docs/changelog/108517.yaml deleted file mode 100644 index 359c8302fdf6c..0000000000000 --- a/docs/changelog/108517.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108517 -summary: Forward `indexServiceSafe` exception to listener -area: Transform -type: bug -issues: - - 108418 diff --git a/docs/changelog/108521.yaml b/docs/changelog/108521.yaml deleted file mode 100644 index adc7c11a4decd..0000000000000 --- a/docs/changelog/108521.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108521 -summary: Adding override for lintian false positive on `libvec.so` -area: "Packaging" -type: bug -issues: - - 108514 diff --git a/docs/changelog/108522.yaml b/docs/changelog/108522.yaml deleted file mode 100644 index 5bc064d7995e9..0000000000000 --- a/docs/changelog/108522.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108522 -summary: Ensure we return non-negative scores when scoring scalar dot-products -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/108537.yaml b/docs/changelog/108537.yaml deleted file mode 100644 index 1c0228a71d449..0000000000000 --- a/docs/changelog/108537.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108537 -summary: Limit the value in prefix query -area: Search -type: enhancement -issues: - - 108486 diff --git a/docs/changelog/108538.yaml b/docs/changelog/108538.yaml deleted file mode 100644 index 10ae49f0c1670..0000000000000 --- a/docs/changelog/108538.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108538 -summary: Adding RankFeature search phase implementation -area: Search -type: feature -issues: [] diff --git a/docs/changelog/108574.yaml b/docs/changelog/108574.yaml deleted file mode 100644 index b3c957721e01e..0000000000000 --- a/docs/changelog/108574.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108574 -summary: "[ESQL] CBRT function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/108602.yaml b/docs/changelog/108602.yaml deleted file mode 100644 index d544c89980123..0000000000000 --- a/docs/changelog/108602.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108602 -summary: "[Inference API] Extract optional long instead of integer in `RateLimitSettings#of`" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/108606.yaml b/docs/changelog/108606.yaml deleted file mode 100644 index 04780bff58800..0000000000000 --- 
a/docs/changelog/108606.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 108606 -summary: "Extend ISO8601 datetime parser to specify forbidden fields, allowing it to be used\ - \ on more formats" -area: Infra/Core -type: enhancement -issues: [] -highlight: - title: New custom parser for more ISO-8601 date formats - body: |- - Following on from #106486, this extends the custom ISO-8601 datetime parser to cover the `strict_year`, - `strict_year_month`, `strict_date_time`, `strict_date_time_no_millis`, `strict_date_hour_minute_second`, - `strict_date_hour_minute_second_millis`, and `strict_date_hour_minute_second_fraction` date formats. - As before, the parser will use the existing java.time parser if there are parsing issues, and the - `es.datetime.java_time_parsers=true` JVM property will force the use of the old parsers regardless. diff --git a/docs/changelog/108607.yaml b/docs/changelog/108607.yaml deleted file mode 100644 index 9ad4cf91e67b9..0000000000000 --- a/docs/changelog/108607.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108607 -summary: Specify parse index when error occurs on multiple datetime parses -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/108612.yaml b/docs/changelog/108612.yaml deleted file mode 100644 index 7a3dfa2b7ba44..0000000000000 --- a/docs/changelog/108612.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108612 -summary: "[Connector API] Change `UpdateConnectorFiltering` API to have better defaults" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/108624.yaml b/docs/changelog/108624.yaml deleted file mode 100644 index 0da1fd2902c03..0000000000000 --- a/docs/changelog/108624.yaml +++ /dev/null @@ -1,12 +0,0 @@ -pr: 108624 -summary: Disallow new rollup jobs in clusters with no rollup usage -area: Rollup -type: breaking -issues: - - 108381 -breaking: - title: Disallow new rollup jobs in clusters with no rollup usage - area: Rollup - details: The put rollup API will fail with an error when a rollup job is created in a cluster with no rollup usage - impact: Clusters with no rollup usage (either no rollup job or index) can not create new rollup jobs - notable: true diff --git a/docs/changelog/108639.yaml b/docs/changelog/108639.yaml deleted file mode 100644 index e4964cbeb0285..0000000000000 --- a/docs/changelog/108639.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108639 -summary: Add support for the 'Domain' database to the geoip processor -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/108643.yaml b/docs/changelog/108643.yaml deleted file mode 100644 index f71a943673326..0000000000000 --- a/docs/changelog/108643.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108643 -summary: Use `scheduleUnlessShuttingDown` in `LeaderChecker` -area: Cluster Coordination -type: bug -issues: - - 108642 diff --git a/docs/changelog/108651.yaml b/docs/changelog/108651.yaml deleted file mode 100644 index 227c464909d50..0000000000000 --- a/docs/changelog/108651.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108651 -summary: Add support for the 'ISP' database to the geoip processor -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/108672.yaml b/docs/changelog/108672.yaml deleted file mode 100644 index e1261fcf6f232..0000000000000 --- a/docs/changelog/108672.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108672 -summary: Add bounds checking to parsing ISO8601 timezone offset values -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/108679.yaml b/docs/changelog/108679.yaml deleted file mode 100644 index 
62cd82a52c5bb..0000000000000 --- a/docs/changelog/108679.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108679 -summary: Suppress deprecation warnings from ingest pipelines when deleting trained model -area: Machine Learning -type: bug -issues: - - 105004 diff --git a/docs/changelog/108682.yaml b/docs/changelog/108682.yaml deleted file mode 100644 index bd566acab8306..0000000000000 --- a/docs/changelog/108682.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108682 -summary: Adding support for explain in rrf -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/108683.yaml b/docs/changelog/108683.yaml deleted file mode 100644 index b9e7df5fefc18..0000000000000 --- a/docs/changelog/108683.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 108683 -summary: Add support for the 'Connection Type' database to the geoip processor -area: Ingest Node -type: enhancement -issues: [] -highlight: - title: "Preview: Support for the 'Connection Type, 'Domain', and 'ISP' databases in the geoip processor" - body: |- - As a Technical Preview, the {ref}/geoip-processor.html[`geoip`] processor can now use the commercial - https://dev.maxmind.com/geoip/docs/databases/connection-type[GeoIP2 'Connection Type'], - https://dev.maxmind.com/geoip/docs/databases/domain[GeoIP2 'Domain'], - and - https://dev.maxmind.com/geoip/docs/databases/isp[GeoIP2 'ISP'] - databases from MaxMind. diff --git a/docs/changelog/108684.yaml b/docs/changelog/108684.yaml deleted file mode 100644 index 91684d2998be6..0000000000000 --- a/docs/changelog/108684.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108684 -summary: Check if `CsvTests` required capabilities exist -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/108687.yaml b/docs/changelog/108687.yaml deleted file mode 100644 index 771516d551567..0000000000000 --- a/docs/changelog/108687.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108687 -summary: Adding `user_type` support for the enterprise database for the geoip processor -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/108693.yaml b/docs/changelog/108693.yaml deleted file mode 100644 index ee701e0f57736..0000000000000 --- a/docs/changelog/108693.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108693 -summary: Test pipeline run after reroute -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/108705.yaml b/docs/changelog/108705.yaml deleted file mode 100644 index fd08734831018..0000000000000 --- a/docs/changelog/108705.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108705 -summary: Associate restore snapshot task to parent mount task -area: Distributed -type: bug -issues: - - 105830 diff --git a/docs/changelog/108713.yaml b/docs/changelog/108713.yaml deleted file mode 100644 index d6b1ddabd6c1e..0000000000000 --- a/docs/changelog/108713.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108713 -summary: Rewrite away type converting functions that do not convert types -area: ES|QL -type: enhancement -issues: - - 107716 diff --git a/docs/changelog/108726.yaml b/docs/changelog/108726.yaml deleted file mode 100644 index 2e800a45e6975..0000000000000 --- a/docs/changelog/108726.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108726 -summary: Allow RA metrics to be reported upon parsing completed or accumulated -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/108733.yaml b/docs/changelog/108733.yaml deleted file mode 100644 index 76a969219ea4c..0000000000000 --- a/docs/changelog/108733.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108733 -summary: Query Roles API -area: Security -type: 
feature -issues: [] diff --git a/docs/changelog/108746.yaml b/docs/changelog/108746.yaml deleted file mode 100644 index 93ed917f3b56e..0000000000000 --- a/docs/changelog/108746.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108746 -summary: Support synthetic source for `aggregate_metric_double` when ignore_malf… -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/108759.yaml b/docs/changelog/108759.yaml deleted file mode 100644 index dfc2b30fe6c57..0000000000000 --- a/docs/changelog/108759.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108759 -summary: Expose `?master_timeout` in autoscaling APIs -area: Autoscaling -type: bug -issues: [] diff --git a/docs/changelog/108761.yaml b/docs/changelog/108761.yaml deleted file mode 100644 index 92aa67ebe0bfe..0000000000000 --- a/docs/changelog/108761.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108761 -summary: Add some missing timeout params to REST API specs -area: Infra/REST API -type: bug -issues: [] diff --git a/docs/changelog/108764.yaml b/docs/changelog/108764.yaml deleted file mode 100644 index 94de27eb52c9b..0000000000000 --- a/docs/changelog/108764.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108764 -summary: ST_DISTANCE Function -area: ES|QL -type: enhancement -issues: - - 108212 diff --git a/docs/changelog/108780.yaml b/docs/changelog/108780.yaml deleted file mode 100644 index 40e66326e6b9b..0000000000000 --- a/docs/changelog/108780.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108780 -summary: Add `continent_code` support to the geoip processor -area: Ingest Node -type: enhancement -issues: - - 85820 diff --git a/docs/changelog/108786.yaml b/docs/changelog/108786.yaml deleted file mode 100644 index 1c07a3ceac900..0000000000000 --- a/docs/changelog/108786.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108786 -summary: Make ingest byte stat names more descriptive -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/108793.yaml b/docs/changelog/108793.yaml deleted file mode 100644 index 87668c8ee009b..0000000000000 --- a/docs/changelog/108793.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108793 -summary: Add `SparseVectorStats` -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/108796.yaml b/docs/changelog/108796.yaml deleted file mode 100644 index 808247cf347d9..0000000000000 --- a/docs/changelog/108796.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108796 -summary: Return ingest byte stats even when 0-valued -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/108814.yaml b/docs/changelog/108814.yaml deleted file mode 100644 index 94298838c372e..0000000000000 --- a/docs/changelog/108814.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108814 -summary: Deserialize publish requests on generic thread-pool -area: Cluster Coordination -type: bug -issues: - - 106352 diff --git a/docs/changelog/108818.yaml b/docs/changelog/108818.yaml deleted file mode 100644 index ed60fb5f64abd..0000000000000 --- a/docs/changelog/108818.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108818 -summary: Store source for nested objects -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/108820.yaml b/docs/changelog/108820.yaml deleted file mode 100644 index 55045ffce3dfa..0000000000000 --- a/docs/changelog/108820.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108820 -summary: Allow `LuceneSourceOperator` to early terminate -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/108822.yaml b/docs/changelog/108822.yaml deleted file mode 100644 index 8cec4da5dbc7f..0000000000000 --- a/docs/changelog/108822.yaml 
+++ /dev/null @@ -1,6 +0,0 @@ -pr: 108822 -summary: Update ASM to 9.7 for plugin scanner -area: Infra/Plugins -type: upgrade -issues: - - 108776 diff --git a/docs/changelog/108831.yaml b/docs/changelog/108831.yaml deleted file mode 100644 index 496bc0108f9d2..0000000000000 --- a/docs/changelog/108831.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108831 -summary: Rename rule query and add support for multiple rulesets -area: Application -type: enhancement -issues: [ ] diff --git a/docs/changelog/108849.yaml b/docs/changelog/108849.yaml deleted file mode 100644 index 7c503efe9187b..0000000000000 --- a/docs/changelog/108849.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108849 -summary: "[Osquery] Extend `kibana_system` role with an access to new `osquery_manager`\ - \ index" -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/108856.yaml b/docs/changelog/108856.yaml deleted file mode 100644 index 9b8f42248a442..0000000000000 --- a/docs/changelog/108856.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108856 -summary: Return noop instance `DocSizeObserver` for updates with scripts -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/108860.yaml b/docs/changelog/108860.yaml deleted file mode 100644 index 93aa8ce7c08ff..0000000000000 --- a/docs/changelog/108860.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108860 -summary: "Apm-data: enable plugin by default" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/108862.yaml b/docs/changelog/108862.yaml deleted file mode 100644 index ddba15f11e8f5..0000000000000 --- a/docs/changelog/108862.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108862 -summary: "Apm-data: set codec: best_compression for logs-apm.* data streams" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/108868.yaml b/docs/changelog/108868.yaml deleted file mode 100644 index d0643f056cce8..0000000000000 --- a/docs/changelog/108868.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108868 -summary: GA the update trained model action -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/108870.yaml b/docs/changelog/108870.yaml deleted file mode 100644 index 435eea9845f16..0000000000000 --- a/docs/changelog/108870.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108870 -summary: Adding score from `RankDoc` to `SearchHit` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/108871.yaml b/docs/changelog/108871.yaml deleted file mode 100644 index 46bf8ca9d8404..0000000000000 --- a/docs/changelog/108871.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108871 -summary: "Reapply \"ESQL: Expose \"_ignored\" metadata field\"" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/108878.yaml b/docs/changelog/108878.yaml deleted file mode 100644 index 1a8127869a647..0000000000000 --- a/docs/changelog/108878.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108878 -summary: Support arrays in fallback synthetic source implementation -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/108881.yaml b/docs/changelog/108881.yaml deleted file mode 100644 index b6de1129cfa03..0000000000000 --- a/docs/changelog/108881.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108881 -summary: Add synthetic source support for `geo_shape` via fallback implementation -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/108885.yaml b/docs/changelog/108885.yaml deleted file mode 100644 index c66843e082e29..0000000000000 --- a/docs/changelog/108885.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108885 -summary: 
"Apm-data: increase priority above Fleet templates" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/108886.yaml b/docs/changelog/108886.yaml deleted file mode 100644 index 18df59e577713..0000000000000 --- a/docs/changelog/108886.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108886 -summary: Expose `?master_timeout` on get-shutdown API -area: Infra/Node Lifecycle -type: bug -issues: [] diff --git a/docs/changelog/108891.yaml b/docs/changelog/108891.yaml deleted file mode 100644 index 8282b616b34a9..0000000000000 --- a/docs/changelog/108891.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108891 -summary: Fix NPE during destination index creation -area: Transform -type: bug -issues: - - 108890 diff --git a/docs/changelog/108895.yaml b/docs/changelog/108895.yaml deleted file mode 100644 index 15293896b20c5..0000000000000 --- a/docs/changelog/108895.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108895 -summary: Add permission to secure access to certain config files specified by settings -area: "Security" -type: bug -issues: [] diff --git a/docs/changelog/108896.yaml b/docs/changelog/108896.yaml deleted file mode 100644 index c52f074b65605..0000000000000 --- a/docs/changelog/108896.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 108896 -summary: Introduce `logs` index mode as Tech Preview -area: Logs -type: feature -issues: - - 108896 diff --git a/docs/changelog/108911.yaml b/docs/changelog/108911.yaml deleted file mode 100644 index 8832e01f7426e..0000000000000 --- a/docs/changelog/108911.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108911 -summary: Store source for fields in objects with `dynamic` override -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/108942.yaml b/docs/changelog/108942.yaml deleted file mode 100644 index c58b06a92cee8..0000000000000 --- a/docs/changelog/108942.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108942 -summary: Fix NPE in trained model assignment updater -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/108947.yaml b/docs/changelog/108947.yaml deleted file mode 100644 index 8aa4293242985..0000000000000 --- a/docs/changelog/108947.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108947 -summary: Provide the `DocumentSizeReporter` with index mode -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/108999.yaml b/docs/changelog/108999.yaml deleted file mode 100644 index 089d765b4e2d0..0000000000000 --- a/docs/changelog/108999.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 108999 -summary: Use default translog durability on AD results index -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/109007.yaml b/docs/changelog/109007.yaml deleted file mode 100644 index c828db64220fb..0000000000000 --- a/docs/changelog/109007.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109007 -summary: Multivalue Sparse Vector Support -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/109017.yaml b/docs/changelog/109017.yaml new file mode 100644 index 0000000000000..80bcdd6fc0e25 --- /dev/null +++ b/docs/changelog/109017.yaml @@ -0,0 +1,6 @@ +pr: 109017 +summary: "ESQL: Add `MV_PSERIES_WEIGHTED_SUM` for score calculations used by security\ + \ solution" +area: ES|QL +type: "feature" +issues: [ ] diff --git a/docs/changelog/109025.yaml b/docs/changelog/109025.yaml deleted file mode 100644 index 38d19cab13d30..0000000000000 --- a/docs/changelog/109025.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109025 -summary: Introduce a setting controlling the activation of the `logs` index mode in logs@settings 
-area: Logs -type: feature -issues: - - 108762 diff --git a/docs/changelog/109042.yaml b/docs/changelog/109042.yaml deleted file mode 100644 index 5aa80db991c0d..0000000000000 --- a/docs/changelog/109042.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109042 -summary: Add Create or update query rule API call -area: Application -type: enhancement -issues: [ ] diff --git a/docs/changelog/109043.yaml b/docs/changelog/109043.yaml deleted file mode 100644 index bdfe3addea8e9..0000000000000 --- a/docs/changelog/109043.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109043 -summary: "Apm-data: set concrete values for `metricset.interval`" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/109044.yaml b/docs/changelog/109044.yaml deleted file mode 100644 index 9e50c377606a0..0000000000000 --- a/docs/changelog/109044.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109044 -summary: Enable fallback synthetic source for `token_count` -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/109047.yaml b/docs/changelog/109047.yaml deleted file mode 100644 index 85a8808353a08..0000000000000 --- a/docs/changelog/109047.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109047 -summary: Prevent concurrent jobs during cleanup -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/109070.yaml b/docs/changelog/109070.yaml deleted file mode 100644 index 8dbc0ec1c6cf2..0000000000000 --- a/docs/changelog/109070.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109070 -summary: "ESQL: Add `ip_prefix` function" -area: ES|QL -type: feature -issues: - - 99064 diff --git a/docs/changelog/109071.yaml b/docs/changelog/109071.yaml deleted file mode 100644 index 275a5433cc1d8..0000000000000 --- a/docs/changelog/109071.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109071 -summary: Better handling of multiple rescorers clauses with LTR -area: "Search" -type: bug -issues: [] diff --git a/docs/changelog/109078.yaml b/docs/changelog/109078.yaml deleted file mode 100644 index f602ee9b131bc..0000000000000 --- a/docs/changelog/109078.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109078 -summary: Expose API Key cache metrics -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/109084.yaml b/docs/changelog/109084.yaml deleted file mode 100644 index 67ff5610c5a66..0000000000000 --- a/docs/changelog/109084.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109084 -summary: Add AVX-512 optimised vector distance functions for int7 on x64 -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/109104.yaml b/docs/changelog/109104.yaml deleted file mode 100644 index 985cf14bc5952..0000000000000 --- a/docs/changelog/109104.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109104 -summary: Offload request to generic threadpool -area: Machine Learning -type: bug -issues: - - 109100 diff --git a/docs/changelog/109123.yaml b/docs/changelog/109123.yaml deleted file mode 100644 index dfd7e52b33e7f..0000000000000 --- a/docs/changelog/109123.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109123 -summary: "[Inference API] Check for related pipelines on delete inference endpoint" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/109126.yaml b/docs/changelog/109126.yaml deleted file mode 100644 index 248eacc76b65c..0000000000000 --- a/docs/changelog/109126.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109126 -summary: Correctly handle duplicate model ids for the `_cat` trained models api and usage statistics -area: Machine Learning -type: bug -issues: [ ] diff --git a/docs/changelog/109167.yaml 
b/docs/changelog/109167.yaml deleted file mode 100644 index e366b2302263c..0000000000000 --- a/docs/changelog/109167.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109167 -summary: Fixes cluster state-based role mappings not recovered from disk -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/109174.yaml b/docs/changelog/109174.yaml deleted file mode 100644 index 5cd57ebd34ac6..0000000000000 --- a/docs/changelog/109174.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109174 -summary: "ESQL: Change \"substring\" function to not return null on empty string" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/109185.yaml b/docs/changelog/109185.yaml deleted file mode 100644 index 4da72c4b20ffb..0000000000000 --- a/docs/changelog/109185.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109185 -summary: Handle unmatching remote cluster wildcards properly for `IndicesRequest.SingleIndexNoWildcards` - requests -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/109194.yaml b/docs/changelog/109194.yaml deleted file mode 100644 index bf50139547f62..0000000000000 --- a/docs/changelog/109194.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109194 -summary: "[Inference API] Add Mistral Embeddings Support to Inference API" -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/109196.yaml b/docs/changelog/109196.yaml deleted file mode 100644 index 7f5ca3efbc8d4..0000000000000 --- a/docs/changelog/109196.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109196 -summary: Handle nullable `DocsStats` and `StoresStats` -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/109204.yaml b/docs/changelog/109204.yaml deleted file mode 100644 index b5b22ef1a06f9..0000000000000 --- a/docs/changelog/109204.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109204 -summary: Detect long-running tasks on network threads -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/109205.yaml b/docs/changelog/109205.yaml deleted file mode 100644 index 10f13a6549fbc..0000000000000 --- a/docs/changelog/109205.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109205 -summary: "ESQL: Fix `IpPrefix` function not handling correctly `ByteRefs`" -area: ES|QL -type: bug -issues: - - 109198 diff --git a/docs/changelog/109219.yaml b/docs/changelog/109219.yaml deleted file mode 100644 index abf4f49235166..0000000000000 --- a/docs/changelog/109219.yaml +++ /dev/null @@ -1,15 +0,0 @@ -pr: 109219 -summary: Update Lucene version to 9.11 -area: Search -type: feature -issues: [] -highlight: - title: "Update Elasticsearch to Lucene 9.11" - body: |- - Elasticsearch is now updated using the latest Lucene version 9.11. 
- Here are the full release notes: - But, here are some particular highlights: - - Usage of MADVISE for better memory management: https://github.com/apache/lucene/pull/13196 - - Use RWLock to access LRUQueryCache to reduce contention: https://github.com/apache/lucene/pull/13306 - - Speedup multi-segment HNSW graph search for nested kNN queries: https://github.com/apache/lucene/pull/13121 - - Add a MemorySegment Vector scorer - for scoring without copying on-heap vectors: https://github.com/apache/lucene/pull/13339 diff --git a/docs/changelog/109220.yaml b/docs/changelog/109220.yaml deleted file mode 100644 index b8efa8f784d7a..0000000000000 --- a/docs/changelog/109220.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109220 -summary: "ESQL: add REPEAT string function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/109233.yaml b/docs/changelog/109233.yaml deleted file mode 100644 index 36010273c80db..0000000000000 --- a/docs/changelog/109233.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109233 -summary: Fix trappy timeouts in security settings APIs -area: Security -type: bug -issues: [] diff --git a/docs/changelog/109236.yaml b/docs/changelog/109236.yaml deleted file mode 100644 index e2eb917ea0343..0000000000000 --- a/docs/changelog/109236.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109236 -summary: Use proper executor for failing requests when connection closes -area: Network -type: bug -issues: - - 109225 diff --git a/docs/changelog/109240.yaml b/docs/changelog/109240.yaml deleted file mode 100644 index a9fad3abdc47f..0000000000000 --- a/docs/changelog/109240.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109240 -summary: Fix trappy timeout in allocation explain API -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/109241.yaml b/docs/changelog/109241.yaml deleted file mode 100644 index b7343b9df1841..0000000000000 --- a/docs/changelog/109241.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109241 -summary: Fix misc trappy allocation API timeouts -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/109256.yaml b/docs/changelog/109256.yaml deleted file mode 100644 index 30c15ed77f9b9..0000000000000 --- a/docs/changelog/109256.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 109256 -summary: "[ESQL] Migrate `SimplifyComparisonArithmetics` optimization" -area: ES|QL -type: bug -issues: - - 108388 - - 108743 diff --git a/docs/changelog/109312.yaml b/docs/changelog/109312.yaml deleted file mode 100644 index 594d3f90e8fd1..0000000000000 --- a/docs/changelog/109312.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109312 -summary: Enable fallback synthetic source for `point` and `shape` -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/109317.yaml b/docs/changelog/109317.yaml deleted file mode 100644 index 1d8595d99c2a6..0000000000000 --- a/docs/changelog/109317.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 109317 -summary: Add new int4 quantization to dense_vector -area: Search -type: feature -issues: [] -highlight: - title: Add new int4 quantization to dense_vector - body: |- - New int4 (half-byte) scalar quantization support via two knew index types: `int4_hnsw` and `int4_flat`. - This gives an 8x reduction from `float32` with some accuracy loss. In addition to less memory required, this - improves query and merge speed significantly when compared to raw vectors. 
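A minimal sketch of how the `int4_hnsw` index type from the 109317 entry above could be applied to a `dense_vector` field; the index name, field name, and dimension count are illustrative assumptions, not part of the change itself.

[source,bash]
----
# Illustrative only: map a dense_vector field with the new int4_hnsw index type.
# Index name, field name, and dims are made-up example values.
curl -X PUT "localhost:9200/my-int4-index" \
  -H 'Content-Type: application/json' \
  -d '{
    "mappings": {
      "properties": {
        "embedding": {
          "type": "dense_vector",
          "dims": 384,
          "index": true,
          "index_options": { "type": "int4_hnsw" }
        }
      }
    }
  }'
----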
- notable: true - diff --git a/docs/changelog/109332.yaml b/docs/changelog/109332.yaml deleted file mode 100644 index 3d03523fd518b..0000000000000 --- a/docs/changelog/109332.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109332 -summary: "ES|QL: vectorize eval" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/109358.yaml b/docs/changelog/109358.yaml deleted file mode 100644 index af47b4129d874..0000000000000 --- a/docs/changelog/109358.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109358 -summary: Use the multi node routing action for internal inference services -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/109359.yaml b/docs/changelog/109359.yaml deleted file mode 100644 index 37202eb5a28ec..0000000000000 --- a/docs/changelog/109359.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109359 -summary: Adding hamming distance function to painless for `dense_vector` fields -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/109370.yaml b/docs/changelog/109370.yaml deleted file mode 100644 index 32b190d1a1c94..0000000000000 --- a/docs/changelog/109370.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109370 -summary: Enable fallback synthetic source by default -area: Mapping -type: feature -issues: - - 106460 diff --git a/docs/changelog/109384.yaml b/docs/changelog/109384.yaml deleted file mode 100644 index 303da23d57d8e..0000000000000 --- a/docs/changelog/109384.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109384 -summary: Fix serialising inference delete response -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/109386.yaml b/docs/changelog/109386.yaml deleted file mode 100644 index 984ee96dde063..0000000000000 --- a/docs/changelog/109386.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109386 -summary: "ESQL: `top_list` aggregation" -area: ES|QL -type: feature -issues: - - 109213 diff --git a/docs/changelog/109395.yaml b/docs/changelog/109395.yaml deleted file mode 100644 index e5336695afa48..0000000000000 --- a/docs/changelog/109395.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109395 -summary: Correct positioning for unique token filter -area: Analysis -type: bug -issues: [] diff --git a/docs/changelog/109410.yaml b/docs/changelog/109410.yaml deleted file mode 100644 index e8c4dcdab42c6..0000000000000 --- a/docs/changelog/109410.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109410 -summary: Support synthetic source for date fields when `ignore_malformed` is used -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/109414.yaml b/docs/changelog/109414.yaml new file mode 100644 index 0000000000000..81b7541bde35b --- /dev/null +++ b/docs/changelog/109414.yaml @@ -0,0 +1,6 @@ +pr: 109414 +summary: Don't fail retention lease sync actions due to capacity constraints +area: CRUD +type: bug +issues: + - 105926 diff --git a/docs/changelog/109444.yaml b/docs/changelog/109444.yaml deleted file mode 100644 index 8c56fe2dd9f02..0000000000000 --- a/docs/changelog/109444.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109444 -summary: "Aggs: Scripted metric allow list" -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/109449.yaml b/docs/changelog/109449.yaml deleted file mode 100644 index 90cb908227f1b..0000000000000 --- a/docs/changelog/109449.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109449 -summary: Reset max page size to settings value -area: Transform -type: bug -issues: - - 109308 diff --git a/docs/changelog/109462.yaml b/docs/changelog/109462.yaml deleted file mode 100644 index a05f4a04e80ae..0000000000000 --- 
a/docs/changelog/109462.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109462 -summary: Add `wait_for_completion` parameter to delete snapshot request -area: Distributed -type: enhancement -issues: - - 101300 diff --git a/docs/changelog/109470.yaml b/docs/changelog/109470.yaml deleted file mode 100644 index 837c1664b775a..0000000000000 --- a/docs/changelog/109470.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109470 -summary: Enabling profiling for `RankBuilders` and adding tests for RRF -area: Ranking -type: enhancement -issues: [] diff --git a/docs/changelog/109480.yaml b/docs/changelog/109480.yaml deleted file mode 100644 index 3a6f48e9bd840..0000000000000 --- a/docs/changelog/109480.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109480 -summary: "[Connector API] Add claim sync job endpoint" -area: Application -type: feature -issues: [] diff --git a/docs/changelog/109481.yaml b/docs/changelog/109481.yaml deleted file mode 100644 index e8251788a90bd..0000000000000 --- a/docs/changelog/109481.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109481 -summary: Fork freeing search/scroll contexts to GENERIC pool -area: Search -type: bug -issues: [] diff --git a/docs/changelog/109487.yaml b/docs/changelog/109487.yaml deleted file mode 100644 index c69c77203f12d..0000000000000 --- a/docs/changelog/109487.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109487 -summary: Start Trained Model Deployment API request query params now override body params -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/109501.yaml b/docs/changelog/109501.yaml deleted file mode 100644 index 6e81f98816cbf..0000000000000 --- a/docs/changelog/109501.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 109501 -summary: Reflect latest changes in synthetic source documentation -area: Mapping -type: enhancement -issues: [] -highlight: - title: Synthetic `_source` improvements - body: |- - There are multiple improvements to synthetic `_source` functionality: - - * Synthetic `_source` is now supported for all field types including `nested` and `object`. `object` fields are supported with `enabled` set to `false`. - - * Synthetic `_source` can be enabled together with `ignore_malformed` and `ignore_above` parameters for all field types that support them. 
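As a rough illustration of the synthetic `_source` combinations described in the 109501 entry above, a mapping might enable synthetic source alongside `ignore_above` and `ignore_malformed`; all names and values below are assumptions for the example.

[source,bash]
----
# Illustrative only: synthetic _source together with ignore_above / ignore_malformed.
curl -X PUT "localhost:9200/my-synthetic-index" \
  -H 'Content-Type: application/json' \
  -d '{
    "mappings": {
      "_source": { "mode": "synthetic" },
      "properties": {
        "tags":    { "type": "keyword", "ignore_above": 256 },
        "created": { "type": "date",    "ignore_malformed": true }
      }
    }
  }'
----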
- notable: false diff --git a/docs/changelog/109506.yaml b/docs/changelog/109506.yaml deleted file mode 100644 index 3a7570ed0b93a..0000000000000 --- a/docs/changelog/109506.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109506 -summary: Support synthetic source for `scaled_float` and `unsigned_long` when `ignore_malformed` - is used -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/109534.yaml b/docs/changelog/109534.yaml deleted file mode 100644 index c6eb520bb70a8..0000000000000 --- a/docs/changelog/109534.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109534 -summary: Propagate accurate deployment timeout -area: Machine Learning -type: bug -issues: - - 109407 diff --git a/docs/changelog/109540.yaml b/docs/changelog/109540.yaml deleted file mode 100644 index 722c60a30fb97..0000000000000 --- a/docs/changelog/109540.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109540 -summary: Add metrics@custom component template to metrics-*-* index template -area: Data streams -type: enhancement -issues: - - 109475 diff --git a/docs/changelog/109551.yaml b/docs/changelog/109551.yaml deleted file mode 100644 index f4949669091d9..0000000000000 --- a/docs/changelog/109551.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109551 -summary: Avoid `InferenceRunner` deadlock -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/109554.yaml b/docs/changelog/109554.yaml deleted file mode 100644 index 4e78a8f3044c7..0000000000000 --- a/docs/changelog/109554.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109554 -summary: "[Query Rules] Add API calls to get or delete individual query rules within\ - \ a ruleset" -area: Relevance -type: enhancement -issues: [] diff --git a/docs/changelog/109563.yaml b/docs/changelog/109563.yaml deleted file mode 100644 index 9099064b6b040..0000000000000 --- a/docs/changelog/109563.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109563 -summary: Add allocation explain output for THROTTLING shards -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/109583.yaml b/docs/changelog/109583.yaml new file mode 100644 index 0000000000000..84757e307b4fb --- /dev/null +++ b/docs/changelog/109583.yaml @@ -0,0 +1,29 @@ +pr: 109583 +summary: "ESQL: INLINESTATS" +area: ES|QL +type: feature +issues: + - 107589 +highlight: + title: "ESQL: INLINESTATS" + body: |- + This adds the `INLINESTATS` command to ESQL which performs a STATS and + then enriches the results into the output stream. 
So, this query: + + [source,esql] + ---- + FROM test + | INLINESTATS m=MAX(a * b) BY b + | WHERE m == a * b + | SORT a DESC, b DESC + | LIMIT 3 + ---- + + Produces output like: + + | a | b | m | + | --- | --- | ----- | + | 99 | 999 | 98901 | + | 99 | 998 | 98802 | + | 99 | 997 | 98703 | + notable: true diff --git a/docs/changelog/109597.yaml b/docs/changelog/109597.yaml deleted file mode 100644 index 9b99df85da6a3..0000000000000 --- a/docs/changelog/109597.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109597 -summary: Opt `scripted_metric` out of parallelization -area: Aggregations -type: feature -issues: [] diff --git a/docs/changelog/109603.yaml b/docs/changelog/109603.yaml deleted file mode 100644 index 2d6e8b94aa8d0..0000000000000 --- a/docs/changelog/109603.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109603 -summary: Update translog `writeLocation` for `flushListener` after commit -area: Engine -type: enhancement -issues: [] diff --git a/docs/changelog/109606.yaml b/docs/changelog/109606.yaml deleted file mode 100644 index 6c9089c4c4fde..0000000000000 --- a/docs/changelog/109606.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109606 -summary: Avoid NPE if `users_roles` file does not exist -area: Authentication -type: bug -issues: [] diff --git a/docs/changelog/109613.yaml b/docs/changelog/109613.yaml deleted file mode 100644 index 21d152ac1d6de..0000000000000 --- a/docs/changelog/109613.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109613 -summary: Consider `error_trace` supported by all endpoints -area: Infra/REST API -type: bug -issues: - - 109612 diff --git a/docs/changelog/109618.yaml b/docs/changelog/109618.yaml deleted file mode 100644 index f28bb15a53d96..0000000000000 --- a/docs/changelog/109618.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109618 -summary: Fail cluster state API if blocked -area: Cluster Coordination -type: bug -issues: - - 107503 diff --git a/docs/changelog/109634.yaml b/docs/changelog/109634.yaml deleted file mode 100644 index 4c6358578b6de..0000000000000 --- a/docs/changelog/109634.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109634 -summary: "[Query Rules] Require Enterprise License for Query Rules" -area: Relevance -type: enhancement -issues: [] diff --git a/docs/changelog/109651.yaml b/docs/changelog/109651.yaml deleted file mode 100644 index 982e6a5b536cc..0000000000000 --- a/docs/changelog/109651.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109651 -summary: Support synthetic source for `geo_point` when `ignore_malformed` is used -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/109653.yaml b/docs/changelog/109653.yaml deleted file mode 100644 index 665163ec2a91b..0000000000000 --- a/docs/changelog/109653.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109653 -summary: Handle the "JSON memory allocator bytes" field -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/109657.yaml b/docs/changelog/109657.yaml deleted file mode 100644 index 35b315b7568c9..0000000000000 --- a/docs/changelog/109657.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109657 -summary: Track `RequestedRangeNotSatisfiedException` separately in S3 Metrics -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/109672.yaml b/docs/changelog/109672.yaml deleted file mode 100644 index bb6532ab7accf..0000000000000 --- a/docs/changelog/109672.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109672 -summary: Log repo UUID at generation/registration time -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/109717.yaml 
b/docs/changelog/109717.yaml deleted file mode 100644 index 326657ea4ce21..0000000000000 --- a/docs/changelog/109717.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109717 -summary: Bump jackson version in modules:repository-azure -area: Snapshot/Restore -type: upgrade -issues: [] diff --git a/docs/changelog/109720.yaml b/docs/changelog/109720.yaml deleted file mode 100644 index b029726c84427..0000000000000 --- a/docs/changelog/109720.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109720 -summary: "DocsStats: Add human readable bytesize" -area: Stats -type: enhancement -issues: [] diff --git a/docs/changelog/109746.yaml b/docs/changelog/109746.yaml deleted file mode 100644 index 5360f545333ac..0000000000000 --- a/docs/changelog/109746.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109746 -summary: ES|QL Add primitive float support to the Compute Engine -area: ES|QL -type: enhancement -issues: - - 109178 diff --git a/docs/changelog/109779.yaml b/docs/changelog/109779.yaml deleted file mode 100644 index 4ccd8d475ec8d..0000000000000 --- a/docs/changelog/109779.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109779 -summary: Include component templates in retention validaiton -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/109781.yaml b/docs/changelog/109781.yaml deleted file mode 100644 index df74645b53d84..0000000000000 --- a/docs/changelog/109781.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109781 -summary: ES|QL Add primitive float variants of all aggregators to the compute engine -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/109794.yaml b/docs/changelog/109794.yaml deleted file mode 100644 index d244c69a903ba..0000000000000 --- a/docs/changelog/109794.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109794 -summary: Provide document size reporter with `MapperService` -area: Infra/Metrics -type: bug -issues: [] diff --git a/docs/changelog/109807.yaml b/docs/changelog/109807.yaml deleted file mode 100644 index 5cf8a2c896c4e..0000000000000 --- a/docs/changelog/109807.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109807 -summary: "ESQL: Fix LOOKUP attribute shadowing" -area: ES|QL -type: bug -issues: - - 109392 diff --git a/docs/changelog/109813.yaml b/docs/changelog/109813.yaml deleted file mode 100644 index edcef17e87606..0000000000000 --- a/docs/changelog/109813.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109813 -summary: Add text similarity reranker retriever -area: Ranking -type: feature -issues: [] diff --git a/docs/changelog/109848.yaml b/docs/changelog/109848.yaml deleted file mode 100644 index 858bbe84ef3a4..0000000000000 --- a/docs/changelog/109848.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109848 -summary: Denser in-memory representation of `ShardBlobsToDelete` -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/109873.yaml b/docs/changelog/109873.yaml deleted file mode 100644 index c77197cc22d0a..0000000000000 --- a/docs/changelog/109873.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109873 -summary: "ESQL: add Arrow dataframes output format" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/109876.yaml b/docs/changelog/109876.yaml deleted file mode 100644 index 4a65b4e17c4a3..0000000000000 --- a/docs/changelog/109876.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109876 -summary: Always pick the user `maxPageSize` value -area: Transform -type: bug -issues: - - 109844 diff --git a/docs/changelog/109880.yaml b/docs/changelog/109880.yaml deleted file mode 100644 index 71c7209824a8a..0000000000000 --- a/docs/changelog/109880.yaml +++ /dev/null @@ -1,10 +0,0 
@@ -pr: 109880 -summary: Deprecate `text_expansion` and `weighted_tokens` queries -area: Machine Learning -type: deprecation -issues: [ ] -deprecation: - title: Deprecate `text_expansion` and `weighted_tokens` queries - area: REST API - details: The `text_expansion` and `weighted_tokens` queries have been replaced by `sparse_vector`. - impact: Please update your existing `text_expansion` and `weighted_tokens` queries to use `sparse_vector.` diff --git a/docs/changelog/109882.yaml b/docs/changelog/109882.yaml deleted file mode 100644 index 0f0fed01c5a7a..0000000000000 --- a/docs/changelog/109882.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109882 -summary: Support synthetic source together with `ignore_malformed` in histogram fields -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/109893.yaml b/docs/changelog/109893.yaml deleted file mode 100644 index df6d6e51236c8..0000000000000 --- a/docs/changelog/109893.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109893 -summary: Add Anthropic messages integration to Inference API -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/109908.yaml b/docs/changelog/109908.yaml deleted file mode 100644 index cdf2acf17096c..0000000000000 --- a/docs/changelog/109908.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109908 -summary: "Update checkpoints after post-replication actions, even on failure" -area: CRUD -type: bug -issues: [] diff --git a/docs/changelog/109931.yaml b/docs/changelog/109931.yaml deleted file mode 100644 index 3575cfd49176f..0000000000000 --- a/docs/changelog/109931.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109931 -summary: Apply FLS to the contents of `IgnoredSourceFieldMapper` -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/109957.yaml b/docs/changelog/109957.yaml deleted file mode 100644 index 6bbcd8175501c..0000000000000 --- a/docs/changelog/109957.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109957 -summary: Add request metric to `RestController` to track success/failure (by status - code) -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/109963.yaml b/docs/changelog/109963.yaml deleted file mode 100644 index 1745d549582d4..0000000000000 --- a/docs/changelog/109963.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109963 -summary: Propagate mapper builder context flags across nested mapper builder context - creation -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/109967.yaml b/docs/changelog/109967.yaml deleted file mode 100644 index cfc6b6462954b..0000000000000 --- a/docs/changelog/109967.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109967 -summary: Default the HF service to cosine similarity -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/109981.yaml b/docs/changelog/109981.yaml deleted file mode 100644 index cf9388f79e29c..0000000000000 --- a/docs/changelog/109981.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109981 -summary: Limit number of synonym rules that can be created -area: Mapping -type: bug -issues: [108785] diff --git a/docs/changelog/109989.yaml b/docs/changelog/109989.yaml deleted file mode 100644 index f1f5972b60eb3..0000000000000 --- a/docs/changelog/109989.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109989 -summary: "ESQL: Fix Join references" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/109993.yaml b/docs/changelog/109993.yaml deleted file mode 100644 index 40d161b6b5c24..0000000000000 --- a/docs/changelog/109993.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109993 -summary: "[ES|QL] 
`weighted_avg`" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/110004.yaml b/docs/changelog/110004.yaml deleted file mode 100644 index f680016527a9c..0000000000000 --- a/docs/changelog/110004.yaml +++ /dev/null @@ -1,11 +0,0 @@ -pr: 110004 -summary: Mark Query Rules as GA -area: Relevance -type: feature -issues: [] -highlight: - title: Mark Query Rules as GA - body: |- - This PR marks query rules as Generally Available. All APIs are no longer - in tech preview. - notable: true diff --git a/docs/changelog/110016.yaml b/docs/changelog/110016.yaml deleted file mode 100644 index 28ad55aa796c8..0000000000000 --- a/docs/changelog/110016.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110016 -summary: Opt in keyword field into fallback synthetic source if needed -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/110019.yaml b/docs/changelog/110019.yaml deleted file mode 100644 index 632e79008d351..0000000000000 --- a/docs/changelog/110019.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110019 -summary: Improve mechanism for extracting the result of a `PlainActionFuture` -area: Distributed -type: enhancement -issues: - - 108125 diff --git a/docs/changelog/110046.yaml b/docs/changelog/110046.yaml deleted file mode 100644 index 6ebe440e7aced..0000000000000 --- a/docs/changelog/110046.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110046 -summary: "ESQL: make named params objects truly per request" -area: ES|QL -type: bug -issues: - - 110028 diff --git a/docs/changelog/110059.yaml b/docs/changelog/110059.yaml deleted file mode 100644 index ba160c091cdc2..0000000000000 --- a/docs/changelog/110059.yaml +++ /dev/null @@ -1,32 +0,0 @@ -pr: 110059 -summary: Adds new `bit` `element_type` for `dense_vectors` -area: Vector Search -type: feature -issues: [] -highlight: - title: Adds new `bit` `element_type` for `dense_vectors` - body: |- - This adds `bit` vector support by adding `element_type: bit` for - vectors. This new element type works for indexed and non-indexed - vectors. Additionally, it works with `hnsw` and `flat` index types. No - quantization based codec works with this element type, this is - consistent with `byte` vectors. - - `bit` vectors accept up to `32768` dimensions in size and expect vectors - that are being indexed to be encoded either as a hexidecimal string or a - `byte[]` array where each element of the `byte` array represents `8` - bits of the vector. - - `bit` vectors support script usage and regular query usage. When - indexed, all comparisons done are `xor` and `popcount` summations (aka, - hamming distance), and the scores are transformed and normalized given - the vector dimensions. - - For scripts, `l1norm` is the same as `hamming` distance and `l2norm` is - `sqrt(l1norm)`. `dotProduct` and `cosineSimilarity` are not supported. - - Note, the dimensions expected by this element_type are always to be - divisible by `8`, and the `byte[]` vectors provided for index must be - have size `dim/8` size, where each byte element represents `8` bits of - the vectors. 
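A minimal sketch of the `bit` `element_type` described in the 110059 entry above: `dims` counts bits and must be divisible by 8, and each indexed vector supplies `dims/8` bytes as a hex string or byte array. Index and field names are illustrative.

[source,bash]
----
# Illustrative only: a bit-element dense_vector (64 bits = 8 bytes per vector).
curl -X PUT "localhost:9200/my-bit-index" \
  -H 'Content-Type: application/json' \
  -d '{
    "mappings": {
      "properties": {
        "embedding": {
          "type": "dense_vector",
          "element_type": "bit",
          "dims": 64,
          "index": true
        }
      }
    }
  }'

# Indexing a document whose 64-bit vector is encoded as a 16-character hex string (8 bytes).
curl -X PUT "localhost:9200/my-bit-index/_doc/1" \
  -H 'Content-Type: application/json' \
  -d '{ "embedding": "ffeeddccbbaa9988" }'
----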
- notable: true diff --git a/docs/changelog/110061.yaml b/docs/changelog/110061.yaml deleted file mode 100644 index 1880a2a197722..0000000000000 --- a/docs/changelog/110061.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110061 -summary: Avoiding running watch jobs in TickerScheduleTriggerEngine if it is paused -area: Watcher -type: bug -issues: - - 105933 diff --git a/docs/changelog/110066.yaml b/docs/changelog/110066.yaml deleted file mode 100644 index 920c6304b63ae..0000000000000 --- a/docs/changelog/110066.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110066 -summary: Support flattened fields and multi-fields as dimensions in downsampling -area: Downsampling -type: bug -issues: - - 99297 diff --git a/docs/changelog/110096.yaml b/docs/changelog/110096.yaml deleted file mode 100644 index 3d6616c289266..0000000000000 --- a/docs/changelog/110096.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110096 -summary: Fix `ClassCastException` with MV_EXPAND on missing field -area: ES|QL -type: bug -issues: - - 109974 diff --git a/docs/changelog/110102.yaml b/docs/changelog/110102.yaml deleted file mode 100644 index d1b9b53e2dfc5..0000000000000 --- a/docs/changelog/110102.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110102 -summary: Optimize ST_DISTANCE filtering with Lucene circle intersection query -area: ES|QL -type: enhancement -issues: - - 109972 diff --git a/docs/changelog/110112.yaml b/docs/changelog/110112.yaml deleted file mode 100644 index eca5fd9af15ce..0000000000000 --- a/docs/changelog/110112.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110112 -summary: Increase response size limit for batched requests -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/110146.yaml b/docs/changelog/110146.yaml deleted file mode 100644 index 61ba35cec319b..0000000000000 --- a/docs/changelog/110146.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110146 -summary: Fix trailing slash in `ml.get_categories` specification -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/110160.yaml b/docs/changelog/110160.yaml deleted file mode 100644 index 0c38c23c69067..0000000000000 --- a/docs/changelog/110160.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110160 -summary: Opt in number fields into fallback synthetic source when doc values a… -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/110176.yaml b/docs/changelog/110176.yaml deleted file mode 100644 index ae1d7d10d6dc4..0000000000000 --- a/docs/changelog/110176.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110176 -summary: Fix trailing slash in two rollup specifications -area: Rollup -type: bug -issues: [] diff --git a/docs/changelog/110177.yaml b/docs/changelog/110177.yaml deleted file mode 100644 index 0ac5328d88df4..0000000000000 --- a/docs/changelog/110177.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110177 -summary: Fix trailing slash in `security.put_privileges` specification -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/110179.yaml b/docs/changelog/110179.yaml deleted file mode 100644 index b99a390c8586f..0000000000000 --- a/docs/changelog/110179.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110179 -summary: Make repository analysis API available to non-operators -area: Snapshot/Restore -type: enhancement -issues: - - 100318 diff --git a/docs/changelog/110186.yaml b/docs/changelog/110186.yaml deleted file mode 100644 index 23eaab118e2ab..0000000000000 --- a/docs/changelog/110186.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110186 -summary: Don't sample calls to `ReduceContext#consumeBucketsAndMaybeBreak` ins 
`InternalDateHistogram` - and `InternalHistogram` during reduction -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/110201.yaml b/docs/changelog/110201.yaml deleted file mode 100644 index a880638881948..0000000000000 --- a/docs/changelog/110201.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110201 -summary: "ES|QL: Fix DISSECT that overwrites input" -area: ES|QL -type: bug -issues: - - 110184 diff --git a/docs/changelog/110214.yaml b/docs/changelog/110214.yaml deleted file mode 100644 index 20f61cac64454..0000000000000 --- a/docs/changelog/110214.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110214 -summary: Handle `ignore_above` in synthetic source for flattened fields -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/110216.yaml b/docs/changelog/110216.yaml new file mode 100644 index 0000000000000..00ab20b230e2c --- /dev/null +++ b/docs/changelog/110216.yaml @@ -0,0 +1,5 @@ +pr: 110216 +summary: Register SLM run before snapshotting to save stats +area: ILM+SLM +type: enhancement +issues: [] diff --git a/docs/changelog/110233.yaml b/docs/changelog/110233.yaml deleted file mode 100644 index d9ce4057090a4..0000000000000 --- a/docs/changelog/110233.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110233 -summary: Support k parameter for knn query -area: Vector Search -type: enhancement -issues: - - 108473 diff --git a/docs/changelog/110234.yaml b/docs/changelog/110234.yaml deleted file mode 100644 index 0656ba5fb6636..0000000000000 --- a/docs/changelog/110234.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110234 -summary: Upgrade to Lucene-9.11.1 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/110236.yaml b/docs/changelog/110236.yaml deleted file mode 100644 index e2dbff7fbf768..0000000000000 --- a/docs/changelog/110236.yaml +++ /dev/null @@ -1,21 +0,0 @@ -pr: 110236 -summary: '`ParseHeapRatioOrDeprecatedByteSizeValue` for `indices.breaker.total.limit`' -area: Infra/Settings -type: deprecation -issues: [] -deprecation: - title: 'Deprecate absolute size values for `indices.breaker.total.limit` setting' - area: Cluster and node setting - details: Previously, the value of `indices.breaker.total.limit` could be specified as - an absolute size in bytes. This setting controls the overal amount of - memory the server is allowed to use before taking remedial actions. Setting - this to a specific number of bytes led to strange behaviour when the node - maximum heap size changed because the circut breaker limit would remain - unchanged. This would either leave the value too low, causing part of the - heap to remain unused; or it would leave the value too high, causing the - circuit breaker to be ineffective at preventing OOM errors. The only - reasonable behaviour for this setting is that it scales with the size of - the heap, and so absolute byte limits are now deprecated. - impact: Users must change their configuration to specify a percentage instead of - an absolute number of bytes for `indices.breaker.total.limit`, or else - accept the default, which is already specified as a percentage. 
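Per the 110236 deprecation above, `indices.breaker.total.limit` should be expressed as a percentage of the heap rather than an absolute byte value. A sketch of making that switch via the cluster settings API; the host and the 65% figure are arbitrary examples, not the shipped default.

[source,bash]
----
# Illustrative only: replace an absolute byte limit with a heap-relative percentage.
curl -X PUT "localhost:9200/_cluster/settings" \
  -H 'Content-Type: application/json' \
  -d '{
    "persistent": {
      "indices.breaker.total.limit": "65%"
    }
  }'
----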
diff --git a/docs/changelog/110248.yaml b/docs/changelog/110248.yaml deleted file mode 100644 index 85739528b69c6..0000000000000 --- a/docs/changelog/110248.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110248 -summary: "[Inference API] Add Amazon Bedrock Support to Inference API" -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/110251.yaml b/docs/changelog/110251.yaml deleted file mode 100644 index a3b0c3128be35..0000000000000 --- a/docs/changelog/110251.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 110251 -summary: Support index sorting with nested fields -area: Logs -type: enhancement -issues: - - 107349 -highlight: - title: Index sorting on indexes with nested fields - body: |- - Index sorting is now supported for indexes with mappings containing nested objects. - The index sort spec (as specified by `index.sort.field`) can't contain any nested - fields, still. - notable: false diff --git a/docs/changelog/110334.yaml b/docs/changelog/110334.yaml deleted file mode 100644 index f83ac04ded773..0000000000000 --- a/docs/changelog/110334.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110334 -summary: Sentence Chunker -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/110337.yaml b/docs/changelog/110337.yaml deleted file mode 100644 index bf21a95c9157f..0000000000000 --- a/docs/changelog/110337.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110337 -summary: Support `ignore_above` on keyword dimensions -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/110338.yaml b/docs/changelog/110338.yaml deleted file mode 100644 index 2334a1cbc9283..0000000000000 --- a/docs/changelog/110338.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110338 -summary: Add `semantic_text` field type and `semantic` query -area: Mapping -type: feature -issues: [] diff --git a/docs/changelog/110347.yaml b/docs/changelog/110347.yaml deleted file mode 100644 index 8727128230935..0000000000000 --- a/docs/changelog/110347.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110347 -summary: "ESQL: Renamed `TopList` to Top" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/110361.yaml b/docs/changelog/110361.yaml deleted file mode 100644 index 8558c88e06049..0000000000000 --- a/docs/changelog/110361.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 110361 -summary: Don't detect `PlainActionFuture` deadlock on concurrent complete -area: Distributed -type: bug -issues: - - 110181 - - 110360 diff --git a/docs/changelog/110369.yaml b/docs/changelog/110369.yaml deleted file mode 100644 index 770294605b444..0000000000000 --- a/docs/changelog/110369.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110369 -summary: Run terms concurrently when cardinality is only lower than shard size -area: Aggregations -type: bug -issues: - - 105505 diff --git a/docs/changelog/110383.yaml b/docs/changelog/110383.yaml deleted file mode 100644 index 5e9bddd4bfcd2..0000000000000 --- a/docs/changelog/110383.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110383 -summary: Add bulk delete roles API -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/110391.yaml b/docs/changelog/110391.yaml deleted file mode 100644 index 1e00eda970398..0000000000000 --- a/docs/changelog/110391.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110391 -summary: Fix ST_DISTANCE Lucene push-down for complex predicates -area: ES|QL -type: bug -issues: - - 110349 diff --git a/docs/changelog/110395.yaml b/docs/changelog/110395.yaml deleted file mode 100644 index 690be55abb5b2..0000000000000 --- a/docs/changelog/110395.yaml +++ 
/dev/null @@ -1,9 +0,0 @@ -pr: 110395 -summary: Mark the Redact processor as Generally Available -area: Ingest Node -type: feature -issues: [] -highlight: - title: The Redact processor is Generally Available - body: The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. The Redact processor was initially released as Technical Preview in `8.7.0`, and is now released as Generally Available. - notable: true diff --git a/docs/changelog/110431.yaml b/docs/changelog/110431.yaml deleted file mode 100644 index 0dd93ef718ef9..0000000000000 --- a/docs/changelog/110431.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110431 -summary: "[Inference API] Fix serialization for inference delete endpoint response" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/110476.yaml b/docs/changelog/110476.yaml deleted file mode 100644 index bc12b3711a366..0000000000000 --- a/docs/changelog/110476.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 110476 -summary: Fix bug in union-types with type-casting in grouping key of STATS -area: ES|QL -type: bug -issues: - - 109922 - - 110477 diff --git a/docs/changelog/110488.yaml b/docs/changelog/110488.yaml deleted file mode 100644 index fbb439f20fc96..0000000000000 --- a/docs/changelog/110488.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110488 -summary: "ESQL: Validate unique plan attribute names" -area: ES|QL -type: bug -issues: - - 110541 diff --git a/docs/changelog/110524.yaml b/docs/changelog/110524.yaml new file mode 100644 index 0000000000000..6274c99b09998 --- /dev/null +++ b/docs/changelog/110524.yaml @@ -0,0 +1,5 @@ +pr: 110524 +summary: Introduce mode `subobjects=auto` for objects +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/110540.yaml b/docs/changelog/110540.yaml deleted file mode 100644 index 5e4994da80704..0000000000000 --- a/docs/changelog/110540.yaml +++ /dev/null @@ -1,16 +0,0 @@ -pr: 110540 -summary: Deprecate using slm privileges to access ilm -area: ILM+SLM -type: deprecation -issues: [] -deprecation: - title: Deprecate using slm privileges to access ilm - area: REST API - details: The `read_slm` privilege can get the ILM status, and - the `manage_slm` privilege can start and stop ILM. Access to these - APIs should be granted using the `read_ilm` and `manage_ilm` privileges - instead. Access to ILM APIs will be removed from SLM privileges in - a future major release, and is now deprecated. - impact: Users that need access to the ILM status API should now - use the `read_ilm` privilege. Users that need to start and stop ILM, - should use the `manage_ilm` privilege. 
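The Redact processor GA entry above (110395) describes obscuring text that matches Grok patterns. A minimal pipeline sketch, assuming a `message` field and the stock `IP` and `EMAILADDRESS` Grok patterns; the pipeline id is made up.

[source,bash]
----
# Illustrative only: redact IP addresses and email addresses found in `message`.
curl -X PUT "localhost:9200/_ingest/pipeline/redact-example" \
  -H 'Content-Type: application/json' \
  -d '{
    "processors": [
      {
        "redact": {
          "field": "message",
          "patterns": ["%{IP:client}", "%{EMAILADDRESS:email}"]
        }
      }
    ]
  }'
----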
diff --git a/docs/changelog/110586.yaml b/docs/changelog/110586.yaml deleted file mode 100644 index cc2bcb85a2dac..0000000000000 --- a/docs/changelog/110586.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110586 -summary: "ESQL: Fix Max doubles bug with negatives and add tests for Max and Min" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/110630.yaml b/docs/changelog/110630.yaml new file mode 100644 index 0000000000000..9bf78e1209753 --- /dev/null +++ b/docs/changelog/110630.yaml @@ -0,0 +1,5 @@ +pr: 110630 +summary: Telemetry for inference adaptive allocations +area: Machine Learning +type: feature +issues: [] diff --git a/docs/changelog/110633.yaml b/docs/changelog/110633.yaml new file mode 100644 index 0000000000000..d4d1dc68cdbcc --- /dev/null +++ b/docs/changelog/110633.yaml @@ -0,0 +1,5 @@ +pr: 110633 +summary: Add manage roles privilege +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/110651.yaml b/docs/changelog/110651.yaml deleted file mode 100644 index c25c63ee0284a..0000000000000 --- a/docs/changelog/110651.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110651 -summary: "Remove `default_field: message` from metrics index templates" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/110665.yaml b/docs/changelog/110665.yaml deleted file mode 100644 index fa6db3190fe60..0000000000000 --- a/docs/changelog/110665.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110665 -summary: "[ESQL] Fix parsing of large magnitude negative numbers" -area: ES|QL -type: bug -issues: - - 104323 diff --git a/docs/changelog/110666.yaml b/docs/changelog/110666.yaml deleted file mode 100644 index d96f8e2024c81..0000000000000 --- a/docs/changelog/110666.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110666 -summary: Removing the use of Stream::peek from `GeoIpDownloader::cleanDatabases` -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/110707.yaml b/docs/changelog/110707.yaml deleted file mode 100644 index e13688c73c743..0000000000000 --- a/docs/changelog/110707.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110707 -summary: Fix issue with returning incomplete fragment for plain highlighter -area: Highlighting -type: bug -issues: [] diff --git a/docs/changelog/110710.yaml b/docs/changelog/110710.yaml deleted file mode 100644 index bf3349ee25cdd..0000000000000 --- a/docs/changelog/110710.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110710 -summary: Add a cluster listener to fix missing node features after upgrading from a version prior to 8.13 -area: Infra/Core -type: bug -issues: - - 109254 diff --git a/docs/changelog/110734.yaml b/docs/changelog/110734.yaml new file mode 100644 index 0000000000000..d6dce144b89cd --- /dev/null +++ b/docs/changelog/110734.yaml @@ -0,0 +1,5 @@ +pr: 110734 +summary: Fix bug in ML serverless autoscaling which prevented trained model updates from triggering a scale up +area: Machine Learning +type: bug +issues: [ ] diff --git a/docs/changelog/110793.yaml b/docs/changelog/110793.yaml deleted file mode 100644 index 8f1f3ba9afeb7..0000000000000 --- a/docs/changelog/110793.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 110793 -summary: Fix for union-types for multiple columns with the same name -area: ES|QL -type: bug -issues: - - 110490 - - 109916 diff --git a/docs/changelog/110796.yaml b/docs/changelog/110796.yaml new file mode 100644 index 0000000000000..a54a9a08bbd27 --- /dev/null +++ b/docs/changelog/110796.yaml @@ -0,0 +1,5 @@ +pr: 110796 +summary: Remove needless forking to GENERIC in `TransportMultiSearchAction` +area: Search 
+type: bug +issues: [] diff --git a/docs/changelog/110816.yaml b/docs/changelog/110816.yaml new file mode 100644 index 0000000000000..bf707376ec9ea --- /dev/null +++ b/docs/changelog/110816.yaml @@ -0,0 +1,6 @@ +pr: 110816 +summary: GET _cluster/settings with include_defaults returns the expected fallback value if defined in elasticsearch.yml +area: Infra/Settings +type: bug +issues: + - 110815 diff --git a/docs/changelog/110824.yaml b/docs/changelog/110824.yaml deleted file mode 100644 index 4fe97d6692865..0000000000000 --- a/docs/changelog/110824.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110824 -summary: "[ESQL] Count_distinct(_source) should return a 400" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/110844.yaml b/docs/changelog/110844.yaml deleted file mode 100644 index ea879f13f3e67..0000000000000 --- a/docs/changelog/110844.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110844 -summary: Directly download commercial ip geolocation databases from providers -area: Ingest Node -type: feature -issues: [] diff --git a/docs/changelog/110847.yaml b/docs/changelog/110847.yaml new file mode 100644 index 0000000000000..214adc97ac7cb --- /dev/null +++ b/docs/changelog/110847.yaml @@ -0,0 +1,5 @@ +pr: 110847 +summary: SLM Interval based scheduling +area: ILM+SLM +type: feature +issues: [] diff --git a/docs/changelog/110901.yaml b/docs/changelog/110901.yaml new file mode 100644 index 0000000000000..599cb7ce9ec98 --- /dev/null +++ b/docs/changelog/110901.yaml @@ -0,0 +1,15 @@ +pr: 110901 +summary: Set lenient to true by default when using updateable synonyms +area: Analysis +type: breaking +issues: [] +breaking: + title: Set lenient to true by default when using updateable synonyms + area: Analysis + details: | + When a `synonym` or `synonym_graph` token filter is configured with `updateable: true`, the default `lenient` + value will now be `true`. + impact: | + `synonym` or `synonym_graph` token filters configured with `updateable: true` will ignore invalid synonyms by + default. This prevents shard initialization errors on invalid synonyms. 
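A rough sketch of the configuration the 110901 entry above is about: an updateable `synonym_graph` filter, where `lenient` now defaults to `true` (spelled out explicitly below only for clarity). The index name, synonyms set id, and analyzer wiring are assumptions for the example.

[source,bash]
----
# Illustrative only: updateable synonym_graph filter used as a search analyzer;
# with updateable: true, lenient now defaults to true. The synonyms set
# "my-synonym-set" is assumed to exist via the synonyms APIs.
curl -X PUT "localhost:9200/my-synonym-index" \
  -H 'Content-Type: application/json' \
  -d '{
    "settings": {
      "analysis": {
        "filter": {
          "my_synonyms": {
            "type": "synonym_graph",
            "synonyms_set": "my-synonym-set",
            "updateable": true,
            "lenient": true
          }
        },
        "analyzer": {
          "my_search_analyzer": {
            "tokenizer": "standard",
            "filter": ["lowercase", "my_synonyms"]
          }
        }
      }
    },
    "mappings": {
      "properties": {
        "title": {
          "type": "text",
          "analyzer": "standard",
          "search_analyzer": "my_search_analyzer"
        }
      }
    }
  }'
----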
+ notable: true diff --git a/docs/changelog/110906.yaml b/docs/changelog/110906.yaml deleted file mode 100644 index 6123b1108fd17..0000000000000 --- a/docs/changelog/110906.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110906 -summary: "Add comma before charset parameter in WWW-Authenticate response header" -area: Authentication -type: bug -issues: [] diff --git a/docs/changelog/110922.yaml b/docs/changelog/110922.yaml deleted file mode 100644 index 6a85ce57de103..0000000000000 --- a/docs/changelog/110922.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110922 -summary: Speed up collecting zero document string terms -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/110927.yaml b/docs/changelog/110927.yaml deleted file mode 100644 index 3602ce3e811fa..0000000000000 --- a/docs/changelog/110927.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110927 -summary: Fix leak in collapsing search results -area: Search -type: bug -issues: [] diff --git a/docs/changelog/110971.yaml b/docs/changelog/110971.yaml new file mode 100644 index 0000000000000..3579f77dc0d1d --- /dev/null +++ b/docs/changelog/110971.yaml @@ -0,0 +1,5 @@ +pr: 110971 +summary: "Search in ES|QL: Add MATCH operator" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/110974.yaml b/docs/changelog/110974.yaml new file mode 100644 index 0000000000000..c9e8c9b78675e --- /dev/null +++ b/docs/changelog/110974.yaml @@ -0,0 +1,5 @@ +pr: 110974 +summary: Add custom rule parameters to force time shift +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/110986.yaml b/docs/changelog/110986.yaml new file mode 100644 index 0000000000000..4e320b19c9578 --- /dev/null +++ b/docs/changelog/110986.yaml @@ -0,0 +1,6 @@ +pr: 110986 +summary: Fix unnecessary mustache template evaluation +area: Ingest Node +type: enhancement +issues: + - 110191 diff --git a/docs/changelog/110993.yaml b/docs/changelog/110993.yaml new file mode 100644 index 0000000000000..9eb653a09e3a4 --- /dev/null +++ b/docs/changelog/110993.yaml @@ -0,0 +1,5 @@ +pr: 110993 +summary: Add link to Max Shards Per Node exception message +area: Distributed +type: enhancement +issues: [] diff --git a/docs/changelog/111071.yaml b/docs/changelog/111071.yaml new file mode 100644 index 0000000000000..5e8ab53db3d03 --- /dev/null +++ b/docs/changelog/111071.yaml @@ -0,0 +1,5 @@ +pr: 111071 +summary: Use native scalar scorer for int8_flat index +area: Vector Search +type: enhancement +issues: [] diff --git a/docs/changelog/111091.yaml b/docs/changelog/111091.yaml new file mode 100644 index 0000000000000..8444681a14a48 --- /dev/null +++ b/docs/changelog/111091.yaml @@ -0,0 +1,5 @@ +pr: 111091 +summary: "X-pack/plugin/otel: introduce x-pack-otel plugin" +area: Data streams +type: feature +issues: [] diff --git a/docs/changelog/111105.yaml b/docs/changelog/111105.yaml new file mode 100644 index 0000000000000..ed32bd1ef7fc3 --- /dev/null +++ b/docs/changelog/111105.yaml @@ -0,0 +1,5 @@ +pr: 111105 +summary: "ESQL: TOP aggregation IP support" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/111118.yaml b/docs/changelog/111118.yaml new file mode 100644 index 0000000000000..c9fe6cb443688 --- /dev/null +++ b/docs/changelog/111118.yaml @@ -0,0 +1,5 @@ +pr: 111118 +summary: "[ES|QL] Simplify patterns for subfields" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/111123.yaml b/docs/changelog/111123.yaml new file mode 100644 index 0000000000000..605b8607f4082 --- /dev/null +++ b/docs/changelog/111123.yaml @@ -0,0 +1,5 @@ +pr: 
111123 +summary: Add Lucene segment-level fields stats +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/111154.yaml b/docs/changelog/111154.yaml new file mode 100644 index 0000000000000..3297f5005a811 --- /dev/null +++ b/docs/changelog/111154.yaml @@ -0,0 +1,5 @@ +pr: 111154 +summary: EIS integration +area: Inference +type: feature +issues: [] diff --git a/docs/changelog/111181.yaml b/docs/changelog/111181.yaml new file mode 100644 index 0000000000000..7f9f5937b7652 --- /dev/null +++ b/docs/changelog/111181.yaml @@ -0,0 +1,5 @@ +pr: 111181 +summary: "[Inference API] Add Alibaba Cloud AI Search Model support to Inference API" +area: Machine Learning +type: enhancement +issues: [ ] diff --git a/docs/changelog/111193.yaml b/docs/changelog/111193.yaml new file mode 100644 index 0000000000000..9e56facb60d3a --- /dev/null +++ b/docs/changelog/111193.yaml @@ -0,0 +1,6 @@ +pr: 111193 +summary: Fix cases of collections with one point +area: Geo +type: bug +issues: + - 110982 diff --git a/docs/changelog/111212.yaml b/docs/changelog/111212.yaml new file mode 100644 index 0000000000000..67d1513b3ff6f --- /dev/null +++ b/docs/changelog/111212.yaml @@ -0,0 +1,6 @@ +pr: 111212 +summary: Fix score count validation in reranker response +area: Ranking +type: bug +issues: + - 111202 diff --git a/docs/changelog/111215.yaml b/docs/changelog/111215.yaml new file mode 100644 index 0000000000000..dc044c2283fc4 --- /dev/null +++ b/docs/changelog/111215.yaml @@ -0,0 +1,6 @@ +pr: 111215 +summary: Make `SnapshotLifecycleStats` immutable so `SnapshotLifecycleMetadata.EMPTY` + isn't changed as side-effect +area: ILM+SLM +type: bug +issues: [] diff --git a/docs/changelog/111225.yaml b/docs/changelog/111225.yaml new file mode 100644 index 0000000000000..bcd344847cfd2 --- /dev/null +++ b/docs/changelog/111225.yaml @@ -0,0 +1,5 @@ +pr: 111225 +summary: Upgrade Azure SDK +area: Snapshot/Restore +type: upgrade +issues: [] diff --git a/docs/changelog/111238.yaml b/docs/changelog/111238.yaml new file mode 100644 index 0000000000000..b918b754ff595 --- /dev/null +++ b/docs/changelog/111238.yaml @@ -0,0 +1,6 @@ +pr: 111238 +summary: Fix validation of TEXT fields with case insensitive comparison +area: EQL +type: bug +issues: + - 111235 diff --git a/docs/changelog/111245.yaml b/docs/changelog/111245.yaml new file mode 100644 index 0000000000000..384373d52cb20 --- /dev/null +++ b/docs/changelog/111245.yaml @@ -0,0 +1,6 @@ +pr: 111245 +summary: Truncating watcher history if it is too large +area: Watcher +type: bug +issues: + - 94745 diff --git a/docs/changelog/111274.yaml b/docs/changelog/111274.yaml new file mode 100644 index 0000000000000..e26bcc03ce118 --- /dev/null +++ b/docs/changelog/111274.yaml @@ -0,0 +1,5 @@ +pr: 111274 +summary: Include account name in Azure settings exceptions +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/111284.yaml b/docs/changelog/111284.yaml new file mode 100644 index 0000000000000..f87649a134af6 --- /dev/null +++ b/docs/changelog/111284.yaml @@ -0,0 +1,6 @@ +pr: 111284 +summary: Update `semantic_text` field to support indexing numeric and boolean data + types +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/111285.yaml b/docs/changelog/111285.yaml new file mode 100644 index 0000000000000..e4856482b4d6e --- /dev/null +++ b/docs/changelog/111285.yaml @@ -0,0 +1,5 @@ +pr: 111285 +summary: "[Bugfix] Add `accessDeclaredMembers` permission to allow search application templates to parse floats" +area: Relevance +type: 
bug +issues: [] diff --git a/docs/changelog/111311.yaml b/docs/changelog/111311.yaml new file mode 100644 index 0000000000000..5786e11e885e2 --- /dev/null +++ b/docs/changelog/111311.yaml @@ -0,0 +1,6 @@ +pr: 111311 +summary: Adding support for data streams with a match-all template +area: Data streams +type: bug +issues: + - 111204 diff --git a/docs/changelog/111315.yaml b/docs/changelog/111315.yaml new file mode 100644 index 0000000000000..0e2e56898b51c --- /dev/null +++ b/docs/changelog/111315.yaml @@ -0,0 +1,5 @@ +pr: 111315 +summary: Add link to flood-stage watermark exception message +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/111316.yaml b/docs/changelog/111316.yaml new file mode 100644 index 0000000000000..0d915cd1ec3ea --- /dev/null +++ b/docs/changelog/111316.yaml @@ -0,0 +1,5 @@ +pr: 111316 +summary: "[Service Account] Add `AutoOps` account" +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/111344.yaml b/docs/changelog/111344.yaml new file mode 100644 index 0000000000000..3d5988054749d --- /dev/null +++ b/docs/changelog/111344.yaml @@ -0,0 +1,5 @@ +pr: 111344 +summary: Add support for Azure Managed Identity +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/111367.yaml b/docs/changelog/111367.yaml new file mode 100644 index 0000000000000..89e6c1d3b4da4 --- /dev/null +++ b/docs/changelog/111367.yaml @@ -0,0 +1,5 @@ +pr: 111367 +summary: "ESQL: Add Values aggregation tests, fix `ConstantBytesRefBlock` memory handling" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/111412.yaml b/docs/changelog/111412.yaml new file mode 100644 index 0000000000000..297fa77cd2664 --- /dev/null +++ b/docs/changelog/111412.yaml @@ -0,0 +1,6 @@ +pr: 111412 +summary: Make enrich cache based on memory usage +area: Ingest Node +type: enhancement +issues: + - 106081 diff --git a/docs/changelog/111420.yaml b/docs/changelog/111420.yaml new file mode 100644 index 0000000000000..4e2640ac5762a --- /dev/null +++ b/docs/changelog/111420.yaml @@ -0,0 +1,5 @@ +pr: 111420 +summary: "[Query rules] Add `exclude` query rule type" +area: Relevance +type: feature +issues: [] diff --git a/docs/changelog/111437.yaml b/docs/changelog/111437.yaml new file mode 100644 index 0000000000000..a50312ffdd1aa --- /dev/null +++ b/docs/changelog/111437.yaml @@ -0,0 +1,5 @@ +pr: 111437 +summary: "[ES|QL] Create `Range` in `PushFiltersToSource` for qualified pushable filters on the same field" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/111445.yaml b/docs/changelog/111445.yaml new file mode 100644 index 0000000000000..9ba8e4371bd0c --- /dev/null +++ b/docs/changelog/111445.yaml @@ -0,0 +1,5 @@ +pr: 111445 +summary: Support booleans in routing path +area: TSDB +type: enhancement +issues: [] diff --git a/docs/changelog/111457.yaml b/docs/changelog/111457.yaml new file mode 100644 index 0000000000000..f4ad4ee53eb0a --- /dev/null +++ b/docs/changelog/111457.yaml @@ -0,0 +1,6 @@ +pr: 111457 +summary: Add support for boolean dimensions +area: TSDB +type: enhancement +issues: + - 111338 diff --git a/docs/changelog/111475.yaml b/docs/changelog/111475.yaml new file mode 100644 index 0000000000000..264c975444868 --- /dev/null +++ b/docs/changelog/111475.yaml @@ -0,0 +1,6 @@ +pr: 111475 +summary: "ESQL: Fix for overzealous validation in case of invalid mapped fields" +area: ES|QL +type: bug +issues: + - 111452 diff --git a/docs/changelog/111490.yaml b/docs/changelog/111490.yaml new file mode 100644 index 
0000000000000..b67c16189cc62 --- /dev/null +++ b/docs/changelog/111490.yaml @@ -0,0 +1,5 @@ +pr: 111490 +summary: Temporarily return both `modelId` and `inferenceId` for GET /_inference until we migrate clients to only `inferenceId` +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/111501.yaml b/docs/changelog/111501.yaml new file mode 100644 index 0000000000000..a424142376e52 --- /dev/null +++ b/docs/changelog/111501.yaml @@ -0,0 +1,6 @@ +pr: 111501 +summary: "[ES|QL] Combine Disjunctive CIDRMatch" +area: ES|QL +type: enhancement +issues: + - 105143 diff --git a/docs/changelog/111516.yaml b/docs/changelog/111516.yaml new file mode 100644 index 0000000000000..96e8bd843f750 --- /dev/null +++ b/docs/changelog/111516.yaml @@ -0,0 +1,5 @@ +pr: 111516 +summary: Adding support for `allow_partial_search_results` in PIT +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/111519.yaml b/docs/changelog/111519.yaml new file mode 100644 index 0000000000000..8cc62fb8ed903 --- /dev/null +++ b/docs/changelog/111519.yaml @@ -0,0 +1,5 @@ +pr: 111519 +summary: "ESQL: Don't mutate the `BoolQueryBuilder` in plan" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/111523.yaml b/docs/changelog/111523.yaml new file mode 100644 index 0000000000000..202d16c5a426d --- /dev/null +++ b/docs/changelog/111523.yaml @@ -0,0 +1,5 @@ +pr: 111523 +summary: Search coordinator uses `event.ingested` in cluster state to do rewrites +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/111535.yaml b/docs/changelog/111535.yaml new file mode 100644 index 0000000000000..4beebbf28d4e1 --- /dev/null +++ b/docs/changelog/111535.yaml @@ -0,0 +1,5 @@ +pr: 111535 +summary: Fix remote cluster credential secure settings reload +area: Authorization +type: bug +issues: [] diff --git a/docs/changelog/111544.yaml b/docs/changelog/111544.yaml new file mode 100644 index 0000000000000..d4c46f485e664 --- /dev/null +++ b/docs/changelog/111544.yaml @@ -0,0 +1,5 @@ +pr: 111544 +summary: "ESQL: Strings support for MAX and MIN aggregations" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/111552.yaml b/docs/changelog/111552.yaml new file mode 100644 index 0000000000000..d9991788d4fa9 --- /dev/null +++ b/docs/changelog/111552.yaml @@ -0,0 +1,5 @@ +pr: 111552 +summary: Siem ea 9521 improve test +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/111576.yaml b/docs/changelog/111576.yaml new file mode 100644 index 0000000000000..6d3c331f4bbd5 --- /dev/null +++ b/docs/changelog/111576.yaml @@ -0,0 +1,6 @@ +pr: 111576 +summary: Execute shard snapshot tasks in shard-id order +area: Snapshot/Restore +type: enhancement +issues: + - 108739 diff --git a/docs/changelog/111600.yaml b/docs/changelog/111600.yaml new file mode 100644 index 0000000000000..0c1e01e1c2e23 --- /dev/null +++ b/docs/changelog/111600.yaml @@ -0,0 +1,5 @@ +pr: 111600 +summary: Make ecs@mappings work with OTel attributes +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/111624.yaml b/docs/changelog/111624.yaml new file mode 100644 index 0000000000000..7b04b244ef7a7 --- /dev/null +++ b/docs/changelog/111624.yaml @@ -0,0 +1,6 @@ +pr: 111624 +summary: Extend logging for dropped warning headers +area: Infra/Core +type: enhancement +issues: + - 90527 diff --git a/docs/changelog/111644.yaml b/docs/changelog/111644.yaml new file mode 100644 index 0000000000000..3705d697c95e3 --- /dev/null +++ b/docs/changelog/111644.yaml @@ -0,0 +1,6 @@ +pr: 111644 +summary: 
Force using the last centroid during merging +area: Aggregations +type: bug +issues: + - 111065 diff --git a/docs/changelog/111655.yaml b/docs/changelog/111655.yaml new file mode 100644 index 0000000000000..077714d15a712 --- /dev/null +++ b/docs/changelog/111655.yaml @@ -0,0 +1,5 @@ +pr: 111655 +summary: Migrate Inference to `ChunkedToXContent` +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/111673.yaml b/docs/changelog/111673.yaml new file mode 100644 index 0000000000000..ebc211633fcab --- /dev/null +++ b/docs/changelog/111673.yaml @@ -0,0 +1,5 @@ +pr: 111673 +summary: Properly handle filters on `TextSimilarityRank` retriever +area: Ranking +type: bug +issues: [] diff --git a/docs/changelog/111683.yaml b/docs/changelog/111683.yaml new file mode 100644 index 0000000000000..cbb2e5ad71ddc --- /dev/null +++ b/docs/changelog/111683.yaml @@ -0,0 +1,6 @@ +pr: 111683 +summary: Only emit product origin in deprecation log if present +area: Infra/Logging +type: bug +issues: + - 81757 diff --git a/docs/changelog/111689.yaml b/docs/changelog/111689.yaml new file mode 100644 index 0000000000000..ccb3d4d4f87c5 --- /dev/null +++ b/docs/changelog/111689.yaml @@ -0,0 +1,6 @@ +pr: 111689 +summary: Add nanos support to `ZonedDateTime` serialization +area: Infra/Core +type: enhancement +issues: + - 68292 diff --git a/docs/changelog/111690.yaml b/docs/changelog/111690.yaml new file mode 100644 index 0000000000000..36e715744ad88 --- /dev/null +++ b/docs/changelog/111690.yaml @@ -0,0 +1,5 @@ +pr: 111690 +summary: "ESQL: Support INLINESTATS grouped on expressions" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/111729.yaml b/docs/changelog/111729.yaml new file mode 100644 index 0000000000000..c75c14a997da9 --- /dev/null +++ b/docs/changelog/111729.yaml @@ -0,0 +1,6 @@ +pr: 111729 +summary: Speed up dense/sparse vector stats +area: Vector Search +type: bug +issues: + - 111715 diff --git a/docs/changelog/111740.yaml b/docs/changelog/111740.yaml new file mode 100644 index 0000000000000..48b7ee200e45e --- /dev/null +++ b/docs/changelog/111740.yaml @@ -0,0 +1,6 @@ +pr: 111740 +summary: Fix Start Trial API output acknowledgement header for features +area: License +type: bug +issues: + - 111739 diff --git a/docs/changelog/111749.yaml b/docs/changelog/111749.yaml new file mode 100644 index 0000000000000..77e0c65005dd6 --- /dev/null +++ b/docs/changelog/111749.yaml @@ -0,0 +1,6 @@ +pr: 111749 +summary: "ESQL: Added `mv_percentile` function" +area: ES|QL +type: feature +issues: + - 111591 diff --git a/docs/changelog/111756.yaml b/docs/changelog/111756.yaml new file mode 100644 index 0000000000000..e58345dbe696a --- /dev/null +++ b/docs/changelog/111756.yaml @@ -0,0 +1,6 @@ +pr: 111756 +summary: Fix `NullPointerException` when doing knn search on empty index without dims +area: Vector Search +type: bug +issues: + - 111733 diff --git a/docs/changelog/111758.yaml b/docs/changelog/111758.yaml new file mode 100644 index 0000000000000..c95cdf48bc8a7 --- /dev/null +++ b/docs/changelog/111758.yaml @@ -0,0 +1,6 @@ +pr: 111758 +summary: Revert "Avoid bucket copies in Aggs" +area: Aggregations +type: bug +issues: + - 111679 diff --git a/docs/changelog/111779.yaml b/docs/changelog/111779.yaml new file mode 100644 index 0000000000000..52c635490e1e4 --- /dev/null +++ b/docs/changelog/111779.yaml @@ -0,0 +1,7 @@ +pr: 111779 +summary: "ESQL: Fix serialization during `can_match`" +area: ES|QL +type: bug +issues: + - 111701 + - 111726 diff --git a/docs/changelog/111797.yaml 
b/docs/changelog/111797.yaml new file mode 100644 index 0000000000000..00b793a19d9c3 --- /dev/null +++ b/docs/changelog/111797.yaml @@ -0,0 +1,6 @@ +pr: 111797 +summary: "ESQL: fix for missing indices error message" +area: ES|QL +type: bug +issues: + - 111712 diff --git a/docs/changelog/111807.yaml b/docs/changelog/111807.yaml new file mode 100644 index 0000000000000..97c5e58461c34 --- /dev/null +++ b/docs/changelog/111807.yaml @@ -0,0 +1,5 @@ +pr: 111807 +summary: Explain Function Score Query +area: Search +type: bug +issues: [] diff --git a/docs/changelog/111809.yaml b/docs/changelog/111809.yaml new file mode 100644 index 0000000000000..5a2f220e3a697 --- /dev/null +++ b/docs/changelog/111809.yaml @@ -0,0 +1,5 @@ +pr: 111809 +summary: Add Field caps support for Semantic Text +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/111818.yaml b/docs/changelog/111818.yaml new file mode 100644 index 0000000000000..c3a632861aae6 --- /dev/null +++ b/docs/changelog/111818.yaml @@ -0,0 +1,5 @@ +pr: 111818 +summary: Add tier preference to security index settings allowlist +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/111840.yaml b/docs/changelog/111840.yaml new file mode 100644 index 0000000000000..c40a9e2aef621 --- /dev/null +++ b/docs/changelog/111840.yaml @@ -0,0 +1,5 @@ +pr: 111840 +summary: "ESQL: Add async ID and `is_running` headers to ESQL async query" +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/111843.yaml b/docs/changelog/111843.yaml new file mode 100644 index 0000000000000..c8b20036520f3 --- /dev/null +++ b/docs/changelog/111843.yaml @@ -0,0 +1,5 @@ +pr: 111843 +summary: Add maximum nested depth check to WKT parser +area: Geo +type: bug +issues: [] diff --git a/docs/changelog/111855.yaml b/docs/changelog/111855.yaml new file mode 100644 index 0000000000000..3f15e9c20135a --- /dev/null +++ b/docs/changelog/111855.yaml @@ -0,0 +1,5 @@ +pr: 111855 +summary: "ESQL: Profile more timing information" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/111863.yaml b/docs/changelog/111863.yaml new file mode 100644 index 0000000000000..1724cd83f984b --- /dev/null +++ b/docs/changelog/111863.yaml @@ -0,0 +1,6 @@ +pr: 111863 +summary: Fixing incorrect bulk request took time +area: Ingest Node +type: bug +issues: + - 111854 diff --git a/docs/changelog/111866.yaml b/docs/changelog/111866.yaml new file mode 100644 index 0000000000000..34bf56da4dc9e --- /dev/null +++ b/docs/changelog/111866.yaml @@ -0,0 +1,6 @@ +pr: 111866 +summary: Fix windows memory locking +area: Infra/Core +type: bug +issues: + - 111847 diff --git a/docs/changelog/111874.yaml b/docs/changelog/111874.yaml new file mode 100644 index 0000000000000..26ec90aa6cd4c --- /dev/null +++ b/docs/changelog/111874.yaml @@ -0,0 +1,8 @@ +pr: 111874 +summary: "ESQL: BUCKET: allow numerical spans as whole numbers" +area: ES|QL +type: enhancement +issues: + - 104646 + - 109340 + - 105375 diff --git a/docs/changelog/111879.yaml b/docs/changelog/111879.yaml new file mode 100644 index 0000000000000..b8c2111e1d286 --- /dev/null +++ b/docs/changelog/111879.yaml @@ -0,0 +1,6 @@ +pr: 111879 +summary: "ESQL: Have BUCKET generate friendlier intervals" +area: ES|QL +type: enhancement +issues: + - 110916 diff --git a/docs/changelog/111915.yaml b/docs/changelog/111915.yaml new file mode 100644 index 0000000000000..f64c45b82d10c --- /dev/null +++ b/docs/changelog/111915.yaml @@ -0,0 +1,6 @@ +pr: 111915 +summary: Fix DLS & FLS sometimes being enforced when it is disabled 
+area: Authorization +type: bug +issues: + - 94709 diff --git a/docs/changelog/111917.yaml b/docs/changelog/111917.yaml new file mode 100644 index 0000000000000..0dc760d76a698 --- /dev/null +++ b/docs/changelog/111917.yaml @@ -0,0 +1,7 @@ +pr: 111917 +summary: "[ES|QL] Cast mixed numeric types to a common numeric type for Coalesce and\ + \ In at Analyzer" +area: ES|QL +type: enhancement +issues: + - 111486 diff --git a/docs/changelog/111932.yaml b/docs/changelog/111932.yaml new file mode 100644 index 0000000000000..ce840ecebcff0 --- /dev/null +++ b/docs/changelog/111932.yaml @@ -0,0 +1,6 @@ +pr: 111932 +summary: Fix union-types where one index is missing the field +area: ES|QL +type: bug +issues: + - 111912 diff --git a/docs/changelog/111937.yaml b/docs/changelog/111937.yaml new file mode 100644 index 0000000000000..7d856e29d54c5 --- /dev/null +++ b/docs/changelog/111937.yaml @@ -0,0 +1,6 @@ +pr: 111937 +summary: Handle `BigInteger` in xcontent copy +area: Infra/Core +type: bug +issues: + - 111812 diff --git a/docs/changelog/111943.yaml b/docs/changelog/111943.yaml new file mode 100644 index 0000000000000..6b9f03ccee31c --- /dev/null +++ b/docs/changelog/111943.yaml @@ -0,0 +1,6 @@ +pr: 111943 +summary: Fix synthetic source for empty nested objects +area: Mapping +type: bug +issues: + - 111811 diff --git a/docs/changelog/111947.yaml b/docs/changelog/111947.yaml new file mode 100644 index 0000000000000..0aff0b9c7b8be --- /dev/null +++ b/docs/changelog/111947.yaml @@ -0,0 +1,5 @@ +pr: 111947 +summary: Improve performance of grok pattern cycle detection +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/111948.yaml b/docs/changelog/111948.yaml new file mode 100644 index 0000000000000..a3a592abaf1ca --- /dev/null +++ b/docs/changelog/111948.yaml @@ -0,0 +1,5 @@ +pr: 111948 +summary: Upgrade xcontent to Jackson 2.17.0 +area: Infra/Core +type: upgrade +issues: [] diff --git a/docs/changelog/111950.yaml b/docs/changelog/111950.yaml new file mode 100644 index 0000000000000..3f23c17d8e652 --- /dev/null +++ b/docs/changelog/111950.yaml @@ -0,0 +1,6 @@ +pr: 111950 +summary: "[ES|QL] Name parameter with leading underscore" +area: ES|QL +type: enhancement +issues: + - 111821 diff --git a/docs/changelog/111955.yaml b/docs/changelog/111955.yaml new file mode 100644 index 0000000000000..ebc518203b7cc --- /dev/null +++ b/docs/changelog/111955.yaml @@ -0,0 +1,7 @@ +pr: 111955 +summary: Clean up dangling S3 multipart uploads +area: Snapshot/Restore +type: enhancement +issues: + - 101169 + - 44971 diff --git a/docs/changelog/111966.yaml b/docs/changelog/111966.yaml new file mode 100644 index 0000000000000..facf0a61c4d8a --- /dev/null +++ b/docs/changelog/111966.yaml @@ -0,0 +1,5 @@ +pr: 111966 +summary: No error when `store_array_source` is used without synthetic source +area: Mapping +type: bug +issues: [] diff --git a/docs/changelog/111968.yaml b/docs/changelog/111968.yaml new file mode 100644 index 0000000000000..9d758c76369e9 --- /dev/null +++ b/docs/changelog/111968.yaml @@ -0,0 +1,6 @@ +pr: 111968 +summary: "ESQL: don't lose the original casting error message" +area: ES|QL +type: bug +issues: + - 111967 diff --git a/docs/changelog/111969.yaml b/docs/changelog/111969.yaml new file mode 100644 index 0000000000000..2d276850c4988 --- /dev/null +++ b/docs/changelog/111969.yaml @@ -0,0 +1,5 @@ +pr: 111969 +summary: "[Profiling] add `container.id` field to event index template" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/111972.yaml 
b/docs/changelog/111972.yaml new file mode 100644 index 0000000000000..58477c68f0e7c --- /dev/null +++ b/docs/changelog/111972.yaml @@ -0,0 +1,15 @@ +pr: 111972 +summary: Introduce global retention in data stream lifecycle. +area: Data streams +type: feature +issues: [] +highlight: + title: Add global retention in data stream lifecycle + body: "Data stream lifecycle now supports configuring retention on a cluster level,\ + \ namely global retention. Global retention \nallows us to configure two different\ + \ retentions:\n\n- `data_streams.lifecycle.retention.default` is applied to all\ + \ data streams managed by the data stream lifecycle that do not have retention\n\ + defined on the data stream level.\n- `data_streams.lifecycle.retention.max` is\ + \ applied to all data streams managed by the data stream lifecycle and it allows\ + \ any data stream \ndata to be deleted after the `max_retention` has passed." + notable: true diff --git a/docs/changelog/111983.yaml b/docs/changelog/111983.yaml new file mode 100644 index 0000000000000..d5043d0b44155 --- /dev/null +++ b/docs/changelog/111983.yaml @@ -0,0 +1,6 @@ +pr: 111983 +summary: Avoid losing error message in failure collector +area: ES|QL +type: bug +issues: + - 111894 diff --git a/docs/changelog/111994.yaml b/docs/changelog/111994.yaml new file mode 100644 index 0000000000000..ee62651c43987 --- /dev/null +++ b/docs/changelog/111994.yaml @@ -0,0 +1,6 @@ +pr: 111994 +summary: Merge multiple ignored source entries for the same field +area: Logs +type: bug +issues: + - 111694 diff --git a/docs/changelog/112005.yaml b/docs/changelog/112005.yaml new file mode 100644 index 0000000000000..2d84381e632b3 --- /dev/null +++ b/docs/changelog/112005.yaml @@ -0,0 +1,6 @@ +pr: 112005 +summary: Check for valid `parentDoc` before retrieving its previous +area: Mapping +type: bug +issues: + - 111990 diff --git a/docs/changelog/112019.yaml b/docs/changelog/112019.yaml new file mode 100644 index 0000000000000..7afb207864ed7 --- /dev/null +++ b/docs/changelog/112019.yaml @@ -0,0 +1,5 @@ +pr: 112019 +summary: Display effective retention in the relevant data stream APIs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/112038.yaml b/docs/changelog/112038.yaml new file mode 100644 index 0000000000000..6cbfb373b7420 --- /dev/null +++ b/docs/changelog/112038.yaml @@ -0,0 +1,6 @@ +pr: 112038 +summary: Semantic reranking should fail whenever inference ID does not exist +area: Relevance +type: bug +issues: + - 111934 diff --git a/docs/changelog/112046.yaml b/docs/changelog/112046.yaml new file mode 100644 index 0000000000000..f3cda1ed7a7d2 --- /dev/null +++ b/docs/changelog/112046.yaml @@ -0,0 +1,5 @@ +pr: 112046 +summary: Fix calculation of parent offset for ignored source in some cases +area: Mapping +type: bug +issues: [] diff --git a/docs/changelog/112058.yaml b/docs/changelog/112058.yaml new file mode 100644 index 0000000000000..e974b3413582e --- /dev/null +++ b/docs/changelog/112058.yaml @@ -0,0 +1,5 @@ +pr: 112058 +summary: Fix RRF validation for `rank_constant` < 1 +area: Ranking +type: bug +issues: [] diff --git a/docs/changelog/112066.yaml b/docs/changelog/112066.yaml new file mode 100644 index 0000000000000..5dd846766bc8e --- /dev/null +++ b/docs/changelog/112066.yaml @@ -0,0 +1,6 @@ +pr: 112066 +summary: Do not treat replica as unassigned if primary recently created and unassigned + time is below a threshold +area: Health +type: enhancement +issues: [] diff --git a/docs/changelog/112090.yaml b/docs/changelog/112090.yaml new
file mode 100644 index 0000000000000..6d6e4d0851523 --- /dev/null +++ b/docs/changelog/112090.yaml @@ -0,0 +1,6 @@ +pr: 112090 +summary: Always check `crsType` when folding spatial functions +area: Geo +type: bug +issues: + - 112089 diff --git a/docs/changelog/112100.yaml b/docs/changelog/112100.yaml new file mode 100644 index 0000000000000..9135edecb4d77 --- /dev/null +++ b/docs/changelog/112100.yaml @@ -0,0 +1,5 @@ +pr: 112100 +summary: Exclude internal data streams from global retention +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/112123.yaml b/docs/changelog/112123.yaml new file mode 100644 index 0000000000000..0c0d7ac44cd17 --- /dev/null +++ b/docs/changelog/112123.yaml @@ -0,0 +1,5 @@ +pr: 112123 +summary: SLM interval schedule followup - add back `getFieldName` style getters +area: ILM+SLM +type: enhancement +issues: [] diff --git a/docs/changelog/112126.yaml b/docs/changelog/112126.yaml new file mode 100644 index 0000000000000..f6a7aeb893a5e --- /dev/null +++ b/docs/changelog/112126.yaml @@ -0,0 +1,5 @@ +pr: 112126 +summary: Add support for spatial relationships in point field mapper +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/112133.yaml b/docs/changelog/112133.yaml new file mode 100644 index 0000000000000..11109402b7373 --- /dev/null +++ b/docs/changelog/112133.yaml @@ -0,0 +1,5 @@ +pr: 112133 +summary: Add telemetry for repository usage +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/112139.yaml b/docs/changelog/112139.yaml new file mode 100644 index 0000000000000..d6d992ec1dcf2 --- /dev/null +++ b/docs/changelog/112139.yaml @@ -0,0 +1,6 @@ +pr: 112139 +summary: Fix NPE when executing doc value queries over shape geometries with empty + segments +area: Geo +type: bug +issues: [] diff --git a/docs/changelog/112151.yaml b/docs/changelog/112151.yaml new file mode 100644 index 0000000000000..f5cbfd8da07c2 --- /dev/null +++ b/docs/changelog/112151.yaml @@ -0,0 +1,5 @@ +pr: 112151 +summary: Store original source for keywords using a normalizer +area: Logs +type: enhancement +issues: [] diff --git a/docs/changelog/112173.yaml b/docs/changelog/112173.yaml new file mode 100644 index 0000000000000..9a43b0d1bf1fa --- /dev/null +++ b/docs/changelog/112173.yaml @@ -0,0 +1,7 @@ +pr: 112173 +summary: Prevent synthetic field loaders accessing stored fields from using stale + data +area: Mapping +type: bug +issues: + - 112156 diff --git a/docs/changelog/112178.yaml b/docs/changelog/112178.yaml new file mode 100644 index 0000000000000..f1011291542b8 --- /dev/null +++ b/docs/changelog/112178.yaml @@ -0,0 +1,6 @@ +pr: 112178 +summary: Avoid wrapping rejection exception in exchange +area: ES|QL +type: bug +issues: + - 112106 diff --git a/docs/changelog/112199.yaml b/docs/changelog/112199.yaml new file mode 100644 index 0000000000000..eb22f215f9828 --- /dev/null +++ b/docs/changelog/112199.yaml @@ -0,0 +1,5 @@ +pr: 112199 +summary: Support docvalues only query in shape field +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/112200.yaml b/docs/changelog/112200.yaml new file mode 100644 index 0000000000000..0c2c3d71e3ddf --- /dev/null +++ b/docs/changelog/112200.yaml @@ -0,0 +1,6 @@ +pr: 112200 +summary: "ES|QL: better validation of GROK patterns" +area: ES|QL +type: bug +issues: + - 112111 diff --git a/docs/changelog/112214.yaml b/docs/changelog/112214.yaml new file mode 100644 index 0000000000000..430f95a72bb3f --- /dev/null +++ b/docs/changelog/112214.yaml @@ -0,0 +1,5 @@ +pr: 112214 +summary: 
'`ByteArrayStreamInput:` Return -1 when there are no more bytes to read' +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/112217.yaml b/docs/changelog/112217.yaml new file mode 100644 index 0000000000000..bb367d6128001 --- /dev/null +++ b/docs/changelog/112217.yaml @@ -0,0 +1,5 @@ +pr: 112217 +summary: Fix template alias parsing livelock +area: Indices APIs +type: bug +issues: [] diff --git a/docs/changelog/112218.yaml b/docs/changelog/112218.yaml new file mode 100644 index 0000000000000..c426dd7ade4ed --- /dev/null +++ b/docs/changelog/112218.yaml @@ -0,0 +1,9 @@ +pr: 112218 +summary: "ESQL: Fix a bug in `MV_PERCENTILE`" +area: ES|QL +type: bug +issues: + - 112193 + - 112180 + - 112187 + - 112188 diff --git a/docs/changelog/112226.yaml b/docs/changelog/112226.yaml new file mode 100644 index 0000000000000..ac36c0c0fe4e2 --- /dev/null +++ b/docs/changelog/112226.yaml @@ -0,0 +1,6 @@ +pr: 112226 +summary: "Fix \"unexpected field [remote_cluster]\" for CCS (RCS 1.0) when using API\ + \ key that references `remote_cluster`" +area: Security +type: bug +issues: [] diff --git a/docs/changelog/112230.yaml b/docs/changelog/112230.yaml new file mode 100644 index 0000000000000..ef12dc3f78267 --- /dev/null +++ b/docs/changelog/112230.yaml @@ -0,0 +1,5 @@ +pr: 112230 +summary: Fix connection timeout for `OpenIdConnectAuthenticator` get Userinfo +area: Security +type: bug +issues: [] diff --git a/docs/changelog/112242.yaml b/docs/changelog/112242.yaml new file mode 100644 index 0000000000000..7292a00166de2 --- /dev/null +++ b/docs/changelog/112242.yaml @@ -0,0 +1,5 @@ +pr: 112242 +summary: Fix toReleaseVersion() when called on the current version id +area: Infra/Core +type: bug +issues: [111900] diff --git a/docs/changelog/112260.yaml b/docs/changelog/112260.yaml new file mode 100644 index 0000000000000..3f5642188a367 --- /dev/null +++ b/docs/changelog/112260.yaml @@ -0,0 +1,6 @@ +pr: 112260 +summary: Fix DLS over Runtime Fields +area: "Authorization" +type: bug +issues: + - 111637 diff --git a/docs/changelog/112270.yaml b/docs/changelog/112270.yaml new file mode 100644 index 0000000000000..1e6b9c7fc9290 --- /dev/null +++ b/docs/changelog/112270.yaml @@ -0,0 +1,5 @@ +pr: 112270 +summary: Support sparse embedding models in the elasticsearch inference service +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/112273.yaml b/docs/changelog/112273.yaml new file mode 100644 index 0000000000000..3182a1884a145 --- /dev/null +++ b/docs/changelog/112273.yaml @@ -0,0 +1,5 @@ +pr: 111181 +summary: "[Inference API] Add Docs for AlibabaCloud AI Search Support for the Inference API" +area: Machine Learning +type: enhancement +issues: [ ] diff --git a/docs/changelog/112277.yaml b/docs/changelog/112277.yaml new file mode 100644 index 0000000000000..eac474555999a --- /dev/null +++ b/docs/changelog/112277.yaml @@ -0,0 +1,5 @@ +pr: 112277 +summary: Upgrade `repository-azure` dependencies +area: Snapshot/Restore +type: upgrade +issues: [] diff --git a/docs/changelog/112320.yaml b/docs/changelog/112320.yaml new file mode 100644 index 0000000000000..d35a08dfa4e91 --- /dev/null +++ b/docs/changelog/112320.yaml @@ -0,0 +1,5 @@ +pr: 112320 +summary: Upgrade xcontent to Jackson 2.17.2 +area: Infra/Core +type: upgrade +issues: [] diff --git a/docs/changelog/112341.yaml b/docs/changelog/112341.yaml new file mode 100644 index 0000000000000..8f44b53ad9998 --- /dev/null +++ b/docs/changelog/112341.yaml @@ -0,0 +1,5 @@ +pr: 112341 +summary: Fix DLS using runtime fields and synthetic 
source +area: Authorization +type: bug +issues: [] diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index d7d837b2f8364..16879450c65d8 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -6,6 +6,8 @@ A metric aggregation that executes using scripts to provide a metric output. +WARNING: `scripted_metric` is not available in {serverless-full}. + WARNING: Using scripts can result in slower search speeds. See <>. @@ -127,7 +129,7 @@ init_script:: Executed prior to any collection of documents. Allows the ag + In the above example, the `init_script` creates an array `transactions` in the `state` object. -map_script:: Executed once per document collected. This is a required script. +map_script:: Executed once per document collected. This is a required script. + In the above example, the `map_script` checks the value of the type field. If the value is 'sale' the value of the amount field is added to the transactions array. If the value of the type field is not 'sale' the negated value of the amount field is added @@ -282,4 +284,4 @@ params:: Optional. An object whose contents will be passed as variable If a parent bucket of the scripted metric aggregation does not collect any documents an empty aggregation response will be returned from the shard with a `null` value. In this case the `reduce_script`'s `states` variable will contain `null` as a response from that shard. -`reduce_script`'s should therefore expect and deal with `null` responses from shards. +`reduce_script`'s should therefore expect and deal with `null` responses from shards. diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index e37118019a55c..f0fa4f30fd83f 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -87,8 +87,8 @@ changes to synonym files. Only to be used for search analyzers. * `expand` (defaults to `true`). Expands definitions for equivalent synonym rules. See <>. -* `lenient` (defaults to `false`). -If `true` ignores errors while parsing the synonym configuration. +* `lenient` (defaults to the value of the `updateable` setting). +If `true` ignores errors while parsing the synonym rules. It is important to note that only those synonym rules which cannot get parsed are ignored. See <> for an example of `lenient` behaviour for invalid synonym rules. @@ -181,11 +181,11 @@ This can can cause errors on the synonym rule. [WARNING] ==== -Invalid synonym rules can cause errors when applying analyzer changes. +If `lenient` is set to `false`, invalid synonym rules can cause errors when applying analyzer changes. For reloadable analyzers, this prevents reloading and applying changes. You must correct errors in the synonym rules and reload the analyzer. 
-An index with invalid synonym rules cannot be reopened, making it inoperable when: +When `lenient` is set to `false`, an index with invalid synonym rules cannot be reopened, making it inoperable when: * A node containing the index starts * The index is opened from a closed state diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index 1658f016db60b..b0020a1120fc0 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -75,8 +75,8 @@ changes to synonym files. Only to be used for search analyzers. * `expand` (defaults to `true`). Expands definitions for equivalent synonym rules. See <>. -* `lenient` (defaults to `false`). -If `true` ignores errors while parsing the synonym configuration. +* `lenient` (defaults to the value of the `updateable` setting). +If `true` ignores errors while parsing the synonym rules. It is important to note that only those synonym rules which cannot get parsed are ignored. See <> for an example of `lenient` behaviour for invalid synonym rules. @@ -169,11 +169,11 @@ This can can cause errors on the synonym rule. [WARNING] ==== -Invalid synonym rules can cause errors when applying analyzer changes. +If `lenient` is set to `false`, invalid synonym rules can cause errors when applying analyzer changes. For reloadable analyzers, this prevents reloading and applying changes. You must correct errors in the synonym rules and reload the analyzer. -An index with invalid synonym rules cannot be reopened, making it inoperable when: +When `lenient` is set to `false`, an index with invalid synonym rules cannot be reopened, making it inoperable when: * A node containing the index starts * The index is opened from a closed state diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 25881b707d724..f8d925945401e 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -334,6 +334,7 @@ All REST API parameters (both request parameters and JSON body) support providing boolean "false" as the value `false` and boolean "true" as the value `true`. All other values will raise an error. +[[api-conventions-number-values]] [discrete] === Number Values diff --git a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc index 090eda5ef5436..e4da2c45ee978 100644 --- a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc +++ b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc @@ -4,7 +4,7 @@ NOTE: {cloud-only} -You can use the following APIs to perform autoscaling operations. +You can use the following APIs to perform {cloud}/ec-autoscaling.html[autoscaling operations]. [discrete] [[autoscaling-api-top-level]] diff --git a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc index 608b7bd7cb903..190428485a003 100644 --- a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Delete autoscaling policy. +Delete {cloud}/ec-autoscaling.html[autoscaling] policy. 
[[autoscaling-delete-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc index 05724b9c48b6e..d635d8c8f7bd0 100644 --- a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc +++ b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Get autoscaling capacity. +Get {cloud}/ec-autoscaling.html[autoscaling] capacity. [[autoscaling-get-autoscaling-capacity-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc index ad00d69d1aeb2..973eedcb361c9 100644 --- a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Get autoscaling policy. +Get {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-get-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc index ff79def51ebb9..e564f83411eb4 100644 --- a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc @@ -7,7 +7,7 @@ NOTE: {cloud-only} -Creates or updates an autoscaling policy. +Creates or updates an {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-put-autoscaling-policy-request]] ==== {api-request-title} diff --git a/docs/reference/autoscaling/deciders/fixed-decider.asciidoc b/docs/reference/autoscaling/deciders/fixed-decider.asciidoc index c46d1dffe2cc8..5a8b009d9f063 100644 --- a/docs/reference/autoscaling/deciders/fixed-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/fixed-decider.asciidoc @@ -6,7 +6,7 @@ experimental[] [WARNING] The fixed decider is intended for testing only. Do not use this decider in production. -The `fixed` decider responds with a fixed required capacity. It is not enabled +The {cloud}/ec-autoscaling.html[autoscaling] `fixed` decider responds with a fixed required capacity. It is not enabled by default but can be enabled for any policy by explicitly configuring it. ==== Configuration settings diff --git a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc index 832cf330053aa..0fc9ad444a213 100644 --- a/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-existence-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-existence-decider]] === Frozen existence decider -The frozen existence decider (`frozen_existence`) ensures that once the first +The {cloud}/ec-autoscaling.html[autoscaling] frozen existence decider (`frozen_existence`) ensures that once the first index enters the frozen ILM phase, the frozen tier is scaled into existence. 
The frozen existence decider is enabled for all policies governing frozen data diff --git a/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc index ab11da04c8642..1977f95797ef0 100644 --- a/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-shards-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-shards-decider]] === Frozen shards decider -The frozen shards decider (`frozen_shards`) calculates the memory required to search +The {cloud}/ec-autoscaling.html[autoscaling] frozen shards decider (`frozen_shards`) calculates the memory required to search the current set of partially mounted indices in the frozen tier. Based on a required memory amount per shard, it calculates the necessary memory in the frozen tier. diff --git a/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc index 5a10f31f1365b..3a8e7cdb518b3 100644 --- a/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/frozen-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-frozen-storage-decider]] === Frozen storage decider -The frozen storage decider (`frozen_storage`) calculates the local storage +The {cloud}/ec-autoscaling.html[autoscaling] frozen storage decider (`frozen_storage`) calculates the local storage required to search the current set of partially mounted indices based on a percentage of the total data set size of such indices. It signals that additional storage capacity is necessary when existing capacity is less than the diff --git a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc index 26ced6ad7bb26..5432d96a47edb 100644 --- a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-machine-learning-decider]] === Machine learning decider -The {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml} +The {cloud}/ec-autoscaling.html[autoscaling] {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml} jobs and trained models. The {ml} decider is enabled for policies governing `ml` nodes. diff --git a/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc index 763f1de96f6b9..33c989f3b12eb 100644 --- a/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/proactive-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-proactive-storage-decider]] === Proactive storage decider -The proactive storage decider (`proactive_storage`) calculates the storage required to contain +The {cloud}/ec-autoscaling.html[autoscaling] proactive storage decider (`proactive_storage`) calculates the storage required to contain the current data set plus an estimated amount of expected additional data. The proactive storage decider is enabled for all policies governing nodes with the `data_hot` role. 
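For context, deciders such as `proactive_storage` take effect through an autoscaling policy. A minimal sketch, assuming a hypothetical policy name `my_hot_policy`; the empty `proactive_storage` object simply accepts the decider's default settings, and the `data_hot` role matches the nodes this decider governs per the page above:

[source,console]
----
PUT /_autoscaling/policy/my_hot_policy
{
  "roles": [ "data_hot" ],
  "deciders": {
    "proactive_storage": {}
  }
}
----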
diff --git a/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc b/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc index 50897178a88de..7c38df75169fd 100644 --- a/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/reactive-storage-decider.asciidoc @@ -2,7 +2,7 @@ [[autoscaling-reactive-storage-decider]] === Reactive storage decider -The reactive storage decider (`reactive_storage`) calculates the storage required to contain +The {cloud}/ec-autoscaling.html[autoscaling] reactive storage decider (`reactive_storage`) calculates the storage required to contain the current data set. It signals that additional storage capacity is necessary when existing capacity has been exceeded (reactively). diff --git a/docs/reference/autoscaling/index.asciidoc b/docs/reference/autoscaling/index.asciidoc index fbf1a9536973e..e70c464889419 100644 --- a/docs/reference/autoscaling/index.asciidoc +++ b/docs/reference/autoscaling/index.asciidoc @@ -4,7 +4,7 @@ NOTE: {cloud-only} -The autoscaling feature enables an operator to configure tiers of nodes that +The {cloud}/ec-autoscaling.html[autoscaling] feature enables an operator to configure tiers of nodes that self-monitor whether or not they need to scale based on an operator-defined policy. Then, via the autoscaling API, an Elasticsearch cluster can report whether or not it needs additional resources to meet the policy. For example, an diff --git a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc index 9b15bcca3fc85..a6894a933b460 100644 --- a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc @@ -17,7 +17,7 @@ PUT _application/analytics/my_analytics_collection //// -Removes an Analytics Collection and its associated data stream. +Removes a <> Collection and its associated data stream. [[delete-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/index.asciidoc b/docs/reference/behavioral-analytics/apis/index.asciidoc index 042b50259b1bb..692d3374f89f5 100644 --- a/docs/reference/behavioral-analytics/apis/index.asciidoc +++ b/docs/reference/behavioral-analytics/apis/index.asciidoc @@ -9,7 +9,7 @@ beta::[] --- -Use the following APIs to manage tasks and resources related to Behavioral Analytics: +Use the following APIs to manage tasks and resources related to <>: * <> * <> diff --git a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc index 8d2491ff8a6ee..14511a1258278 100644 --- a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc @@ -24,7 +24,7 @@ DELETE _application/analytics/my_analytics_collection2 // TEARDOWN //// -Returns information about Analytics Collections. +Returns information about <> Collections. 
[[list-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc index 84d9cb5351799..f82717e22ed34 100644 --- a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc +++ b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc @@ -22,7 +22,7 @@ DELETE _application/analytics/my_analytics_collection // TEARDOWN //// -Post an event to an Analytics Collection. +Post an event to a <> Collection. [[post-analytics-collection-event-request]] ==== {api-request-title} diff --git a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc index 48273fb3906c4..cbbab2ae3e26c 100644 --- a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc @@ -16,7 +16,7 @@ DELETE _application/analytics/my_analytics_collection // TEARDOWN //// -Creates an Analytics Collection. +Creates a <> Collection. [[put-analytics-collection-request]] ==== {api-request-title} diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index 058f4e69ae8e3..c3292fc9971ee 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -39,7 +39,7 @@ The cat recovery API returns information about shard recoveries, both ongoing and completed. It is a more compact view of the JSON <> API. -include::{es-ref-dir}/indices/recovery.asciidoc[tag=shard-recovery-desc] +include::{es-ref-dir}/modules/shard-recovery-desc.asciidoc[] [[cat-recovery-path-params]] diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index 1c72fb8742b93..b510163bab50b 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Delete auto-follow pattern ++++ -Delete auto-follow patterns. +Delete {ccr} <>. [[ccr-delete-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index 46ef288b05088..a2969e993ddfb 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Get auto-follow pattern ++++ -Get auto-follow patterns. +Get {ccr} <>. [[ccr-get-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc index 1e64ab813e2ad..c5ae5a7b4af9d 100644 --- a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Pause auto-follow pattern ++++ -Pauses an auto-follow pattern. +Pauses a {ccr} <>. 
[[ccr-pause-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index d08997068f705..6769f21ca5cef 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Create auto-follow pattern ++++ -Creates an auto-follow pattern. +Creates a {ccr} <>. [[ccr-put-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc index 04da9b4a35ba0..a580bb3838f9b 100644 --- a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Resume auto-follow pattern ++++ -Resumes an auto-follow pattern. +Resumes a {ccr} <>. [[ccr-resume-auto-follow-pattern-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc index 0c9f033639eda..ae94e1931af85 100644 --- a/docs/reference/ccr/apis/ccr-apis.asciidoc +++ b/docs/reference/ccr/apis/ccr-apis.asciidoc @@ -2,7 +2,7 @@ [[ccr-apis]] == {ccr-cap} APIs -You can use the following APIs to perform {ccr} operations. +You can use the following APIs to perform <> operations. [discrete] [[ccr-api-top-level]] diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index 68fd6e210f884..6c049d9c92b59 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -5,7 +5,7 @@ Get follower info ++++ -Retrieves information about all follower indices. +Retrieves information about all <> follower indices. [[ccr-get-follow-info-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index 72224cc7f51f4..4892f86b3523d 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -5,7 +5,7 @@ Get follower stats ++++ -Get follower stats. +Get <> follower stats. [[ccr-get-follow-stats-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc index ea7e8640056bf..1917c08d6640d 100644 --- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -5,7 +5,7 @@ Forget follower ++++ -Removes the follower retention leases from the leader. +Removes the <> follower retention leases from the leader. [[ccr-post-forget-follower-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index a4ab69aba8d84..6d4730d10efe6 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -5,7 +5,7 @@ Pause follower ++++ -Pauses a follower index. +Pauses a <> follower index. 
[[ccr-post-pause-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index 47ba51a3fb8a0..b023a8cb5cb70 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -5,7 +5,7 @@ Resume follower ++++ -Resumes a follower index. +Resumes a <> follower index. [[ccr-post-resume-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index b96777b455d3b..dab11ef9e7a54 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -5,7 +5,7 @@ Unfollow ++++ -Converts a follower index to a regular index. +Converts a <> follower index to a regular index. [[ccr-post-unfollow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index eb83e2a13dcf1..b7ae9ac987474 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -5,7 +5,7 @@ Create follower ++++ -Creates a follower index. +Creates a <> follower index. [[ccr-put-follow-request]] ==== {api-request-title} diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 128df5e47c777..92e6bae0bdce8 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -6,7 +6,7 @@ Get {ccr-init} stats ++++ -Get {ccr} stats. +Get <> stats. [[ccr-get-stats-request]] ==== {api-request-title} diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 0b0fde6546c29..7547dd74c5ecd 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -4,7 +4,7 @@ Cluster allocation explain ++++ -Provides an explanation for a shard's current allocation. +Provides an explanation for a shard's current <>. [source,console] ---- @@ -81,6 +81,7 @@ you might expect otherwise. ===== Unassigned primary shard +====== Conflicting settings The following request gets an allocation explanation for an unassigned primary shard. @@ -158,6 +159,56 @@ node. <5> The decider which led to the `no` decision for the node. <6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision. In this example, a newly created index has <> that requires that it only be allocated to a node named `nonexistent_node`, which does not exist, so the index is unable to allocate. +====== Maximum number of retries exceeded + +The following response contains an allocation explanation for an unassigned +primary shard that has reached the maximum number of allocation retry attempts. 
+ +[source,js] +---- +{ + "index" : "my-index-000001", + "shard" : 0, + "primary" : true, + "current_state" : "unassigned", + "unassigned_info" : { + "at" : "2017-01-04T18:03:28.464Z", + "details" : "failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException", + "reason": "ALLOCATION_FAILED", + "failed_allocation_attempts": 5, + "last_allocation_status": "no" + }, + "can_allocate": "no", + "allocate_explanation": "cannot allocate because allocation is not permitted to any of the nodes", + "node_allocation_decisions" : [ + { + "node_id" : "3sULLVJrRneSg0EfBB-2Ew", + "node_name" : "node_t0", + "transport_address" : "127.0.0.1:9400", + "roles" : ["data_content", "data_hot"], + "node_decision" : "no", + "store" : { + "matching_size" : "4.2kb", + "matching_size_in_bytes" : 4325 + }, + "deciders" : [ + { + "decider": "max_retry", + "decision" : "NO", + "explanation": "shard has exceeded the maximum number of retries [5] on failed allocation attempts - manually call [/_cluster/reroute?retry_failed=true] to retry, [unassigned_info[[reason=ALLOCATION_FAILED], at[2024-07-30T21:04:12.166Z], failed_attempts[5], failed_nodes[[mEKjwwzLT1yJVb8UxT6anw]], delayed=false, details[failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException], allocation_status[deciders_no]]]" + } + ] + } + ] +} +---- +// NOTCONSOLE + +If the decider message indicates a transient allocation issue, use +<> to retry allocation. + +====== No valid shard copy + The following response contains an allocation explanation for an unassigned primary shard that was previously allocated. @@ -184,6 +235,8 @@ TIP: If a shard is unassigned with an allocation status of `no_valid_shard_copy` ===== Unassigned replica shard +====== Allocation delayed + The following response contains an allocation explanation for a replica that's unassigned due to <>. @@ -241,8 +294,52 @@ unassigned due to <>. <2> The remaining delay before allocating the replica shard. <3> Information about the shard data found on a node. +====== Allocation throttled + +The following response contains an allocation explanation for a replica that's +queued to allocate but currently waiting on other queued shards. + +[source,js] +---- +{ + "index" : "my-index-000001", + "shard" : 0, + "primary" : false, + "current_state" : "unassigned", + "unassigned_info" : { + "reason" : "NODE_LEFT", + "at" : "2017-01-04T18:53:59.498Z", + "details" : "node_left[G92ZwuuaRY-9n8_tc-IzEg]", + "last_allocation_status" : "no_attempt" + }, + "can_allocate": "throttled", + "allocate_explanation": "Elasticsearch is currently busy with other activities. It expects to be able to allocate this shard when those activities finish. Please wait.", + "node_allocation_decisions" : [ + { + "node_id" : "3sULLVJrRneSg0EfBB-2Ew", + "node_name" : "node_t0", + "transport_address" : "127.0.0.1:9400", + "roles" : ["data_content", "data_hot"], + "node_decision" : "no", + "deciders" : [ + { + "decider": "throttling", + "decision": "THROTTLE", + "explanation": "reached the limit of incoming shard recoveries [2], cluster setting [cluster.routing.allocation.node_concurrent_incoming_recoveries=2] (can also be set via [cluster.routing.allocation.node_concurrent_recoveries])" + } + ] + } + ] +} +---- +// NOTCONSOLE + +This is a transient message that might appear when a large number of shards are allocating. + ===== Assigned shard +====== Cannot remain on current node + The following response contains an allocation explanation for an assigned shard.
The response indicates the shard is not allowed to remain on its current node and must be reallocated. @@ -295,6 +392,8 @@ and must be reallocated. <2> The deciders that factored into the decision of why the shard is not allowed to remain on its current node. <3> Whether the shard is allowed to be allocated to another node. +====== Must remain on current node + The following response contains an allocation explanation for a shard that must remain on its current node. Moving the shard to another node would not improve cluster balance. @@ -338,7 +437,7 @@ cluster balance. ===== No arguments If you call the API with no arguments, {es} retrieves an allocation explanation -for an arbitrary unassigned primary or replica shard. +for an arbitrary unassigned primary or replica shard, returning any unassigned primary shards first. [source,console] ---- diff --git a/docs/reference/cluster/delete-desired-balance.asciidoc b/docs/reference/cluster/delete-desired-balance.asciidoc index f81dcab011da4..c67834269e505 100644 --- a/docs/reference/cluster/delete-desired-balance.asciidoc +++ b/docs/reference/cluster/delete-desired-balance.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -Discards the current desired balance and computes a new desired balance starting from the current allocation of shards. +Discards the current <> and computes a new desired balance starting from the current allocation of shards. This can sometimes help {es} find a desired balance which needs fewer shard movements to achieve, especially if the cluster has experienced changes so substantial that the current desired balance is no longer optimal without {es} having detected that the current desired balance will take more shard movements to achieve than needed. However, this API diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc index 3fd87dcfedc4f..74afdaa52daf1 100644 --- a/docs/reference/cluster/get-desired-balance.asciidoc +++ b/docs/reference/cluster/get-desired-balance.asciidoc @@ -8,7 +8,7 @@ NOTE: {cloud-only} Exposes: -* the desired balance computation and reconciliation stats +* the <> computation and reconciliation stats * balancing stats such as distribution of shards, disk and ingest forecasts across nodes and data tiers (based on the current cluster state) * routing table with each shard current and desired location diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 084ff471367ce..61c58cea95b83 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -808,6 +808,14 @@ This is not shown for the `shards` level, since mappings may be shared across th `total_estimated_overhead_in_bytes`:: (integer) Estimated heap overhead, in bytes, of mappings on this node, which allows for 1kiB of heap for every mapped field. 
+`total_segments`:: +(integer) Estimated number of Lucene segments on this node + +`total_segment_fields`:: +(integer) Estimated number of fields at the segment level on this node + +`average_fields_per_segment`:: +(integer) Estimated average number of fields per segment on this node ======= `dense_vector`:: @@ -834,6 +842,142 @@ This is not shown for the `shards` level, since mappings may be shared across th ======= +`shards`:: +(object) When the `shards` level is requested, contains the aforementioned `indices` statistics for every shard (per +index, and then per shard ID), as well as the following shard-specific statistics (which are not shown when the +requested level is higher than `shards`): ++ +.Additional shard-specific statistics for the `shards` level +[%collapsible%open] +======= + +`routing`:: +(object) Contains routing information about the shard. ++ +.Properties of `routing` +[%collapsible%open] +======== + +`state`:: +(string) State of the shard. Returned values are: ++ +* `INITIALIZING`: The shard is initializing/recovering. +* `RELOCATING`: The shard is relocating. +* `STARTED`: The shard has started. +* `UNASSIGNED`: The shard is not assigned to any node. + +`primary`:: +(Boolean) Whether the shard is a primary shard or not. + +`node`:: +(string) ID of the node the shard is allocated to. + +`relocating_node`:: +(string) ID of the node the shard is either relocating to or relocating from, or null if shard is not relocating. + +======== + +`commit`:: +(object) Contains information regarding the last commit point of the shard. ++ +.Properties of `commit` +[%collapsible%open] +======== + +`id`:: +(string) Base64 version of the commit ID. + +`generation`:: +(integer) Lucene generation of the commit. + +`user_data`:: +(object) Contains additional technical information about the commit. + +`num_docs`:: +(integer) The number of docs in the commit. + +======== + +`seq_no`:: +(object) Contains information about <> and checkpoints for the shard. ++ +.Properties of `seq_no` +[%collapsible%open] +======== + +`max_seq_no`:: +(integer) The maximum sequence number issued so far. + +`local_checkpoint`:: +(integer) The current local checkpoint of the shard. + +`global_checkpoint`:: +(integer) The current global checkpoint of the shard. + +======== + +`retention_leases`:: +(object) Contains information about <>. ++ +.Properties of `retention_leases` +[%collapsible%open] +======== + +`primary_term`:: +(integer) The primary term of this retention lease collection. + +`version`:: +(integer) The current version of the retention lease collection. + +`leases`:: +(array of objects) List of current leases for this shard. ++ +.Properties of `leases` +[%collapsible%open] +========= + +`id`:: +(string) The ID of the lease. + +`retaining_seq_no`:: +(integer) The minimum sequence number to be retained by the lease. + +`timestamp`:: +(integer) The timestamp of when the lease was created or renewed. +Recorded in milliseconds since the {wikipedia}/Unix_time[Unix Epoch]. + +`source`:: +(string) The source of the lease. + +========= +======== + +`shard_path`:: +(object) ++ +.Properties of `shard_path` +[%collapsible%open] +======== + +`state_path`:: +(string) The state-path root, without the index name and the shard ID. + +`data_path`:: +(string) The data-path root, without the index name and the shard ID. + +`is_custom_data_path`:: +(boolean) Whether the data path is a custom data location and therefore outside of the nodes configured data paths. 
+ +======== + +`search_idle`:: +(boolean) Whether the shard is <> or not. + +`search_idle_time`:: +(integer) Time since previous searcher access. +Recorded in milliseconds. + +======= ====== [[cluster-nodes-stats-api-response-body-os]] diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 3b429ef427071..c39bc0dcd2878 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1282,6 +1282,31 @@ They are included here for expert users, but should otherwise be ignored. ===== +==== + +`repositories`:: +(object) Contains statistics about the <> repositories defined in the cluster, broken down +by repository type. ++ +.Properties of `repositories` +[%collapsible%open] +===== + +`count`::: +(integer) The number of repositories of this type in the cluster. + +`read_only`::: +(integer) The number of repositories of this type in the cluster which are registered read-only. + +`read_write`::: +(integer) The number of repositories of this type in the cluster which are not registered as read-only. + +Each repository type may also include other statistics about the repositories of that type here. + +===== + +==== + [[cluster-stats-api-example]] ==== {api-examples-title} @@ -1579,6 +1604,9 @@ The API returns the following response: }, "snapshots": { ... + }, + "repositories": { + ... } } -------------------------------------------------- @@ -1589,6 +1617,7 @@ The API returns the following response: // TESTRESPONSE[s/"count": \{[^\}]*\}/"count": $body.$_path/] // TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/] // TESTRESPONSE[s/"snapshots": \{[^\}]*\}/"snapshots": $body.$_path/] +// TESTRESPONSE[s/"repositories": \{[^\}]*\}/"repositories": $body.$_path/] // TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/] // TESTRESPONSE[s/"runtime_field_types": \[[^\]]*\]/"runtime_field_types": $body.$_path/] // TESTRESPONSE[s/"search": \{[^\}]*\}/"search": $body.$_path/] @@ -1600,7 +1629,7 @@ The API returns the following response: // the plugins that will be in it. And because we figure folks don't need to // see an exhaustive list anyway. // 2. Similarly, ignore the contents of `network_types`, `discovery_types`, -// `packaging_types` and `snapshots`. +// `packaging_types`, `snapshots` and `repositories`. // 3. Ignore the contents of the (nodes) count object, as what's shown here // depends on the license. Voting-only nodes are e.g. only shown when this // test runs with a basic license. diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 076b315558b60..1290f289e5bbd 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -5,7 +5,7 @@ [[data-streams-change-mappings-and-settings]] === Change mappings and settings for a data stream -Each data stream has a <> has a <>. Mappings and index settings from this template are applied to new backing indices created for the stream. This includes the stream's first backing index, which is auto-generated when the stream is created. 
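For the new `shards` level of the node stats response and the new `repositories` section of the cluster stats response described above, requests along the following lines should surface the added fields. This is only a sketch: `level` and `filter_path` are standard query parameters, and the exact output depends on your cluster.

[source,console]
----
GET _nodes/stats/indices?level=shards

GET _cluster/stats?filter_path=repositories
----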
diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index 771a08d97d949..44ae77d072034 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -14,7 +14,7 @@ DELETE _ingest/pipeline/my-timestamp-pipeline // TEARDOWN //// -The recommended way to downsample a time series data stream (TSDS) is +The recommended way to <> a <> is <>. However, if you're not using ILM, you can downsample a TSDS manually. This guide shows you how, using typical Kubernetes cluster monitoring data. @@ -32,7 +32,7 @@ To test out manual downsampling, follow these steps: ==== Prerequisites * Refer to the <>. -* It is not possible to downsample a data stream directly, nor +* It is not possible to downsample a <> directly, nor multiple indices at once. It's only possible to downsample one time series index (TSDS backing index). * In order to downsample an index, it needs to be read-only. For a TSDS write diff --git a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc index f20c949c2fbc8..315f7fa85e45f 100644 --- a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc @@ -4,7 +4,7 @@ Delete Data Stream Lifecycle ++++ -Deletes the lifecycle from a set of data streams. +Deletes the <> from a set of data streams. [[delete-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc index 7968bb78939e8..2b15886ebe192 100644 --- a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc @@ -4,7 +4,7 @@ Explain Data Stream Lifecycle ++++ -Retrieves the current data stream lifecycle status for one or more data stream backing indices. +Retrieves the current <> status for one or more data stream backing indices. [[explain-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc index a99fa19d9db8d..f48fa1eb52daa 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc @@ -4,7 +4,7 @@ Get Data Stream Lifecycle ++++ -Gets stats about the execution of data stream lifecycle. +Gets stats about the execution of <>. [[get-lifecycle-stats-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index 331285af395b6..6bac1c7f7cc75 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -4,7 +4,7 @@ Get Data Stream Lifecycle ++++ -Gets the lifecycle of a set of data streams. +Gets the <> of a set of <>. 
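Relating to the manual downsampling prerequisites noted above (one backing index at a time, and the index must be read-only), a minimal sketch of the two steps might look like the following. The backing index name, target index name, and the one-hour interval are hypothetical; the write block is added with the add index block API, and the downsample request names the target index in the path.

[source,console]
----
PUT /.ds-my-tsds-2024.08.26-000001/_block/write

POST /.ds-my-tsds-2024.08.26-000001/_downsample/my-tsds-downsampled-1h
{
  "fixed_interval": "1h"
}
----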
[[get-lifecycle-api-prereqs]] ==== {api-prereq-title} @@ -128,14 +128,18 @@ The response will look like the following: "name": "my-data-stream-1", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" } }, { "name": "my-data-stream-2", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" } } ] diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index 7d33a5b5f880c..c60c105e818ab 100644 --- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -4,7 +4,7 @@ Put Data Stream Lifecycle ++++ -Configures the data stream lifecycle for the targeted data streams. +Configures the data stream <> for the targeted <>. [[put-lifecycle-api-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index 16ccf2ef82391..e4d5acfb704d3 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -14,10 +14,11 @@ To achieve that, it supports: * Automatic <>, which chunks your incoming data in smaller pieces to facilitate better performance and backwards incompatible mapping changes. * Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. -{es} is allowed at a later time to delete data older than this time period. +{es} is allowed at a later time to delete data older than this time period. Retention can be configured on the data stream level +or on a global level. Read more about the different options in this <>. A data stream lifecycle also supports downsampling the data stream backing indices. -See <> for +See <> for more details. [discrete] @@ -33,16 +34,17 @@ each data stream and performs the following steps: 3. After an index is not the write index anymore (i.e. the data stream has been rolled over), automatically tail merges the index. Data stream lifecycle executes a merge operation that only targets the long tail of small segments instead of the whole shard. As the segments are organised -into tiers of exponential sizes, merging the long tail of small segments is only a +into tiers of exponential sizes, merging the long tail of small segments is only a fraction of the cost of force merging to a single segment. The small segments would usually hold the most recent data so tail merging will focus the merging resources on the higher-value data that is most likely to keep being queried. -4. If <> is configured it will execute +4. If <> is configured it will execute all the configured downsampling rounds. 5. Applies retention to the remaining backing indices. This means deleting the backing indices whose -`generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing -indices and it is either the time since the backing index got rolled over, or the time optionally configured in the -<> setting. +`generation_time` is longer than the effective retention period (read more about the +<>). 
The `generation_time` is only applicable to rolled
+over backing indices and it is either the time since the backing index got rolled over, or the time optionally configured
+in the <> setting.

 IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing
 index have passed the retention period. As a result, the retention period is not the exact time data gets deleted, but
@@ -75,4 +77,6 @@ include::tutorial-manage-new-data-stream.asciidoc[]
 include::tutorial-manage-existing-data-stream.asciidoc[]
+include::tutorial-manage-data-stream-retention.asciidoc[]
+
 include::tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc[]
diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc
new file mode 100644
index 0000000000000..1b2996c62e2df
--- /dev/null
+++ b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc
@@ -0,0 +1,218 @@
+[role="xpack"]
+[[tutorial-manage-data-stream-retention]]
+=== Tutorial: Data stream retention
+
+In this tutorial, we are going to go over the data stream lifecycle retention; we will define it, go over how it can be configured
+and how it gets applied. Keep in mind that the following options apply only to data streams that are managed by the data stream lifecycle.
+
+. <>
+. <>
+. <>
+. <>
+
+You can verify whether a data stream is managed by the data stream lifecycle via the <>:
+
+////
+[source,console]
+----
+PUT /_index_template/template
+{
+  "index_patterns": ["my-data-stream*"],
+  "template": {
+    "lifecycle": {}
+  },
+  "data_stream": { }
+}
+
+PUT /_data_stream/my-data-stream
+----
+// TESTSETUP
+////
+
+////
+[source,console]
+----
+DELETE /_data_stream/my-data-stream*
+DELETE /_index_template/template
+PUT /_cluster/settings
+{
+  "persistent" : {
+    "data_streams.lifecycle.retention.*" : null
+  }
+}
+----
+// TEARDOWN
+////
+
+[source,console]
+--------------------------------------------------
+GET _data_stream/my-data-stream/_lifecycle
+--------------------------------------------------
+
+The result should look like this:
+
+[source,console-result]
+--------------------------------------------------
+{
+  "data_streams": [
+    {
+      "name": "my-data-stream", <1>
+      "lifecycle": {
+        "enabled": true <2>
+      }
+    }
+  ]
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:the result is for illustrating purposes only]
+<1> The name of your data stream.
+<2> Ensure that the lifecycle is enabled, meaning this should be `true`.
+
+[discrete]
+[[what-is-retention]]
+==== What is data stream retention?
+
+We define retention as the minimum amount of time the data of a data stream will be kept in {es}. After this time period
+has passed, {es} is allowed to remove these data to free up space and/or manage costs.
+
+NOTE: Retention does not define when the data will be removed, but the minimum time period they will be kept.
+
+We define 4 different types of retention:
+
+* The data stream retention, or `data_retention`, which is the retention configured on the data stream level. It can be
+set via an <> for future data streams or via the <> for an existing data stream. When the data stream retention is not set, it implies that the data
+need to be kept forever.
+* The global default retention, let's call it `default_retention`, which is a retention configured via the cluster setting
+<> and will be
+applied to all data streams managed by data stream lifecycle that do not have `data_retention` configured. Effectively,
+it ensures that there will be no data streams keeping their data forever. This can be set via the
+<>.
+* The global max retention, let's call it `max_retention`, which is a retention configured via the cluster setting
+<> and will be applied to
+all data streams managed by data stream lifecycle. Effectively, it ensures that there will be no data streams whose retention
+will exceed this time period. This can be set via the <>.
+* The effective retention, or `effective_retention`, which is the retention applied to a data stream at a given moment.
+Effective retention cannot be set; it is derived by taking into account all the configured retention listed above and is
+calculated as described <>.
+
+NOTE: Global default and max retention do not apply to data streams internal to elastic. Internal data streams are recognised
+ either by having the `system` flag set to `true` or if their name is prefixed with a dot (`.`).
+
+[discrete]
+[[retention-configuration]]
+==== How to configure retention?
+
+- By setting the `data_retention` on the data stream level. This retention can be configured in two ways:
++
+-- For new data streams, it can be defined in the index template that would be applied during the data stream's creation.
+You can use the <>, for example:
++
+[source,console]
+--------------------------------------------------
+PUT _index_template/template
+{
+  "index_patterns": ["my-data-stream*"],
+  "data_stream": { },
+  "priority": 500,
+  "template": {
+    "lifecycle": {
+      "data_retention": "7d"
+    }
+  },
+  "_meta": {
+    "description": "Template with data stream lifecycle"
+  }
+}
+--------------------------------------------------
+-- For an existing data stream, it can be set via the <>.
++
+[source,console]
+----
+PUT _data_stream/my-data-stream/_lifecycle
+{
+  "data_retention": "30d" <1>
+}
+----
+// TEST[continued]
+<1> The retention period of this data stream is set to 30 days.
+
+- By setting the global retention via the `data_streams.lifecycle.retention.default` and/or `data_streams.lifecycle.retention.max`
+that are set on a cluster level. These can be set via the <>. For example:
++
+[source,console]
+--------------------------------------------------
+PUT /_cluster/settings
+{
+  "persistent" : {
+    "data_streams.lifecycle.retention.default" : "7d",
+    "data_streams.lifecycle.retention.max" : "90d"
+  }
+}
+--------------------------------------------------
+// TEST[continued]
+
+[discrete]
+[[effective-retention-calculation]]
+==== How is the effective retention calculated?
+The effective retention is calculated in the following way:
+
+- The `effective_retention` is the `default_retention`, when `default_retention` is defined and the data stream does not
+have `data_retention`.
+- The `effective_retention` is the `data_retention`, when `data_retention` is defined and, if `max_retention` is defined,
+it is less than the `max_retention`.
+- The `effective_retention` is the `max_retention`, when `max_retention` is defined, and the data stream has either no
+`data_retention` or its `data_retention` is greater than the `max_retention`.
+ +The above is demonstrated in the examples below: + +|=== +|`default_retention` |`max_retention` |`data_retention` |`effective_retention` |Retention determined by + +|Not set |Not set |Not set |Infinite |N/A +|Not relevant |12 months |**30 days** |30 days |`data_retention` +|Not relevant |Not set |**30 days** |30 days |`data_retention` +|**30 days** |12 months |Not set |30 days |`default_retention` +|**30 days** |30 days |Not set |30 days |`default_retention` +|Not relevant |**30 days** |12 months |30 days |`max_retention` +|Not set |**30 days** |Not set |30 days |`max_retention` +|=== + +Considering our example, if we retrieve the lifecycle of `my-data-stream`: +[source,console] +---- +GET _data_stream/my-data-stream/_lifecycle +---- +// TEST[continued] + +We see that it will remain the same with what the user configured: +[source,console-result] +---- +{ + "data_streams": [ + { + "name": "my-data-stream", + "lifecycle": { + "enabled": true, + "data_retention": "30d", + "effective_retention": "30d", + "retention_determined_by": "data_stream_configuration" + } + } + ] +} +---- + +[discrete] +[[effective-retention-application]] +==== How is the effective retention applied? + +Retention is applied to the remaining backing indices of a data stream as the last step of +<>. Data stream lifecycle will retrieve the backing indices +whose `generation_time` is longer than the effective retention period and delete them. The `generation_time` is only +applicable to rolled over backing indices and it is either the time since the backing index got rolled over, or the time +optionally configured in the <> setting. + +IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing +index have passed the retention period. As a result, the retention period is not the exact time data get deleted, but +the minimum time data will be stored. diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc index c34340a096046..01d51cdde3167 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc @@ -91,10 +91,12 @@ The result will look like this: { "data_streams": [ { - "name": "my-data-stream",<1> + "name": "my-data-stream", <1> "lifecycle": { - "enabled": true, <2> - "data_retention": "7d" <3> + "enabled": true, <2> + "data_retention": "7d", <3> + "effective_retention": "7d", <4> + "retention_determined_by": "data_stream_configuration" } } ] @@ -102,8 +104,9 @@ The result will look like this: -------------------------------------------------- <1> The name of your data stream. <2> Shows if the data stream lifecycle is enabled for this data stream. -<3> The retention period of the data indexed in this data stream, this means that the data in this data stream will -be kept at least for 7 days. After that {es} can delete it at its own discretion. +<3> The retention period of the data indexed in this data stream, as configured by the user. +<4> The retention period that will be applied by the data stream lifecycle. This means that the data in this data stream will + be kept at least for 7 days. After that {es} can delete it at its own discretion. 
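To make the interaction between the retention types above concrete, here is a sketch (the stream name and values are illustrative) in which a data stream asks for more retention than the global maximum allows. Following the calculation rules above, the get data stream lifecycle API would then report an `effective_retention` of `90d` rather than the configured `180d`.

[source,console]
----
PUT /_cluster/settings
{
  "persistent": {
    "data_streams.lifecycle.retention.max": "90d"
  }
}

PUT _data_stream/my-data-stream/_lifecycle
{
  "data_retention": "180d"
}
----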
If you want to see more information about how the data stream lifecycle is applied on individual backing indices use the <>: diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index 5b2e2a1ec70a2..a2c12466b7f2b 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -1,14 +1,14 @@ [role="xpack"] [[tutorial-migrate-data-stream-from-ilm-to-dsl]] -=== Tutorial: Migrate ILM managed data stream to data stream lifecycle +=== Tutorial: Migrate ILM managed data stream to data stream lifecycle -In this tutorial we'll look at migrating an existing data stream from Index Lifecycle Management ({ilm-init}) to -data stream lifecycle. The existing {ilm-init} managed backing indices will continue +In this tutorial we'll look at migrating an existing data stream from <> to +<>. The existing {ilm-init} managed backing indices will continue to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however, -the new backing indices will be managed by data stream lifecycle. -This way, a data stream is gradually migrated away from being managed by {ilm-init} to +the new backing indices will be managed by data stream lifecycle. +This way, a data stream is gradually migrated away from being managed by {ilm-init} to being managed by data stream lifecycle. As we'll see, {ilm-init} and data stream lifecycle -can co-manage a data stream; however, an index can only be managed by one system at +can co-manage a data stream; however, an index can only be managed by one system at a time. [discrete] @@ -17,7 +17,7 @@ a time. To migrate a data stream from {ilm-init} to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the data stream to set <> +1. Update the index template that's backing the data stream to set <> to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ data stream using the <>. @@ -174,8 +174,8 @@ in the index template). To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the data stream to set <> -to `false`, and to configure data stream lifecycle. +1. Update the index template that's backing the data stream to set <> +to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using the <>. @@ -209,9 +209,9 @@ PUT _index_template/dsl-data-stream-template // TEST[continued] <1> The `prefer_ilm` setting will now be configured on the **new** backing indices -(created by rolling over the data stream) such that {ilm-init} does _not_ take +(created by rolling over the data stream) such that {ilm-init} does _not_ take precedence over data stream lifecycle. -<2> We're configuring the data stream lifecycle so _new_ data streams will be +<2> We're configuring the data stream lifecycle so _new_ data streams will be managed by data stream lifecycle. We've now made sure that new data streams will be managed by data stream lifecycle. 
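As a condensed sketch of step 1 above (the full template body is abridged in this diff), an updated index template that keeps the existing {ilm-init} policy but lets data stream lifecycle manage new backing indices could look roughly like this. The template, pattern, and policy names follow the tutorial; the retention value is illustrative.

[source,console]
----
PUT _index_template/dsl-data-stream-template
{
  "index_patterns": ["dsl-data-stream*"],
  "data_stream": {},
  "priority": 500,
  "template": {
    "settings": {
      "index.lifecycle.name": "pre-dsl-ilm-policy",
      "index.lifecycle.prefer_ilm": false
    },
    "lifecycle": {
      "data_retention": "7d"
    }
  }
}
----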
@@ -227,7 +227,7 @@ PUT _data_stream/dsl-data-stream/_lifecycle ---- // TEST[continued] -We can inspect the data stream to check that the next generation will indeed be +We can inspect the data stream to check that the next generation will indeed be managed by data stream lifecycle: [source,console] @@ -266,7 +266,9 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", <3> @@ -292,7 +294,7 @@ GET _data_stream/dsl-data-stream <4> The `prefer_ilm` setting value we configured in the index template is reflected and will be configured accordingly for new backing indices. -We'll now rollover the data stream to see the new generation index being managed by +We'll now rollover the data stream to see the new generation index being managed by data stream lifecycle: [source,console] @@ -344,7 +346,9 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", @@ -375,9 +379,9 @@ in the index template [discrete] [[migrate-from-dsl-to-ilm]] ==== Migrate data stream back to ILM -We can easily change this data stream to be managed by {ilm-init} because we didn't remove -the {ilm-init} policy when we <>. +We can easily change this data stream to be managed by {ilm-init} because we didn't remove +the {ilm-init} policy when we <>. We can achieve this in two ways: diff --git a/docs/reference/data-streams/modify-data-streams-api.asciidoc b/docs/reference/data-streams/modify-data-streams-api.asciidoc index f05e76e67c32f..2da869083df22 100644 --- a/docs/reference/data-streams/modify-data-streams-api.asciidoc +++ b/docs/reference/data-streams/modify-data-streams-api.asciidoc @@ -4,7 +4,7 @@ Modify data streams ++++ -Performs one or more data stream modification actions in a single atomic +Performs one or more <> modification actions in a single atomic operation. [source,console] diff --git a/docs/reference/data-streams/promote-data-stream-api.asciidoc b/docs/reference/data-streams/promote-data-stream-api.asciidoc index 281e9b549abcb..111c7a2256f8a 100644 --- a/docs/reference/data-streams/promote-data-stream-api.asciidoc +++ b/docs/reference/data-streams/promote-data-stream-api.asciidoc @@ -5,7 +5,7 @@ Promote data stream ++++ -The purpose of the promote data stream api is to turn +The purpose of the promote <> API is to turn a data stream that is replicated by CCR into a regular data stream. diff --git a/docs/reference/data-streams/tsds-reindex.asciidoc b/docs/reference/data-streams/tsds-reindex.asciidoc index ea4ba16df5c4a..9d6594db4e779 100644 --- a/docs/reference/data-streams/tsds-reindex.asciidoc +++ b/docs/reference/data-streams/tsds-reindex.asciidoc @@ -9,7 +9,7 @@ [[tsds-reindex-intro]] ==== Introduction -With reindexing, you can copy documents from an old time-series data stream (TSDS) to a new one. Data streams support +With reindexing, you can copy documents from an old <> to a new one. Data streams support reindexing in general, with a few <>. 
Still, time-series data streams introduce additional challenges due to tight control on the accepted timestamp range for each backing index they contain. Direct use of the reindex API would likely error out due to attempting to insert documents with timestamps that are diff --git a/docs/reference/data-streams/tsds.asciidoc b/docs/reference/data-streams/tsds.asciidoc index de89fa1ca3f31..01573658c33d0 100644 --- a/docs/reference/data-streams/tsds.asciidoc +++ b/docs/reference/data-streams/tsds.asciidoc @@ -107,6 +107,7 @@ parameter: * <> * <> * <> +* <> For a flattened field, use the `time_series_dimensions` parameter to configure an array of fields as dimensions. For details refer to <>. diff --git a/docs/reference/eql/eql-apis.asciidoc b/docs/reference/eql/eql-apis.asciidoc index d3f591ccfe6c1..e8cc2b21492ae 100644 --- a/docs/reference/eql/eql-apis.asciidoc +++ b/docs/reference/eql/eql-apis.asciidoc @@ -1,7 +1,7 @@ [[eql-apis]] == EQL APIs -Event Query Language (EQL) is a query language for event-based time series data, +<> is a query language for event-based time series data, such as logs, metrics, and traces. For an overview of EQL and related tutorials, see <>. diff --git a/docs/reference/esql/esql-across-clusters.asciidoc b/docs/reference/esql/esql-across-clusters.asciidoc index 8bc1e2a83fc19..d13b3db1c73ea 100644 --- a/docs/reference/esql/esql-across-clusters.asciidoc +++ b/docs/reference/esql/esql-across-clusters.asciidoc @@ -54,11 +54,6 @@ Refer to <> for prerequisi [[esql-ccs-security-model-api-key]] ===== API key authentication -[NOTE] -==== -`ENRICH` is *not supported* in this version when using {esql} with the API key based security model. -==== - The following information pertains to using {esql} across clusters with the <>. You'll need to follow the steps on that page for the *full setup instructions*. This page only contains additional information specific to {esql}. API key based cross-cluster search (CCS) enables more granular control over allowed actions between clusters. @@ -71,6 +66,7 @@ You will need to: Using {esql} with the API key based security model requires some additional permissions that may not be needed when using the traditional query DSL based search. The following example API call creates a role that can query remote indices using {esql} when using the API key based security model. +The final privilege, `remote_cluster`, is required to allow remote enrich operations. [source,console] ---- @@ -89,7 +85,17 @@ POST /_security/role/remote1 "privileges": [ "read","read_cross_cluster" ], <4> "clusters" : ["my_remote_cluster"] <5> } - ] + ], + "remote_cluster": [ <6> + { + "privileges": [ + "monitor_enrich" + ], + "clusters": [ + "my_remote_cluster" + ] + } + ] } ---- @@ -100,6 +106,7 @@ POST /_security/role/remote1 <5> The remote clusters to which these privileges apply. This remote cluster must be configured with a <> and connected to the remote cluster before the remote index can be queried. Verify connection using the <> API. +<6> Required to allow remote enrichment. Without this, the user cannot read from the `.enrich` indices on the remote cluster. The `remote_cluster` security privilege was introduced in version *8.15.0*. You will then need a user or API key with the permissions you created above. The following example API call creates a user with the `remote1` role. 
@@ -114,6 +121,11 @@ POST /_security/user/remote_user Remember that all cross-cluster requests from the local cluster are bound by the cross cluster API key’s privileges, which are controlled by the remote cluster's administrator. +[TIP] +==== +Cross cluster API keys created in versions prior to 8.15.0 will need to replaced or updated to add the new permissions required for {esql} with ENRICH. +==== + [discrete] [[ccq-remote-cluster-setup]] ==== Remote cluster setup @@ -174,9 +186,11 @@ clusters, aiming to minimize computation or inter-cluster data transfer. Ensurin the policy exists with consistent data on both the local cluster and the remote clusters is critical for ES|QL to produce a consistent query result. -[NOTE] +[TIP] ==== -Enrich across clusters is *not supported* in this version when using {esql} with the <>. +Enrich in {esql} across clusters using the API key based security model was introduced in version *8.15.0*. +Cross cluster API keys created in versions prior to 8.15.0 will need to replaced or updated to use the new required permissions. +Refer to the example in the <> section. ==== In the following example, the enrich with `hosts` policy can be executed on diff --git a/docs/reference/esql/esql-apis.asciidoc b/docs/reference/esql/esql-apis.asciidoc index 686a71506bc14..8586cd1ae6bce 100644 --- a/docs/reference/esql/esql-apis.asciidoc +++ b/docs/reference/esql/esql-apis.asciidoc @@ -1,7 +1,7 @@ [[esql-apis]] == {esql} APIs -The {es} Query Language ({esql}) provides a powerful way to filter, transform, +The <> provides a powerful way to filter, transform, and analyze data stored in {es}, and in the future in other runtimes. For an overview of {esql} and related tutorials, see <>. diff --git a/docs/reference/esql/esql-async-query-delete-api.asciidoc b/docs/reference/esql/esql-async-query-delete-api.asciidoc index 90f8c06b9124a..5cad566f7f9c0 100644 --- a/docs/reference/esql/esql-async-query-delete-api.asciidoc +++ b/docs/reference/esql/esql-async-query-delete-api.asciidoc @@ -4,7 +4,7 @@ {esql} async query delete API ++++ -The {esql} async query delete API is used to manually delete an async query +The <> async query delete API is used to manually delete an async query by ID. If the query is still running, the query will be cancelled. Otherwise, the stored results are deleted. 
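To tie the async query APIs above together, here is a sketch of starting an async {esql} query and then deleting it by ID. The query and the `library` index follow the examples used elsewhere in these docs; the ID in the `DELETE` request is a placeholder for the `id` returned by the `POST`.

[source,console]
----
POST /_query/async
{
  "query": """
    FROM library
    | STATS count = COUNT(*) BY author
    | SORT count DESC
    | LIMIT 5
  """,
  "wait_for_completion_timeout": "2s"
}

DELETE /_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=
----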
diff --git a/docs/reference/esql/esql-commands.asciidoc b/docs/reference/esql/esql-commands.asciidoc index bed79299b1cc1..235113ac1394a 100644 --- a/docs/reference/esql/esql-commands.asciidoc +++ b/docs/reference/esql/esql-commands.asciidoc @@ -37,6 +37,9 @@ image::images/esql/processing-command.svg[A processing command changing an input * <> * <> * <> +ifeval::["{release-state}"=="unreleased"] +* experimental:[] <> +endif::[] * <> * <> ifeval::["{release-state}"=="unreleased"] @@ -59,6 +62,9 @@ include::processing-commands/drop.asciidoc[] include::processing-commands/enrich.asciidoc[] include::processing-commands/eval.asciidoc[] include::processing-commands/grok.asciidoc[] +ifeval::["{release-state}"=="unreleased"] +include::processing-commands/inlinestats.asciidoc[] +endif::[] include::processing-commands/keep.asciidoc[] include::processing-commands/limit.asciidoc[] ifeval::["{release-state}"=="unreleased"] diff --git a/docs/reference/esql/esql-multi-index.asciidoc b/docs/reference/esql/esql-multi-index.asciidoc index 41ff6a27417b1..25874a132d93d 100644 --- a/docs/reference/esql/esql-multi-index.asciidoc +++ b/docs/reference/esql/esql-multi-index.asciidoc @@ -97,13 +97,12 @@ In addition, if the query refers to this unsupported field directly, the query f [source.merge.styled,esql] ---- FROM events_* -| KEEP @timestamp, client_ip, event_duration, message -| SORT @timestamp DESC +| SORT client_ip DESC ---- [source,bash] ---- -Cannot use field [client_ip] due to ambiguities being mapped as +Cannot use field [client_ip] due to ambiguities being mapped as [2] incompatible types: [ip] in [events_ip], [keyword] in [events_keyword] @@ -113,12 +112,13 @@ Cannot use field [client_ip] due to ambiguities being mapped as [[esql-multi-index-union-types]] === Union types +experimental::[] + {esql} has a way to handle <>. When the same field is mapped to multiple types in multiple indices, the type of the field is understood to be a _union_ of the various types in the index mappings. As seen in the preceding examples, this _union type_ cannot be used in the results, -and cannot be referred to by the query --- except when it's passed to a type conversion function that accepts all the types in the _union_ and converts the field -to a single type. {esql} offers a suite of <> to achieve this. +and cannot be referred to by the query -- except in `KEEP`, `DROP` or when it's passed to a type conversion function that accepts all the types in +the _union_ and converts the field to a single type. {esql} offers a suite of <> to achieve this. In the above examples, the query can use a command like `EVAL client_ip = TO_IP(client_ip)` to resolve the union of `ip` and `keyword` to just `ip`. diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index e8cfa03e3ee88..c8c735b73d2a4 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -102,7 +102,7 @@ Column `name` and `type` for each column returned in `values`. Each object is a Column `name` and `type` for each queried column. Each object is a single column. This is only returned if `drop_null_columns` is sent with the request. -`rows`:: +`values`:: (array of arrays) Values for the search results. 
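Building on the union type handling and the response fields described above, here is a sketch of a request that converts the multi-typed `client_ip` field with `TO_IP`, together with the rough shape of the reply. The field names match the `events_*` example above; the returned values are illustrative.

[source,console]
----
POST /_query
{
  "query": """
    FROM events_*
    | EVAL client_ip = TO_IP(client_ip)
    | KEEP @timestamp, client_ip, message
    | SORT @timestamp DESC
    | LIMIT 2
  """
}
----

[source,console-result]
----
{
  "columns": [
    {"name": "@timestamp", "type": "date"},
    {"name": "client_ip", "type": "ip"},
    {"name": "message", "type": "keyword"}
  ],
  "values": [
    ["2023-10-23T13:55:01.543Z", "172.21.3.15", "Connected to 10.1.0.1"],
    ["2023-10-23T13:53:55.832Z", "172.21.3.15", "Connection error"]
  ]
}
----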
diff --git a/docs/reference/esql/esql-rest.asciidoc b/docs/reference/esql/esql-rest.asciidoc index 5b90e96d7a734..2c8c5e81e273d 100644 --- a/docs/reference/esql/esql-rest.asciidoc +++ b/docs/reference/esql/esql-rest.asciidoc @@ -278,6 +278,47 @@ POST /_query ---- // TEST[setup:library] +The parameters can be named parameters or positional parameters. + +Named parameters use question mark placeholders (`?`) followed by a string. + +[source,console] +---- +POST /_query +{ + "query": """ + FROM library + | EVAL year = DATE_EXTRACT("year", release_date) + | WHERE page_count > ?page_count AND author == ?author + | STATS count = COUNT(*) by year + | WHERE count > ?count + | LIMIT 5 + """, + "params": [{"page_count" : 300}, {"author" : "Frank Herbert"}, {"count" : 0}] +} +---- +// TEST[setup:library] + +Positional parameters use question mark placeholders (`?`) followed by an +integer. + +[source,console] +---- +POST /_query +{ + "query": """ + FROM library + | EVAL year = DATE_EXTRACT("year", release_date) + | WHERE page_count > ?1 AND author == ?2 + | STATS count = COUNT(*) by year + | WHERE count > ?3 + | LIMIT 5 + """, + "params": [300, "Frank Herbert", 0] +} +---- +// TEST[setup:library] + [discrete] [[esql-rest-async-query]] ==== Running an async {esql} query diff --git a/docs/reference/esql/functions/aggregation-functions.asciidoc b/docs/reference/esql/functions/aggregation-functions.asciidoc index 4c248704b6385..7cdc42ea6cbf9 100644 --- a/docs/reference/esql/functions/aggregation-functions.asciidoc +++ b/docs/reference/esql/functions/aggregation-functions.asciidoc @@ -9,30 +9,30 @@ The <> command supports these aggregate functions: // tag::agg_list[] * <> -* <> -* <> +* <> +* <> * <> -* <> -* <> +* <> +* <> * <> * <> -* experimental:[] <> -* <> +* experimental:[] <> +* <> * <> -* <> -* experimental:[] <> +* <> +* experimental:[] <> // end::agg_list[] -include::count.asciidoc[] -include::count-distinct.asciidoc[] -include::median.asciidoc[] -include::median-absolute-deviation.asciidoc[] -include::st_centroid_agg.asciidoc[] -include::sum.asciidoc[] include::layout/avg.asciidoc[] +include::layout/count.asciidoc[] +include::layout/count_distinct.asciidoc[] include::layout/max.asciidoc[] +include::layout/median.asciidoc[] +include::layout/median_absolute_deviation.asciidoc[] include::layout/min.asciidoc[] include::layout/percentile.asciidoc[] +include::layout/st_centroid_agg.asciidoc[] +include::layout/sum.asciidoc[] include::layout/top.asciidoc[] -include::values.asciidoc[] -include::weighted-avg.asciidoc[] +include::layout/values.asciidoc[] +include::layout/weighted_avg.asciidoc[] diff --git a/docs/reference/esql/functions/appendix/count_distinct.asciidoc b/docs/reference/esql/functions/appendix/count_distinct.asciidoc new file mode 100644 index 0000000000000..065065cf34e06 --- /dev/null +++ b/docs/reference/esql/functions/appendix/count_distinct.asciidoc @@ -0,0 +1,25 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-agg-count-distinct-approximate]] +==== Counts are approximate + +Computing exact counts requires loading values into a set and returning its +size. This doesn't scale when working on high-cardinality sets and/or large +values as the required memory usage and the need to communicate those +per-shard sets between nodes would utilize too many resources of the cluster. 
+ +This `COUNT_DISTINCT` function is based on the +https://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf[HyperLogLog++] +algorithm, which counts based on the hashes of the values with some interesting +properties: + +include::../../../aggregations/metrics/cardinality-aggregation.asciidoc[tag=explanation] + +The `COUNT_DISTINCT` function takes an optional second parameter to configure +the precision threshold. The precision_threshold options allows to trade memory +for accuracy, and defines a unique count below which counts are expected to be +close to accurate. Above this value, counts might become a bit more fuzzy. The +maximum supported value is 40000, thresholds above this number will have the +same effect as a threshold of 40000. The default value is `3000`. + diff --git a/docs/reference/esql/functions/appendix/median.asciidoc b/docs/reference/esql/functions/appendix/median.asciidoc new file mode 100644 index 0000000000000..929a4ed0dae2c --- /dev/null +++ b/docs/reference/esql/functions/appendix/median.asciidoc @@ -0,0 +1,7 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[WARNING] +==== +`MEDIAN` is also {wikipedia}/Nondeterministic_algorithm[non-deterministic]. +This means you can get slightly different results using the same data. +==== diff --git a/docs/reference/esql/functions/appendix/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/appendix/median_absolute_deviation.asciidoc new file mode 100644 index 0000000000000..a4f96c800946b --- /dev/null +++ b/docs/reference/esql/functions/appendix/median_absolute_deviation.asciidoc @@ -0,0 +1,7 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[WARNING] +==== +`MEDIAN_ABSOLUTE_DEVIATION` is also {wikipedia}/Nondeterministic_algorithm[non-deterministic]. +This means you can get slightly different results using the same data. +==== diff --git a/docs/reference/esql/functions/appendix/values.asciidoc b/docs/reference/esql/functions/appendix/values.asciidoc new file mode 100644 index 0000000000000..ec3cfff2db6a6 --- /dev/null +++ b/docs/reference/esql/functions/appendix/values.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[WARNING] +==== +This can use a significant amount of memory and ES|QL doesn't yet +grow aggregations beyond memory. So this aggregation will work until +it is used to collect more values than can fit into memory. Once it +collects too many values it will fail the query with +a <>. +==== diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc deleted file mode 100644 index b5fda636135b2..0000000000000 --- a/docs/reference/esql/functions/case.asciidoc +++ /dev/null @@ -1,70 +0,0 @@ -[discrete] -[[esql-case]] -=== `CASE` - -*Syntax* - -[source,esql] ----- -CASE(condition1, value1[, ..., conditionN, valueN][, default_value]) ----- - -*Parameters* - -`conditionX`:: -A condition. - -`valueX`:: -The value that's returned when the corresponding condition is the first to -evaluate to `true`. - -`default_value`:: -The default value that's is returned when no condition matches. - -*Description* - -Accepts pairs of conditions and values. The function returns the value that -belongs to the first condition that evaluates to `true`. 
- -If the number of arguments is odd, the last argument is the default value which -is returned when no condition matches. If the number of arguments is even, and -no condition matches, the function returns `null`. - -*Example* - -Determine whether employees are monolingual, bilingual, or polyglot: - -[source,esql] -[source.merge.styled,esql] ----- -include::{esql-specs}/docs.csv-spec[tag=case] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/docs.csv-spec[tag=case-result] -|=== - -Calculate the total connection success rate based on log messages: - -[source,esql] -[source.merge.styled,esql] ----- -include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate-result] -|=== - -Calculate an hourly error rate as a percentage of the total number of log -messages: - -[source,esql] -[source.merge.styled,esql] ----- -include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate-result] -|=== diff --git a/docs/reference/esql/functions/count-distinct.asciidoc b/docs/reference/esql/functions/count-distinct.asciidoc deleted file mode 100644 index a9f30d24e0e83..0000000000000 --- a/docs/reference/esql/functions/count-distinct.asciidoc +++ /dev/null @@ -1,85 +0,0 @@ -[discrete] -[[esql-agg-count-distinct]] -=== `COUNT_DISTINCT` - -*Syntax* - -[source,esql] ----- -COUNT_DISTINCT(expression[, precision_threshold]) ----- - -*Parameters* - -`expression`:: -Expression that outputs the values on which to perform a distinct count. - -`precision_threshold`:: -Precision threshold. Refer to <>. The -maximum supported value is 40000. Thresholds above this number will have the -same effect as a threshold of 40000. The default value is 3000. - -*Description* - -Returns the approximate number of distinct values. - -*Supported types* - -Can take any field type as input. - -*Examples* - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-result] -|=== - -With the optional second parameter to configure the precision threshold: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-precision] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-precision-result] -|=== - -The expression can use inline functions. This example splits a string into -multiple values using the `SPLIT` function and counts the unique values: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats_count_distinct.csv-spec[tag=docsCountDistinctWithExpression] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats_count_distinct.csv-spec[tag=docsCountDistinctWithExpression-result] -|=== - -[discrete] -[[esql-agg-count-distinct-approximate]] -==== Counts are approximate - -Computing exact counts requires loading values into a set and returning its -size. 
This doesn't scale when working on high-cardinality sets and/or large -values as the required memory usage and the need to communicate those -per-shard sets between nodes would utilize too many resources of the cluster. - -This `COUNT_DISTINCT` function is based on the -https://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf[HyperLogLog++] -algorithm, which counts based on the hashes of the values with some interesting -properties: - -include::../../aggregations/metrics/cardinality-aggregation.asciidoc[tag=explanation] - -The `COUNT_DISTINCT` function takes an optional second parameter to configure -the precision threshold. The precision_threshold options allows to trade memory -for accuracy, and defines a unique count below which counts are expected to be -close to accurate. Above this value, counts might become a bit more fuzzy. The -maximum supported value is 40000, thresholds above this number will have the -same effect as a threshold of 40000. The default value is `3000`. \ No newline at end of file diff --git a/docs/reference/esql/functions/count.asciidoc b/docs/reference/esql/functions/count.asciidoc deleted file mode 100644 index 66cfe76350cdd..0000000000000 --- a/docs/reference/esql/functions/count.asciidoc +++ /dev/null @@ -1,83 +0,0 @@ -[discrete] -[[esql-agg-count]] -=== `COUNT` - -*Syntax* - -[source,esql] ----- -COUNT([expression]) ----- - -*Parameters* - -`expression`:: -Expression that outputs values to be counted. -If omitted, equivalent to `COUNT(*)` (the number of rows). - -*Description* - -Returns the total number (count) of input values. - -*Supported types* - -Can take any field type as input. - -*Examples* - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=count] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=count-result] -|=== - -To count the number of rows, use `COUNT()` or `COUNT(*)`: - -[source.merge.styled,esql] ----- -include::{esql-specs}/docs.csv-spec[tag=countAll] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/docs.csv-spec[tag=countAll-result] -|=== - -The expression can use inline functions. 
This example splits a string into -multiple values using the `SPLIT` function and counts the values: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=docsCountWithExpression] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=docsCountWithExpression-result] -|=== - -[[esql-agg-count-or-null]] -To count the number of times an expression returns `TRUE` use -a <> command to remove rows that shouldn't be included: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=count-where] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=count-where-result] -|=== - -To count the same stream of data based on two different expressions -use the pattern `COUNT( OR NULL)`: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=count-or-null] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=count-or-null-result] -|=== diff --git a/docs/reference/esql/functions/description/count.asciidoc b/docs/reference/esql/functions/description/count.asciidoc new file mode 100644 index 0000000000000..ee806d65a8ea3 --- /dev/null +++ b/docs/reference/esql/functions/description/count.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Returns the total number (count) of input values. diff --git a/docs/reference/esql/functions/description/count_distinct.asciidoc b/docs/reference/esql/functions/description/count_distinct.asciidoc new file mode 100644 index 0000000000000..d10825bb991f5 --- /dev/null +++ b/docs/reference/esql/functions/description/count_distinct.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Returns the approximate number of distinct values. diff --git a/docs/reference/esql/functions/description/median.asciidoc b/docs/reference/esql/functions/description/median.asciidoc new file mode 100644 index 0000000000000..ff3b7b32ed15e --- /dev/null +++ b/docs/reference/esql/functions/description/median.asciidoc @@ -0,0 +1,7 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +The value that is greater than half of all values and less than half of all values, also known as the 50% <>. + +NOTE: Like <>, `MEDIAN` is <>. diff --git a/docs/reference/esql/functions/description/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/description/median_absolute_deviation.asciidoc new file mode 100644 index 0000000000000..1a363920dd422 --- /dev/null +++ b/docs/reference/esql/functions/description/median_absolute_deviation.asciidoc @@ -0,0 +1,7 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Returns the median absolute deviation, a measure of variability. It is a robust statistic, meaning that it is useful for describing data that may have outliers, or may not be normally distributed. For such data it can be more descriptive than standard deviation. It is calculated as the median of each data point's deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`. 
+ +NOTE: Like <>, `MEDIAN_ABSOLUTE_DEVIATION` is <>. diff --git a/docs/reference/esql/functions/description/mv_percentile.asciidoc b/docs/reference/esql/functions/description/mv_percentile.asciidoc new file mode 100644 index 0000000000000..3e731f6525cec --- /dev/null +++ b/docs/reference/esql/functions/description/mv_percentile.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. diff --git a/docs/reference/esql/functions/description/mv_pseries_weighted_sum.asciidoc b/docs/reference/esql/functions/description/mv_pseries_weighted_sum.asciidoc new file mode 100644 index 0000000000000..d464689f40a01 --- /dev/null +++ b/docs/reference/esql/functions/description/mv_pseries_weighted_sum.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts a multivalued expression into a single-valued column by multiplying every element on the input list by its corresponding term in P-Series and computing the sum. diff --git a/docs/reference/esql/functions/description/st_centroid_agg.asciidoc b/docs/reference/esql/functions/description/st_centroid_agg.asciidoc new file mode 100644 index 0000000000000..740accf02c33f --- /dev/null +++ b/docs/reference/esql/functions/description/st_centroid_agg.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Calculate the spatial centroid over a field with spatial point geometry type. diff --git a/docs/reference/esql/functions/description/sum.asciidoc b/docs/reference/esql/functions/description/sum.asciidoc new file mode 100644 index 0000000000000..e3956567b8656 --- /dev/null +++ b/docs/reference/esql/functions/description/sum.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +The sum of a numeric expression. diff --git a/docs/reference/esql/functions/description/to_datetime.asciidoc b/docs/reference/esql/functions/description/to_datetime.asciidoc index b37bd6b22ac2f..91cbfa0b5fe1e 100644 --- a/docs/reference/esql/functions/description/to_datetime.asciidoc +++ b/docs/reference/esql/functions/description/to_datetime.asciidoc @@ -3,3 +3,5 @@ *Description* Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use <>. + +NOTE: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. diff --git a/docs/reference/esql/functions/description/values.asciidoc b/docs/reference/esql/functions/description/values.asciidoc new file mode 100644 index 0000000000000..b3cebcce955f0 --- /dev/null +++ b/docs/reference/esql/functions/description/values.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. If you need the values returned in order use <>. 
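A minimal sketch of that ordering pattern, assuming a hypothetical `employees` index with `first_name` and `gender` fields: `VALUES` collects each group's values into a multivalued column, and `MV_SORT` is one way to give that column a stable order.

[source,esql]
----
FROM employees
| STATS names = VALUES(first_name) BY gender // names is multivalued, in no guaranteed order
| EVAL names = MV_SORT(names)                // sort the multivalued column ascending
----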
diff --git a/docs/reference/esql/functions/description/weighted_avg.asciidoc b/docs/reference/esql/functions/description/weighted_avg.asciidoc new file mode 100644 index 0000000000000..a15d5d4ea171d --- /dev/null +++ b/docs/reference/esql/functions/description/weighted_avg.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +The weighted average of a numeric expression. diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc index e1bba0529d7db..4afea30660339 100644 --- a/docs/reference/esql/functions/examples/bucket.asciidoc +++ b/docs/reference/esql/functions/examples/bucket.asciidoc @@ -86,10 +86,6 @@ include::{esql-specs}/bucket.csv-spec[tag=docsBucketNumericWithSpan] |=== include::{esql-specs}/bucket.csv-spec[tag=docsBucketNumericWithSpan-result] |=== - -NOTE: When providing the bucket size as the second parameter, it must be -of a floating point type. - Create hourly buckets for the last 24 hours, and calculate the number of events per hour: [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/examples/count.asciidoc b/docs/reference/esql/functions/examples/count.asciidoc new file mode 100644 index 0000000000000..fb696b51e054c --- /dev/null +++ b/docs/reference/esql/functions/examples/count.asciidoc @@ -0,0 +1,49 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=count] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=count-result] +|=== +To count the number of rows, use `COUNT()` or `COUNT(*)` +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=countAll] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/docs.csv-spec[tag=countAll-result] +|=== +The expression can use inline functions. This example splits a string into multiple values using the `SPLIT` function and counts the values +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=docsCountWithExpression] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=docsCountWithExpression-result] +|=== +To count the number of times an expression returns `TRUE` use a <> command to remove rows that shouldn't be included +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=count-where] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=count-where-result] +|=== +To count the same stream of data based on two different expressions use the pattern `COUNT( OR NULL)` +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=count-or-null] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=count-or-null-result] +|=== + diff --git a/docs/reference/esql/functions/examples/count_distinct.asciidoc b/docs/reference/esql/functions/examples/count_distinct.asciidoc new file mode 100644 index 0000000000000..44968c0652ec0 --- /dev/null +++ b/docs/reference/esql/functions/examples/count_distinct.asciidoc @@ -0,0 +1,31 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
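As a self-contained illustration of the `COUNT(<expression> OR NULL)` pattern used in the count examples above (the single-row input is purely for demonstration): each condition evaluates to `TRUE` or `NULL`, and `COUNT` only counts the non-null values, so two different conditions can be counted over the same input in a single pass.

[source,esql]
----
ROW n = 1
| STATS positive = COUNT(n > 0 OR NULL), // n > 0 is TRUE, so this counts the row
        negative = COUNT(n < 0 OR NULL)  // n < 0 OR NULL is NULL, so this does not
----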
+ +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-result] +|=== +With the optional second parameter to configure the precision threshold +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-precision] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-precision-result] +|=== +The expression can use inline functions. This example splits a string into multiple values using the `SPLIT` function and counts the unique values +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_count_distinct.csv-spec[tag=docsCountDistinctWithExpression] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_count_distinct.csv-spec[tag=docsCountDistinctWithExpression-result] +|=== + diff --git a/docs/reference/esql/functions/examples/median.asciidoc b/docs/reference/esql/functions/examples/median.asciidoc new file mode 100644 index 0000000000000..cb6248dcff148 --- /dev/null +++ b/docs/reference/esql/functions/examples/median.asciidoc @@ -0,0 +1,22 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_percentile.csv-spec[tag=median] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_percentile.csv-spec[tag=median-result] +|=== +The expression can use inline functions. For example, to calculate the median of the maximum values of a multivalued column, first use `MV_MAX` to get the maximum value per row, and use the result with the `MEDIAN` function +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMedianNestedExpression] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMedianNestedExpression-result] +|=== + diff --git a/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc new file mode 100644 index 0000000000000..20891126c20fb --- /dev/null +++ b/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc @@ -0,0 +1,22 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_percentile.csv-spec[tag=median-absolute-deviation] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_percentile.csv-spec[tag=median-absolute-deviation-result] +|=== +The expression can use inline functions. 
For example, to calculate the median absolute deviation of the maximum values of a multivalued column, first use `MV_MAX` to get the maximum value per row, and use the result with the `MEDIAN_ABSOLUTE_DEVIATION` function +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMADNestedExpression] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMADNestedExpression-result] +|=== + diff --git a/docs/reference/esql/functions/examples/mv_percentile.asciidoc b/docs/reference/esql/functions/examples/mv_percentile.asciidoc new file mode 100644 index 0000000000000..9b20a5bef5e0d --- /dev/null +++ b/docs/reference/esql/functions/examples/mv_percentile.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/mv_percentile.csv-spec[tag=example] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/mv_percentile.csv-spec[tag=example-result] +|=== + diff --git a/docs/reference/esql/functions/examples/mv_pseries_weighted_sum.asciidoc b/docs/reference/esql/functions/examples/mv_pseries_weighted_sum.asciidoc new file mode 100644 index 0000000000000..bce4deb1f5225 --- /dev/null +++ b/docs/reference/esql/functions/examples/mv_pseries_weighted_sum.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/mv_pseries_weighted_sum.csv-spec[tag=example] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/mv_pseries_weighted_sum.csv-spec[tag=example-result] +|=== + diff --git a/docs/reference/esql/functions/examples/st_centroid_agg.asciidoc b/docs/reference/esql/functions/examples/st_centroid_agg.asciidoc new file mode 100644 index 0000000000000..69c291b738828 --- /dev/null +++ b/docs/reference/esql/functions/examples/st_centroid_agg.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/spatial.csv-spec[tag=st_centroid_agg-airports] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/spatial.csv-spec[tag=st_centroid_agg-airports-result] +|=== + diff --git a/docs/reference/esql/functions/examples/sum.asciidoc b/docs/reference/esql/functions/examples/sum.asciidoc new file mode 100644 index 0000000000000..1c02ccd784a54 --- /dev/null +++ b/docs/reference/esql/functions/examples/sum.asciidoc @@ -0,0 +1,22 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=sum] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=sum-result] +|=== +The expression can use inline functions. 
For example, to calculate the sum of each employee's maximum salary changes, apply the `MV_MAX` function to each row and then sum the results +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=docsStatsSumNestedExpression] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=docsStatsSumNestedExpression-result] +|=== + diff --git a/docs/reference/esql/functions/examples/values.asciidoc b/docs/reference/esql/functions/examples/values.asciidoc new file mode 100644 index 0000000000000..c013fc39d92ca --- /dev/null +++ b/docs/reference/esql/functions/examples/values.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=values-grouped] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=values-grouped-result] +|=== + diff --git a/docs/reference/esql/functions/examples/weighted_avg.asciidoc b/docs/reference/esql/functions/examples/weighted_avg.asciidoc new file mode 100644 index 0000000000000..e8e8cc3eda006 --- /dev/null +++ b/docs/reference/esql/functions/examples/weighted_avg.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=weighted-avg] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=weighted-avg-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/add.json b/docs/reference/esql/functions/kibana/definition/add.json new file mode 100644 index 0000000000000..0932a76966560 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/add.json @@ -0,0 +1,296 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "add", + "description" : "Add two numbers together. If either field is <> then the result is `null`.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." 
+ } + ], + "variadic" : false, + "returnType" : "date_period" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." 
+ }, + { + "name" : "rhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "time_duration" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "unsigned_long" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 7141ca4c27443..94214a3a4f047 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -8,7 +8,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -16,17 +16,17 @@ "name" : "buckets", "type" : "date_period", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -34,29 +34,269 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", - "type" : "datetime", + "type" : "date", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", - "type" : "datetime", + "type" : "date", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "date", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." 
+ }, + { + "name" : "from", + "type" : "date", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "date", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "keyword", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "date", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." 
+ }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "keyword", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + }, + { + "name" : "from", + "type" : "text", + "optional" : true, + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." + }, + { + "name" : "to", + "type" : "text", + "optional" : true, + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", "optional" : false, "description" : "Numeric or date expression from which to derive buckets." }, @@ -64,11 +304,11 @@ "name" : "buckets", "type" : "time_duration", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -82,7 +322,25 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -100,19 +358,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -130,19 +388,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." 
}, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -160,19 +418,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -190,19 +448,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -220,19 +478,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -250,19 +508,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." 
} ], "variadic" : false, @@ -280,19 +538,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -310,19 +568,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -340,19 +598,37 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -370,7 +646,25 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -388,19 +682,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." 
+ "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -418,19 +712,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -448,19 +742,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -478,19 +772,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -508,19 +802,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. 
Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -538,19 +832,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -568,19 +862,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -598,19 +892,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -628,19 +922,37 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." 
} ], "variadic" : false, @@ -658,7 +970,25 @@ "name" : "buckets", "type" : "double", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "integer", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, @@ -676,19 +1006,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -706,19 +1036,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -736,19 +1066,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "double", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -766,19 +1096,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. 
Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -796,19 +1126,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -826,19 +1156,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "integer", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -856,19 +1186,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "double", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -886,19 +1216,19 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "integer", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." } ], "variadic" : false, @@ -916,19 +1246,37 @@ "name" : "buckets", "type" : "integer", "optional" : false, - "description" : "Target number of buckets." + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." }, { "name" : "from", "type" : "long", "optional" : true, - "description" : "Start of the range. 
Can be a number or a date expressed as a string." + "description" : "Start of the range. Can be a number, a date or a date expressed as a string." }, { "name" : "to", "type" : "long", "optional" : true, - "description" : "End of the range. Can be a number or a date expressed as a string." + "description" : "End of the range. Can be a number, a date or a date expressed as a string." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Numeric or date expression from which to derive buckets." + }, + { + "name" : "buckets", + "type" : "long", + "optional" : false, + "description" : "Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted." } ], "variadic" : false, diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index 5959eed62d37b..27705cd3897f9 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -50,13 +50,13 @@ }, { "name" : "trueValue", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." } ], "variadic" : true, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json index f00f471e63ecc..2459a4d51bb2d 100644 --- a/docs/reference/esql/functions/kibana/definition/coalesce.json +++ b/docs/reference/esql/functions/kibana/definition/coalesce.json @@ -74,19 +74,19 @@ "params" : [ { "name" : "first", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Expression to evaluate." }, { "name" : "rest", - "type" : "datetime", + "type" : "date", "optional" : true, "description" : "Other expression to evaluate." } ], "variadic" : true, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/count.json b/docs/reference/esql/functions/kibana/definition/count.json new file mode 100644 index 0000000000000..2a15fb3bdd335 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/count.json @@ -0,0 +1,159 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "count", + "description" : "Returns the total number (count) of input values.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "cartesian_point", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." 
+ } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "geo_point", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : true, + "description" : "Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows)." + } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "FROM employees\n| STATS COUNT(height)", + "FROM employees \n| STATS count = COUNT(*) BY languages \n| SORT languages DESC", + "ROW words=\"foo;bar;baz;qux;quux;foo\"\n| STATS word_count = COUNT(SPLIT(words, \";\"))", + "ROW n=1\n| WHERE n < 0\n| STATS COUNT(n)", + "ROW n=1\n| STATS COUNT(n > 0 OR NULL), COUNT(n < 0 OR NULL)" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/count_distinct.json b/docs/reference/esql/functions/kibana/definition/count_distinct.json new file mode 100644 index 0000000000000..f6a148783ba42 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/count_distinct.json @@ -0,0 +1,607 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "count_distinct", + "description" : "Returns the approximate number of distinct values.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. 
Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. 
Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. 
Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. 
Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." 
+ } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "FROM hosts\n| STATS COUNT_DISTINCT(ip0), COUNT_DISTINCT(ip1)", + "FROM hosts\n| STATS COUNT_DISTINCT(ip0, 80000), COUNT_DISTINCT(ip1, 5)", + "ROW words=\"foo;bar;baz;qux;quux;foo\"\n| STATS distinct_word_count = COUNT_DISTINCT(SPLIT(words, \";\"))" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/date_diff.json b/docs/reference/esql/functions/kibana/definition/date_diff.json index 7995d3c6d32b6..d6589f041075d 100644 --- a/docs/reference/esql/functions/kibana/definition/date_diff.json +++ b/docs/reference/esql/functions/kibana/definition/date_diff.json @@ -14,13 +14,13 @@ }, { "name" : "startTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing a start timestamp" }, { "name" : "endTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing an end timestamp" } @@ -38,13 +38,13 @@ }, { "name" : "startTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing a start timestamp" }, { "name" : "endTimestamp", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "A string representing an end timestamp" } diff --git a/docs/reference/esql/functions/kibana/definition/date_extract.json b/docs/reference/esql/functions/kibana/definition/date_extract.json index 75cedcc191b50..557f0e0a47e54 100644 --- a/docs/reference/esql/functions/kibana/definition/date_extract.json +++ b/docs/reference/esql/functions/kibana/definition/date_extract.json @@ -14,7 +14,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } @@ -32,7 +32,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } diff --git a/docs/reference/esql/functions/kibana/definition/date_format.json b/docs/reference/esql/functions/kibana/definition/date_format.json index 5e8587c046d70..7bd01d7f4ef31 100644 --- a/docs/reference/esql/functions/kibana/definition/date_format.json +++ b/docs/reference/esql/functions/kibana/definition/date_format.json @@ -14,7 +14,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." } @@ -32,7 +32,7 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression. If `null`, the function returns `null`." 
} diff --git a/docs/reference/esql/functions/kibana/definition/date_parse.json b/docs/reference/esql/functions/kibana/definition/date_parse.json index 890179143bef8..9400340750c2a 100644 --- a/docs/reference/esql/functions/kibana/definition/date_parse.json +++ b/docs/reference/esql/functions/kibana/definition/date_parse.json @@ -20,7 +20,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -38,7 +38,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -56,7 +56,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -74,7 +74,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/date_trunc.json b/docs/reference/esql/functions/kibana/definition/date_trunc.json index 3d8658c496529..bd3f362d1670b 100644 --- a/docs/reference/esql/functions/kibana/definition/date_trunc.json +++ b/docs/reference/esql/functions/kibana/definition/date_trunc.json @@ -14,13 +14,13 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -32,13 +32,13 @@ }, { "name" : "date", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Date expression" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/div.json b/docs/reference/esql/functions/kibana/definition/div.json new file mode 100644 index 0000000000000..8bd2c33720d5f --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/div.json @@ -0,0 +1,189 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "div", + "description" : "Divide one number by another. If either field is <> then the result is `null`.", + "note" : "Division of two integer types will yield an integer result, rounding towards 0. If you need floating point division, <> one of the arguments to a `DOUBLE`.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." 
+ } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "unsigned_long" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/ends_with.json b/docs/reference/esql/functions/kibana/definition/ends_with.json index b43181817ef9e..754d2c965287b 100644 --- a/docs/reference/esql/functions/kibana/definition/ends_with.json +++ b/docs/reference/esql/functions/kibana/definition/ends_with.json @@ -22,6 +22,42 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "str", + "type" : "keyword", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + }, + { + "name" : "suffix", + "type" : "text", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "str", + "type" : "text", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + }, + { + "name" : "suffix", + "type" : "keyword", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/equals.json b/docs/reference/esql/functions/kibana/definition/equals.json new file mode 100644 index 0000000000000..eca80ccdbf657 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/equals.json @@ -0,0 +1,405 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "equals", + "description" : "Check if two fields are equal. 
If either field is <> then the result is `null`.", + "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "boolean", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "boolean", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "cartesian_point", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "cartesian_point", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "cartesian_shape", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "cartesian_shape", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "geo_point", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "geo_point", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "geo_shape", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "geo_shape", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "version", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/greater_than.json b/docs/reference/esql/functions/kibana/definition/greater_than.json new file mode 100644 index 0000000000000..7831b0f41cd9d --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/greater_than.json @@ -0,0 +1,315 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "greater_than", + "description" : "Check if one field is greater than another. If either field is <> then the result is `null`.", + "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json new file mode 100644 index 0000000000000..b6a40a838c393 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json @@ -0,0 +1,315 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "greater_than_or_equal", + "description" : "Check if one field is greater than or equal to another. If either field is <> then the result is `null`.", + "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/in.json b/docs/reference/esql/functions/kibana/definition/in.json new file mode 100644 index 0000000000000..abf3bd64e2822 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/in.json @@ -0,0 +1,263 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "in", + "description" : "The `IN` operator allows testing whether a field or expression equals an element in a list of literals, fields or expressions.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "boolean", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "cartesian_point", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "cartesian_point", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "cartesian_shape", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "cartesian_shape", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "double", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "geo_point", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "geo_point", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "geo_shape", + "optional" : false, + "description" : "An expression." 
+ }, + { + "name" : "inlist", + "type" : "geo_shape", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "integer", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "ip", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "keyword", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "text", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "long", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "keyword", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "text", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "inlist", + "type" : "version", + "optional" : false, + "description" : "A list of items." + } + ], + "variadic" : true, + "returnType" : "boolean" + } + ], + "examples" : [ + "ROW a = 1, b = 4, c = 3\n| WHERE c-a IN (3, b / 2, a)" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/less_than.json b/docs/reference/esql/functions/kibana/definition/less_than.json new file mode 100644 index 0000000000000..bf6b9c5c08774 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/less_than.json @@ -0,0 +1,315 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "less_than", + "description" : "Check if one field is less than another. If either field is <> then the result is `null`.", + "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." 
+ }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json new file mode 100644 index 0000000000000..4e57161887141 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json @@ -0,0 +1,315 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "less_than_or_equal", + "description" : "Check if one field is less than or equal to another. If either field is <> then the result is `null`.", + "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "version", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/like.json b/docs/reference/esql/functions/kibana/definition/like.json new file mode 100644 index 0000000000000..9a215ff88e399 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/like.json @@ -0,0 +1,47 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "like", + "description" : "Use `LIKE` to filter data based on string patterns using wildcards. `LIKE`\nusually acts on a field placed on the left-hand side of the operator, but it can\nalso act on a constant (literal) expression. The right-hand side of the operator\nrepresents the pattern.\n\nThe following wildcard characters are supported:\n\n* `*` matches zero or more characters.\n* `?` matches one character.", + "signatures" : [ + { + "params" : [ + { + "name" : "str", + "type" : "keyword", + "optional" : false, + "description" : "A literal expression." + }, + { + "name" : "pattern", + "type" : "keyword", + "optional" : false, + "description" : "Pattern." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "str", + "type" : "text", + "optional" : false, + "description" : "A literal expression." + }, + { + "name" : "pattern", + "type" : "text", + "optional" : false, + "description" : "Pattern." + } + ], + "variadic" : true, + "returnType" : "boolean" + } + ], + "examples" : [ + "FROM employees\n| WHERE first_name LIKE \"?b*\"\n| KEEP first_name, last_name" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index 853cb9f9a97c3..b13d367d37345 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -64,6 +64,18 @@ "variadic" : false, "returnType" : "ip" }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { @@ -75,6 +87,30 @@ ], "variadic" : false, "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "version" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/median.json b/docs/reference/esql/functions/kibana/definition/median.json new file mode 100644 index 0000000000000..4887a4497e813 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/median.json @@ -0,0 +1,49 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "median", + "description" : "The value that is greater than half of all values and less than half of all values, also known as the 50% <>.", + "note" : "Like <>, `MEDIAN` is <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM employees\n| STATS MEDIAN(salary), PERCENTILE(salary, 50)", + "FROM employees\n| STATS median_max_salary_change = MEDIAN(MV_MAX(salary_change))" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/median_absolute_deviation.json b/docs/reference/esql/functions/kibana/definition/median_absolute_deviation.json new file mode 100644 index 0000000000000..4a8b1cd30611f --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/median_absolute_deviation.json @@ -0,0 +1,49 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "median_absolute_deviation", + "description" : "Returns the median absolute deviation, a measure of variability. It is a robust statistic, meaning that it is useful for describing data that may have outliers, or may not be normally distributed. For such data it can be more descriptive than standard deviation.\n\nIt is calculated as the median of each data point's deviation from the median of the entire sample. 
That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`.", + "note" : "Like <>, `MEDIAN_ABSOLUTE_DEVIATION` is <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM employees\n| STATS MEDIAN(salary), MEDIAN_ABSOLUTE_DEVIATION(salary)", + "FROM employees\n| STATS m_a_d_max_salary_change = MEDIAN_ABSOLUTE_DEVIATION(MV_MAX(salary_change))" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index 1c0c02eb9860f..338ed10d67b2e 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -64,6 +64,18 @@ "variadic" : false, "returnType" : "ip" }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { @@ -75,6 +87,30 @@ ], "variadic" : false, "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "version" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mod.json b/docs/reference/esql/functions/kibana/definition/mod.json new file mode 100644 index 0000000000000..c43f697127249 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/mod.json @@ -0,0 +1,188 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "mod", + "description" : "Divide one number by another and return the remainder. If either field is <> then the result is `null`.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." 
+ } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "unsigned_long" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/mul.json b/docs/reference/esql/functions/kibana/definition/mul.json new file mode 100644 index 0000000000000..9e42a55bdffef --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/mul.json @@ -0,0 +1,188 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "mul", + "description" : "Multiply two numbers together. If either field is <> then the result is `null`.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." 
+ } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value." + } + ], + "variadic" : false, + "returnType" : "unsigned_long" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/mv_append.json b/docs/reference/esql/functions/kibana/definition/mv_append.json index 8ee4e7297cc3a..3365226141f8f 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_append.json +++ b/docs/reference/esql/functions/kibana/definition/mv_append.json @@ -62,19 +62,19 @@ "params" : [ { "name" : "field1", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" }, { "name" : "field2", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "" } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_count.json b/docs/reference/esql/functions/kibana/definition/mv_count.json index d414e5b957495..f125327314f4e 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_count.json +++ b/docs/reference/esql/functions/kibana/definition/mv_count.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." 
} diff --git a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json index 7ab287bc94d34..7d66e3dcc0b9b 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json +++ b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json @@ -45,13 +45,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_first.json b/docs/reference/esql/functions/kibana/definition/mv_first.json index e3141e800e4ad..de6e642068517 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_first.json +++ b/docs/reference/esql/functions/kibana/definition/mv_first.json @@ -44,13 +44,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_last.json b/docs/reference/esql/functions/kibana/definition/mv_last.json index e55d66dbf8b93..ea1293e7acfec 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_last.json +++ b/docs/reference/esql/functions/kibana/definition/mv_last.json @@ -44,13 +44,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_max.json b/docs/reference/esql/functions/kibana/definition/mv_max.json index 0783f6d6d5cbc..eb25369f78f77 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_max.json +++ b/docs/reference/esql/functions/kibana/definition/mv_max.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_min.json b/docs/reference/esql/functions/kibana/definition/mv_min.json index cc23df386356e..87ad94338492e 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_min.json +++ b/docs/reference/esql/functions/kibana/definition/mv_min.json @@ -20,13 +20,13 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression." } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_percentile.json b/docs/reference/esql/functions/kibana/definition/mv_percentile.json new file mode 100644 index 0000000000000..dad611122f0db --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/mv_percentile.json @@ -0,0 +1,173 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "mv_percentile", + "description" : "Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "double", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "integer", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." 
+ } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "percentile", + "type" : "long", + "optional" : false, + "description" : "The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead." + } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "ROW values = [5, 5, 10, 12, 5000]\n| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values)" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/mv_pseries_weighted_sum.json b/docs/reference/esql/functions/kibana/definition/mv_pseries_weighted_sum.json new file mode 100644 index 0000000000000..626f7befbb12e --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/mv_pseries_weighted_sum.json @@ -0,0 +1,29 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "mv_pseries_weighted_sum", + "description" : "Converts a multivalued expression into a single-valued column by multiplying every element on the input list by its corresponding term in P-Series and computing the sum.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + }, + { + "name" : "p", + "type" : "double", + "optional" : false, + "description" : "It is a constant number that represents the 'p' parameter in the P-Series. It impacts every element's contribution to the weighted sum." + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "ROW a = [70.0, 45.0, 21.0, 21.0, 21.0]\n| EVAL sum = MV_PSERIES_WEIGHTED_SUM(a, 1.5)\n| KEEP sum" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/mv_slice.json b/docs/reference/esql/functions/kibana/definition/mv_slice.json index 30d0e1179dc89..ff52467b7d84a 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_slice.json +++ b/docs/reference/esql/functions/kibana/definition/mv_slice.json @@ -80,7 +80,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression. If `null`, the function returns `null`." }, @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_sort.json b/docs/reference/esql/functions/kibana/definition/mv_sort.json index 28b4c9e8d6fea..d2bbd2c0fdbf4 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_sort.json +++ b/docs/reference/esql/functions/kibana/definition/mv_sort.json @@ -26,7 +26,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Multivalue expression. If `null`, the function returns `null`." }, @@ -38,7 +38,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/neg.json b/docs/reference/esql/functions/kibana/definition/neg.json new file mode 100644 index 0000000000000..6a7e86fd8e1a8 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/neg.json @@ -0,0 +1,68 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "neg", + "description" : "Returns the negation of the argument.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time interval." + } + ], + "variadic" : false, + "returnType" : "date_period" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time interval." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time interval." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time interval." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time interval." + } + ], + "variadic" : false, + "returnType" : "time_duration" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/not_equals.json b/docs/reference/esql/functions/kibana/definition/not_equals.json new file mode 100644 index 0000000000000..4b4d22a5abef4 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/not_equals.json @@ -0,0 +1,405 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "not_equals", + "description" : "Check if two fields are unequal. If either field is <> then the result is `null`.", + "note" : "This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "boolean", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "boolean", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "cartesian_point", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "cartesian_point", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "cartesian_shape", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "cartesian_shape", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "geo_point", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "geo_point", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "geo_shape", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "geo_shape", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "ip", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "keyword", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "text", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "version", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/now.json b/docs/reference/esql/functions/kibana/definition/now.json index 9cdb4945afa2e..1a2fc3a1dc42a 100644 --- a/docs/reference/esql/functions/kibana/definition/now.json +++ b/docs/reference/esql/functions/kibana/definition/now.json @@ -6,7 +6,7 @@ "signatures" : [ { "params" : [ ], - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/rlike.json b/docs/reference/esql/functions/kibana/definition/rlike.json new file mode 100644 index 0000000000000..09bd1e033fe10 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/rlike.json @@ -0,0 +1,47 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "rlike", + "description" : "Use `RLIKE` to filter data based on string patterns using using\n<>. `RLIKE` usually acts on a field placed on\nthe left-hand side of the operator, but it can also act on a constant (literal)\nexpression. The right-hand side of the operator represents the pattern.", + "signatures" : [ + { + "params" : [ + { + "name" : "str", + "type" : "keyword", + "optional" : false, + "description" : "A literal value." + }, + { + "name" : "pattern", + "type" : "keyword", + "optional" : false, + "description" : "A regular expression." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "str", + "type" : "text", + "optional" : false, + "description" : "A literal value." + }, + { + "name" : "pattern", + "type" : "text", + "optional" : false, + "description" : "A regular expression." 
+ } + ], + "variadic" : true, + "returnType" : "boolean" + } + ], + "examples" : [ + "FROM employees\n| WHERE first_name RLIKE \".leja.*\"\n| KEEP first_name, last_name" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/st_centroid_agg.json b/docs/reference/esql/functions/kibana/definition/st_centroid_agg.json new file mode 100644 index 0000000000000..b01f91526709a --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/st_centroid_agg.json @@ -0,0 +1,35 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "st_centroid_agg", + "description" : "Calculate the spatial centroid over a field with spatial point geometry type.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "cartesian_point", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "cartesian_point" + }, + { + "params" : [ + { + "name" : "field", + "type" : "geo_point", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "geo_point" + } + ], + "examples" : [ + "FROM airports\n| STATS centroid=ST_CENTROID_AGG(location)" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/starts_with.json b/docs/reference/esql/functions/kibana/definition/starts_with.json index b04e9c6837d46..bfa3e7b0e5e42 100644 --- a/docs/reference/esql/functions/kibana/definition/starts_with.json +++ b/docs/reference/esql/functions/kibana/definition/starts_with.json @@ -22,6 +22,42 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "str", + "type" : "keyword", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + }, + { + "name" : "prefix", + "type" : "text", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "str", + "type" : "text", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + }, + { + "name" : "prefix", + "type" : "keyword", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/sub.json b/docs/reference/esql/functions/kibana/definition/sub.json new file mode 100644 index 0000000000000..37e3852865e7f --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/sub.json @@ -0,0 +1,260 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "sub", + "description" : "Subtract one number from another. If either field is <> then the result is `null`.", + "signatures" : [ + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date", + "optional" : false, + "description" : "A numeric value or a date time value." 
+ }, + { + "name" : "rhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date_period" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "double", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "integer", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "long", + "optional" : false, + "description" : "A numeric value or a date time value." 
+ } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "time_duration" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "unsigned_long", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "unsigned_long" + } + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/sum.json b/docs/reference/esql/functions/kibana/definition/sum.json new file mode 100644 index 0000000000000..b9235b6ba04de --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/sum.json @@ -0,0 +1,48 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "sum", + "description" : "The sum of a numeric expression.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "long" + } + ], + "examples" : [ + "FROM employees\n| STATS SUM(languages)", + "FROM employees\n| STATS total_salary_changes = SUM(MV_MAX(salary_change))" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/to_datetime.json b/docs/reference/esql/functions/kibana/definition/to_datetime.json index 10fcf8b22e8b0..032e8e1cbda34 100644 --- a/docs/reference/esql/functions/kibana/definition/to_datetime.json +++ b/docs/reference/esql/functions/kibana/definition/to_datetime.json @@ -3,18 +3,19 @@ "type" : "eval", "name" : "to_datetime", "description" : "Converts an input value to a date value.\nA string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`.\nTo convert dates in other formats, use <>.", + "note" : "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded.", "signatures" : [ { "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." 
} ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -26,7 +27,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -38,7 +39,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -50,7 +51,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -62,7 +63,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -74,7 +75,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -86,7 +87,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_double.json b/docs/reference/esql/functions/kibana/definition/to_double.json index f4e414068db61..ae7e4832bfb3c 100644 --- a/docs/reference/esql/functions/kibana/definition/to_double.json +++ b/docs/reference/esql/functions/kibana/definition/to_double.json @@ -56,7 +56,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_integer.json b/docs/reference/esql/functions/kibana/definition/to_integer.json index 2776d8b29c412..5150d12936711 100644 --- a/docs/reference/esql/functions/kibana/definition/to_integer.json +++ b/docs/reference/esql/functions/kibana/definition/to_integer.json @@ -32,7 +32,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_long.json b/docs/reference/esql/functions/kibana/definition/to_long.json index e3218eba9642a..5fd4bce34e7e0 100644 --- a/docs/reference/esql/functions/kibana/definition/to_long.json +++ b/docs/reference/esql/functions/kibana/definition/to_long.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_string.json b/docs/reference/esql/functions/kibana/definition/to_string.json index ef03cc06ea636..ea94171834908 100644 --- a/docs/reference/esql/functions/kibana/definition/to_string.json +++ b/docs/reference/esql/functions/kibana/definition/to_string.json @@ -44,7 +44,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." } diff --git a/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json b/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json index d9cba641573fb..5521241224d61 100644 --- a/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json +++ b/docs/reference/esql/functions/kibana/definition/to_unsigned_long.json @@ -20,7 +20,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "Input value. The input can be a single- or multi-valued column or an expression." 
} diff --git a/docs/reference/esql/functions/kibana/definition/top.json b/docs/reference/esql/functions/kibana/definition/top.json index 7502886e51c6c..c688bf5ea77c8 100644 --- a/docs/reference/esql/functions/kibana/definition/top.json +++ b/docs/reference/esql/functions/kibana/definition/top.json @@ -32,7 +32,7 @@ "params" : [ { "name" : "field", - "type" : "datetime", + "type" : "date", "optional" : false, "description" : "The field to collect the top values for." }, @@ -50,7 +50,7 @@ } ], "variadic" : false, - "returnType" : "datetime" + "returnType" : "date" }, { "params" : [ @@ -100,6 +100,30 @@ "variadic" : false, "returnType" : "integer" }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "The field to collect the top values for." + }, + { + "name" : "limit", + "type" : "integer", + "optional" : false, + "description" : "The maximum number of values to collect." + }, + { + "name" : "order", + "type" : "keyword", + "optional" : false, + "description" : "The order to calculate the top values. Either `asc` or `desc`." + } + ], + "variadic" : false, + "returnType" : "ip" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/values.json b/docs/reference/esql/functions/kibana/definition/values.json new file mode 100644 index 0000000000000..d9f37cd1ac83d --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/values.json @@ -0,0 +1,119 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "values", + "description" : "Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. If you need the values returned in order use <>.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "boolean", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "date" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "field", + "type" : "integer", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "field", + "type" : "ip", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "ip" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "field", + "type" : "version", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "version" + } + ], + "examples" : [ + " FROM employees\n| EVAL first_letter = SUBSTRING(first_name, 0, 1)\n| STATS first_name=MV_SORT(VALUES(first_name)) BY first_letter\n| SORT first_letter" + ] +} diff --git 
a/docs/reference/esql/functions/kibana/definition/weighted_avg.json b/docs/reference/esql/functions/kibana/definition/weighted_avg.json new file mode 100644 index 0000000000000..a04a5bff62bbb --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/weighted_avg.json @@ -0,0 +1,173 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "weighted_avg", + "description" : "The weighted average of a numeric expression.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "double", + "optional" : false, + "description" : "A numeric weight." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "integer", + "optional" : false, + "description" : "A numeric weight." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "long", + "optional" : false, + "description" : "A numeric weight." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "double", + "optional" : false, + "description" : "A numeric weight." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "integer", + "optional" : false, + "description" : "A numeric weight." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "long", + "optional" : false, + "description" : "A numeric weight." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "double", + "optional" : false, + "description" : "A numeric weight." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "integer", + "optional" : false, + "description" : "A numeric weight." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "A numeric value." + }, + { + "name" : "weight", + "type" : "long", + "optional" : false, + "description" : "A numeric weight." 
+ } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM employees\n| STATS w_avg = WEIGHTED_AVG(salary, height) by languages\n| EVAL w_avg = ROUND(w_avg)\n| KEEP w_avg, languages\n| SORT languages" + ] +} diff --git a/docs/reference/esql/functions/kibana/docs/add.md b/docs/reference/esql/functions/kibana/docs/add.md new file mode 100644 index 0000000000000..3f99bd4c77551 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/add.md @@ -0,0 +1,7 @@ + + +### ADD +Add two numbers together. If either field is <> then the result is `null`. + diff --git a/docs/reference/esql/functions/kibana/docs/count.md b/docs/reference/esql/functions/kibana/docs/count.md new file mode 100644 index 0000000000000..dc9c356a847ed --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/count.md @@ -0,0 +1,11 @@ + + +### COUNT +Returns the total number (count) of input values. + +``` +FROM employees +| STATS COUNT(height) +``` diff --git a/docs/reference/esql/functions/kibana/docs/count_distinct.md b/docs/reference/esql/functions/kibana/docs/count_distinct.md new file mode 100644 index 0000000000000..a6b451bf9d38d --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/count_distinct.md @@ -0,0 +1,11 @@ + + +### COUNT_DISTINCT +Returns the approximate number of distinct values. + +``` +FROM hosts +| STATS COUNT_DISTINCT(ip0), COUNT_DISTINCT(ip1) +``` diff --git a/docs/reference/esql/functions/kibana/docs/div.md b/docs/reference/esql/functions/kibana/docs/div.md new file mode 100644 index 0000000000000..a8b7b4e58f376 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/div.md @@ -0,0 +1,8 @@ + + +### DIV +Divide one number by another. If either field is <> then the result is `null`. + +Note: Division of two integer types will yield an integer result, rounding towards 0. If you need floating point division, <> one of the arguments to a `DOUBLE`. diff --git a/docs/reference/esql/functions/kibana/docs/equals.md b/docs/reference/esql/functions/kibana/docs/equals.md new file mode 100644 index 0000000000000..b8fcb72c2ccd5 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/equals.md @@ -0,0 +1,8 @@ + + +### EQUALS +Check if two fields are equal. If either field is <> then the result is `null`. + +Note: This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>. diff --git a/docs/reference/esql/functions/kibana/docs/greater_than.md b/docs/reference/esql/functions/kibana/docs/greater_than.md new file mode 100644 index 0000000000000..67f99eda3aed7 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/greater_than.md @@ -0,0 +1,8 @@ + + +### GREATER_THAN +Check if one field is greater than another. If either field is <> then the result is `null`. + +Note: This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>. diff --git a/docs/reference/esql/functions/kibana/docs/greater_than_or_equal.md b/docs/reference/esql/functions/kibana/docs/greater_than_or_equal.md new file mode 100644 index 0000000000000..73d3ac6371b07 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/greater_than_or_equal.md @@ -0,0 +1,8 @@ + + +### GREATER_THAN_OR_EQUAL +Check if one field is greater than or equal to another. If either field is <> then the result is `null`. 
+ +Note: This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>. diff --git a/docs/reference/esql/functions/kibana/docs/in.md b/docs/reference/esql/functions/kibana/docs/in.md new file mode 100644 index 0000000000000..e096146374f38 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/in.md @@ -0,0 +1,11 @@ + + +### IN +The `IN` operator allows testing whether a field or expression equals an element in a list of literals, fields or expressions. + +``` +ROW a = 1, b = 4, c = 3 +| WHERE c-a IN (3, b / 2, a) +``` diff --git a/docs/reference/esql/functions/kibana/docs/less_than.md b/docs/reference/esql/functions/kibana/docs/less_than.md new file mode 100644 index 0000000000000..0d171d06c68d3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/less_than.md @@ -0,0 +1,8 @@ + + +### LESS_THAN +Check if one field is less than another. If either field is <> then the result is `null`. + +Note: This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>. diff --git a/docs/reference/esql/functions/kibana/docs/less_than_or_equal.md b/docs/reference/esql/functions/kibana/docs/less_than_or_equal.md new file mode 100644 index 0000000000000..acb92288c2c46 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/less_than_or_equal.md @@ -0,0 +1,8 @@ + + +### LESS_THAN_OR_EQUAL +Check if one field is less than or equal to another. If either field is <> then the result is `null`. + +Note: This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>. diff --git a/docs/reference/esql/functions/kibana/docs/like.md b/docs/reference/esql/functions/kibana/docs/like.md new file mode 100644 index 0000000000000..4c400bdc65479 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/like.md @@ -0,0 +1,20 @@ + + +### LIKE +Use `LIKE` to filter data based on string patterns using wildcards. `LIKE` +usually acts on a field placed on the left-hand side of the operator, but it can +also act on a constant (literal) expression. The right-hand side of the operator +represents the pattern. + +The following wildcard characters are supported: + +* `*` matches zero or more characters. +* `?` matches one character. + +``` +FROM employees +| WHERE first_name LIKE "?b*" +| KEEP first_name, last_name +``` diff --git a/docs/reference/esql/functions/kibana/docs/median.md b/docs/reference/esql/functions/kibana/docs/median.md new file mode 100644 index 0000000000000..7a4370b4d2551 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/median.md @@ -0,0 +1,12 @@ + + +### MEDIAN +The value that is greater than half of all values and less than half of all values, also known as the 50% <>. + +``` +FROM employees +| STATS MEDIAN(salary), PERCENTILE(salary, 50) +``` +Note: Like <>, `MEDIAN` is <>. diff --git a/docs/reference/esql/functions/kibana/docs/median_absolute_deviation.md b/docs/reference/esql/functions/kibana/docs/median_absolute_deviation.md new file mode 100644 index 0000000000000..8db113deb2c49 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/median_absolute_deviation.md @@ -0,0 +1,14 @@ + + +### MEDIAN_ABSOLUTE_DEVIATION +Returns the median absolute deviation, a measure of variability. 
It is a robust statistic, meaning that it is useful for describing data that may have outliers, or may not be normally distributed. For such data it can be more descriptive than standard deviation. + +It is calculated as the median of each data point's deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`. + +``` +FROM employees +| STATS MEDIAN(salary), MEDIAN_ABSOLUTE_DEVIATION(salary) +``` +Note: Like <>, `MEDIAN_ABSOLUTE_DEVIATION` is <>. diff --git a/docs/reference/esql/functions/kibana/docs/mod.md b/docs/reference/esql/functions/kibana/docs/mod.md new file mode 100644 index 0000000000000..e6b3c92406072 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/mod.md @@ -0,0 +1,7 @@ + + +### MOD +Divide one number by another and return the remainder. If either field is <> then the result is `null`. + diff --git a/docs/reference/esql/functions/kibana/docs/mul.md b/docs/reference/esql/functions/kibana/docs/mul.md new file mode 100644 index 0000000000000..3f24f3b1a67bb --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/mul.md @@ -0,0 +1,7 @@ + + +### MUL +Multiply two numbers together. If either field is <> then the result is `null`. + diff --git a/docs/reference/esql/functions/kibana/docs/mv_percentile.md b/docs/reference/esql/functions/kibana/docs/mv_percentile.md new file mode 100644 index 0000000000000..560a0aefa1dc3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/mv_percentile.md @@ -0,0 +1,11 @@ + + +### MV_PERCENTILE +Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. + +``` +ROW values = [5, 5, 10, 12, 5000] +| EVAL p50 = MV_PERCENTILE(values, 50), median = MV_MEDIAN(values) +``` diff --git a/docs/reference/esql/functions/kibana/docs/mv_pseries_weighted_sum.md b/docs/reference/esql/functions/kibana/docs/mv_pseries_weighted_sum.md new file mode 100644 index 0000000000000..fbeb310449b9b --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/mv_pseries_weighted_sum.md @@ -0,0 +1,12 @@ + + +### MV_PSERIES_WEIGHTED_SUM +Converts a multivalued expression into a single-valued column by multiplying every element on the input list by its corresponding term in P-Series and computing the sum. + +``` +ROW a = [70.0, 45.0, 21.0, 21.0, 21.0] +| EVAL sum = MV_PSERIES_WEIGHTED_SUM(a, 1.5) +| KEEP sum +``` diff --git a/docs/reference/esql/functions/kibana/docs/neg.md b/docs/reference/esql/functions/kibana/docs/neg.md new file mode 100644 index 0000000000000..2d1c65487343f --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/neg.md @@ -0,0 +1,7 @@ + + +### NEG +Returns the negation of the argument. + diff --git a/docs/reference/esql/functions/kibana/docs/not_equals.md b/docs/reference/esql/functions/kibana/docs/not_equals.md new file mode 100644 index 0000000000000..cff2130e766ed --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/not_equals.md @@ -0,0 +1,8 @@ + + +### NOT_EQUALS +Check if two fields are unequal. If either field is <> then the result is `null`. + +Note: This is pushed to the underlying search index if one side of the comparison is constant and the other side is a field in the index that has both an <> and <>. 
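The new `MOD`, `MUL`, and `NEG` pages added above ship without usage snippets. A minimal illustrative query, using literal values chosen here for demonstration (not taken from this change), could look like:

```
ROW a = 7, b = 3
| EVAL product = a * b, remainder = a % b, negated = -a
| KEEP product, remainder, negated
```

Each `EVAL` column exercises one of the operators documented above: `*` (MUL), `%` (MOD), and unary `-` (NEG).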
diff --git a/docs/reference/esql/functions/kibana/docs/rlike.md b/docs/reference/esql/functions/kibana/docs/rlike.md new file mode 100644 index 0000000000000..ed94553e7e44f --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/rlike.md @@ -0,0 +1,15 @@ + + +### RLIKE +Use `RLIKE` to filter data based on string patterns using +<>. `RLIKE` usually acts on a field placed on +the left-hand side of the operator, but it can also act on a constant (literal) +expression. The right-hand side of the operator represents the pattern. + +``` +FROM employees +| WHERE first_name RLIKE ".leja.*" +| KEEP first_name, last_name +``` diff --git a/docs/reference/esql/functions/kibana/docs/st_centroid_agg.md b/docs/reference/esql/functions/kibana/docs/st_centroid_agg.md new file mode 100644 index 0000000000000..306a32a309a64 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/st_centroid_agg.md @@ -0,0 +1,11 @@ + + +### ST_CENTROID_AGG +Calculate the spatial centroid over a field with spatial point geometry type. + +``` +FROM airports +| STATS centroid=ST_CENTROID_AGG(location) +``` diff --git a/docs/reference/esql/functions/kibana/docs/sub.md b/docs/reference/esql/functions/kibana/docs/sub.md new file mode 100644 index 0000000000000..10746ed81cfe3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sub.md @@ -0,0 +1,7 @@ + + +### SUB +Subtract one number from another. If either field is <> then the result is `null`. + diff --git a/docs/reference/esql/functions/kibana/docs/sum.md b/docs/reference/esql/functions/kibana/docs/sum.md new file mode 100644 index 0000000000000..eb72ddb0dece1 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sum.md @@ -0,0 +1,11 @@ + + +### SUM +The sum of a numeric expression. + +``` +FROM employees +| STATS SUM(languages) +``` diff --git a/docs/reference/esql/functions/kibana/docs/to_datetime.md b/docs/reference/esql/functions/kibana/docs/to_datetime.md index 5e8f9c72adc2c..c194dfd17871a 100644 --- a/docs/reference/esql/functions/kibana/docs/to_datetime.md +++ b/docs/reference/esql/functions/kibana/docs/to_datetime.md @@ -11,3 +11,4 @@ To convert dates in other formats, use <>. ROW string = ["1953-09-02T00:00:00.000Z", "1964-06-02T00:00:00.000Z", "1964-06-02 00:00:00"] | EVAL datetime = TO_DATETIME(string) ``` +Note: When converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. diff --git a/docs/reference/esql/functions/kibana/docs/values.md b/docs/reference/esql/functions/kibana/docs/values.md new file mode 100644 index 0000000000000..cba62fc27255e --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/values.md @@ -0,0 +1,13 @@ + + +### VALUES +Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. If you need the values returned in order use <>. + +``` + FROM employees +| EVAL first_letter = SUBSTRING(first_name, 0, 1) +| STATS first_name=MV_SORT(VALUES(first_name)) BY first_letter +| SORT first_letter +``` diff --git a/docs/reference/esql/functions/kibana/docs/weighted_avg.md b/docs/reference/esql/functions/kibana/docs/weighted_avg.md new file mode 100644 index 0000000000000..6b0b2cc8cd287 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/weighted_avg.md @@ -0,0 +1,14 @@ + + +### WEIGHTED_AVG +The weighted average of a numeric expression.
+ +``` +FROM employees +| STATS w_avg = WEIGHTED_AVG(salary, height) by languages +| EVAL w_avg = ROUND(w_avg) +| KEEP w_avg, languages +| SORT languages +``` diff --git a/docs/reference/esql/functions/layout/count.asciidoc b/docs/reference/esql/functions/layout/count.asciidoc new file mode 100644 index 0000000000000..8c16d74cde9a7 --- /dev/null +++ b/docs/reference/esql/functions/layout/count.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-count]] +=== `COUNT` + +*Syntax* + +[.text-center] +image::esql/functions/signature/count.svg[Embedded,opts=inline] + +include::../parameters/count.asciidoc[] +include::../description/count.asciidoc[] +include::../types/count.asciidoc[] +include::../examples/count.asciidoc[] diff --git a/docs/reference/esql/functions/layout/count_distinct.asciidoc b/docs/reference/esql/functions/layout/count_distinct.asciidoc new file mode 100644 index 0000000000000..2c9848186e806 --- /dev/null +++ b/docs/reference/esql/functions/layout/count_distinct.asciidoc @@ -0,0 +1,16 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-count_distinct]] +=== `COUNT_DISTINCT` + +*Syntax* + +[.text-center] +image::esql/functions/signature/count_distinct.svg[Embedded,opts=inline] + +include::../parameters/count_distinct.asciidoc[] +include::../description/count_distinct.asciidoc[] +include::../types/count_distinct.asciidoc[] +include::../examples/count_distinct.asciidoc[] +include::../appendix/count_distinct.asciidoc[] diff --git a/docs/reference/esql/functions/layout/median.asciidoc b/docs/reference/esql/functions/layout/median.asciidoc new file mode 100644 index 0000000000000..c03e73523983d --- /dev/null +++ b/docs/reference/esql/functions/layout/median.asciidoc @@ -0,0 +1,16 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-median]] +=== `MEDIAN` + +*Syntax* + +[.text-center] +image::esql/functions/signature/median.svg[Embedded,opts=inline] + +include::../parameters/median.asciidoc[] +include::../description/median.asciidoc[] +include::../types/median.asciidoc[] +include::../examples/median.asciidoc[] +include::../appendix/median.asciidoc[] diff --git a/docs/reference/esql/functions/layout/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/layout/median_absolute_deviation.asciidoc new file mode 100644 index 0000000000000..b558e6f11d9d0 --- /dev/null +++ b/docs/reference/esql/functions/layout/median_absolute_deviation.asciidoc @@ -0,0 +1,16 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-median_absolute_deviation]] +=== `MEDIAN_ABSOLUTE_DEVIATION` + +*Syntax* + +[.text-center] +image::esql/functions/signature/median_absolute_deviation.svg[Embedded,opts=inline] + +include::../parameters/median_absolute_deviation.asciidoc[] +include::../description/median_absolute_deviation.asciidoc[] +include::../types/median_absolute_deviation.asciidoc[] +include::../examples/median_absolute_deviation.asciidoc[] +include::../appendix/median_absolute_deviation.asciidoc[] diff --git a/docs/reference/esql/functions/layout/mv_percentile.asciidoc b/docs/reference/esql/functions/layout/mv_percentile.asciidoc new file mode 100644 index 0000000000000..a86c4a136b5cd --- /dev/null +++ b/docs/reference/esql/functions/layout/mv_percentile.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-mv_percentile]] +=== `MV_PERCENTILE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_percentile.svg[Embedded,opts=inline] + +include::../parameters/mv_percentile.asciidoc[] +include::../description/mv_percentile.asciidoc[] +include::../types/mv_percentile.asciidoc[] +include::../examples/mv_percentile.asciidoc[] diff --git a/docs/reference/esql/functions/layout/mv_pseries_weighted_sum.asciidoc b/docs/reference/esql/functions/layout/mv_pseries_weighted_sum.asciidoc new file mode 100644 index 0000000000000..7c14ecbc3c935 --- /dev/null +++ b/docs/reference/esql/functions/layout/mv_pseries_weighted_sum.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-mv_pseries_weighted_sum]] +=== `MV_PSERIES_WEIGHTED_SUM` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_pseries_weighted_sum.svg[Embedded,opts=inline] + +include::../parameters/mv_pseries_weighted_sum.asciidoc[] +include::../description/mv_pseries_weighted_sum.asciidoc[] +include::../types/mv_pseries_weighted_sum.asciidoc[] +include::../examples/mv_pseries_weighted_sum.asciidoc[] diff --git a/docs/reference/esql/functions/layout/st_centroid_agg.asciidoc b/docs/reference/esql/functions/layout/st_centroid_agg.asciidoc new file mode 100644 index 0000000000000..6626c162f3b06 --- /dev/null +++ b/docs/reference/esql/functions/layout/st_centroid_agg.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-st_centroid_agg]] +=== `ST_CENTROID_AGG` + +*Syntax* + +[.text-center] +image::esql/functions/signature/st_centroid_agg.svg[Embedded,opts=inline] + +include::../parameters/st_centroid_agg.asciidoc[] +include::../description/st_centroid_agg.asciidoc[] +include::../types/st_centroid_agg.asciidoc[] +include::../examples/st_centroid_agg.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sum.asciidoc b/docs/reference/esql/functions/layout/sum.asciidoc new file mode 100644 index 0000000000000..abac1fdd27b6e --- /dev/null +++ b/docs/reference/esql/functions/layout/sum.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-sum]] +=== `SUM` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sum.svg[Embedded,opts=inline] + +include::../parameters/sum.asciidoc[] +include::../description/sum.asciidoc[] +include::../types/sum.asciidoc[] +include::../examples/sum.asciidoc[] diff --git a/docs/reference/esql/functions/layout/values.asciidoc b/docs/reference/esql/functions/layout/values.asciidoc new file mode 100644 index 0000000000000..7d90d4314699a --- /dev/null +++ b/docs/reference/esql/functions/layout/values.asciidoc @@ -0,0 +1,18 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-values]] +=== `VALUES` + +preview::["Do not use `VALUES` on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + +*Syntax* + +[.text-center] +image::esql/functions/signature/values.svg[Embedded,opts=inline] + +include::../parameters/values.asciidoc[] +include::../description/values.asciidoc[] +include::../types/values.asciidoc[] +include::../examples/values.asciidoc[] +include::../appendix/values.asciidoc[] diff --git a/docs/reference/esql/functions/layout/weighted_avg.asciidoc b/docs/reference/esql/functions/layout/weighted_avg.asciidoc new file mode 100644 index 0000000000000..84db8f1269b94 --- /dev/null +++ b/docs/reference/esql/functions/layout/weighted_avg.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-weighted_avg]] +=== `WEIGHTED_AVG` + +*Syntax* + +[.text-center] +image::esql/functions/signature/weighted_avg.svg[Embedded,opts=inline] + +include::../parameters/weighted_avg.asciidoc[] +include::../description/weighted_avg.asciidoc[] +include::../types/weighted_avg.asciidoc[] +include::../examples/weighted_avg.asciidoc[] diff --git a/docs/reference/esql/functions/median-absolute-deviation.asciidoc b/docs/reference/esql/functions/median-absolute-deviation.asciidoc deleted file mode 100644 index b4f80cced06f6..0000000000000 --- a/docs/reference/esql/functions/median-absolute-deviation.asciidoc +++ /dev/null @@ -1,60 +0,0 @@ -[discrete] -[[esql-agg-median-absolute-deviation]] -=== `MEDIAN_ABSOLUTE_DEVIATION` - -*Syntax* - -[source,esql] ----- -MEDIAN_ABSOLUTE_DEVIATION(expression) ----- - -*Parameters* - -`expression`:: -Expression from which to return the median absolute deviation. - -*Description* - -Returns the median absolute deviation, a measure of variability. It is a robust -statistic, meaning that it is useful for describing data that may have outliers, -or may not be normally distributed. For such data it can be more descriptive -than standard deviation. - -It is calculated as the median of each data point's deviation from the median of -the entire sample. That is, for a random variable `X`, the median absolute -deviation is `median(|median(X) - X|)`. - -NOTE: Like <>, `MEDIAN_ABSOLUTE_DEVIATION` is - <>. - -[WARNING] -==== -`MEDIAN_ABSOLUTE_DEVIATION` is also {wikipedia}/Nondeterministic_algorithm[non-deterministic]. -This means you can get slightly different results using the same data. 
-==== - -*Example* - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats_percentile.csv-spec[tag=median-absolute-deviation] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats_percentile.csv-spec[tag=median-absolute-deviation-result] -|=== - -The expression can use inline functions. For example, to calculate the the -median absolute deviation of the maximum values of a multivalued column, first -use `MV_MAX` to get the maximum value per row, and use the result with the -`MEDIAN_ABSOLUTE_DEVIATION` function: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMADNestedExpression] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMADNestedExpression-result] -|=== diff --git a/docs/reference/esql/functions/median.asciidoc b/docs/reference/esql/functions/median.asciidoc deleted file mode 100644 index 2f7d70775e38e..0000000000000 --- a/docs/reference/esql/functions/median.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -[discrete] -[[esql-agg-median]] -=== `MEDIAN` - -*Syntax* - -[source,esql] ----- -MEDIAN(expression) ----- - -*Parameters* - -`expression`:: -Expression from which to return the median value. - -*Description* - -Returns the value that is greater than half of all values and less than half of -all values, also known as the 50% <>. - -NOTE: Like <>, `MEDIAN` is <>. - -[WARNING] -==== -`MEDIAN` is also {wikipedia}/Nondeterministic_algorithm[non-deterministic]. -This means you can get slightly different results using the same data. -==== - -*Example* - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats_percentile.csv-spec[tag=median] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats_percentile.csv-spec[tag=median-result] -|=== - -The expression can use inline functions. For example, to calculate the median of -the maximum values of a multivalued column, first use `MV_MAX` to get the -maximum value per row, and use the result with the `MEDIAN` function: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMedianNestedExpression] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMedianNestedExpression-result] -|=== diff --git a/docs/reference/esql/functions/mv-functions.asciidoc b/docs/reference/esql/functions/mv-functions.asciidoc index 0f4f6233d446c..bd5f14cdd3557 100644 --- a/docs/reference/esql/functions/mv-functions.asciidoc +++ b/docs/reference/esql/functions/mv-functions.asciidoc @@ -18,6 +18,7 @@ * <> * <> * <> +* <> * <> * <> * <> @@ -34,6 +35,7 @@ include::layout/mv_last.asciidoc[] include::layout/mv_max.asciidoc[] include::layout/mv_median.asciidoc[] include::layout/mv_min.asciidoc[] +include::layout/mv_pseries_weighted_sum.asciidoc[] include::layout/mv_slice.asciidoc[] include::layout/mv_sort.asciidoc[] include::layout/mv_sum.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/bucket.asciidoc b/docs/reference/esql/functions/parameters/bucket.asciidoc index 39aac14aaa36d..09c720d6095f3 100644 --- a/docs/reference/esql/functions/parameters/bucket.asciidoc +++ b/docs/reference/esql/functions/parameters/bucket.asciidoc @@ -6,10 +6,10 @@ Numeric or date expression from which to derive buckets. `buckets`:: -Target number of buckets. +Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted. 
`from`:: -Start of the range. Can be a number or a date expressed as a string. +Start of the range. Can be a number, a date or a date expressed as a string. `to`:: -End of the range. Can be a number or a date expressed as a string. +End of the range. Can be a number, a date or a date expressed as a string. diff --git a/docs/reference/esql/functions/parameters/count.asciidoc b/docs/reference/esql/functions/parameters/count.asciidoc new file mode 100644 index 0000000000000..d470061a83e2e --- /dev/null +++ b/docs/reference/esql/functions/parameters/count.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: +Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows). diff --git a/docs/reference/esql/functions/parameters/count_distinct.asciidoc b/docs/reference/esql/functions/parameters/count_distinct.asciidoc new file mode 100644 index 0000000000000..f84cf27c3e075 --- /dev/null +++ b/docs/reference/esql/functions/parameters/count_distinct.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: +Column or literal for which to count the number of distinct values. + +`precision`:: +Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000. diff --git a/docs/reference/esql/functions/parameters/median.asciidoc b/docs/reference/esql/functions/parameters/median.asciidoc new file mode 100644 index 0000000000000..91c56709d182a --- /dev/null +++ b/docs/reference/esql/functions/parameters/median.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: + diff --git a/docs/reference/esql/functions/parameters/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/parameters/median_absolute_deviation.asciidoc new file mode 100644 index 0000000000000..91c56709d182a --- /dev/null +++ b/docs/reference/esql/functions/parameters/median_absolute_deviation.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: + diff --git a/docs/reference/esql/functions/parameters/mv_percentile.asciidoc b/docs/reference/esql/functions/parameters/mv_percentile.asciidoc new file mode 100644 index 0000000000000..57804185e191a --- /dev/null +++ b/docs/reference/esql/functions/parameters/mv_percentile.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: +Multivalue expression. + +`percentile`:: +The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead. diff --git a/docs/reference/esql/functions/parameters/mv_pseries_weighted_sum.asciidoc b/docs/reference/esql/functions/parameters/mv_pseries_weighted_sum.asciidoc new file mode 100644 index 0000000000000..3a828f1464824 --- /dev/null +++ b/docs/reference/esql/functions/parameters/mv_pseries_weighted_sum.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Parameters* + +`number`:: +Multivalue expression. + +`p`:: +It is a constant number that represents the 'p' parameter in the P-Series. It impacts every element's contribution to the weighted sum. diff --git a/docs/reference/esql/functions/parameters/st_centroid_agg.asciidoc b/docs/reference/esql/functions/parameters/st_centroid_agg.asciidoc new file mode 100644 index 0000000000000..8903aa1a472a3 --- /dev/null +++ b/docs/reference/esql/functions/parameters/st_centroid_agg.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: + diff --git a/docs/reference/esql/functions/parameters/sum.asciidoc b/docs/reference/esql/functions/parameters/sum.asciidoc new file mode 100644 index 0000000000000..91c56709d182a --- /dev/null +++ b/docs/reference/esql/functions/parameters/sum.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: + diff --git a/docs/reference/esql/functions/parameters/values.asciidoc b/docs/reference/esql/functions/parameters/values.asciidoc new file mode 100644 index 0000000000000..8903aa1a472a3 --- /dev/null +++ b/docs/reference/esql/functions/parameters/values.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: + diff --git a/docs/reference/esql/functions/parameters/weighted_avg.asciidoc b/docs/reference/esql/functions/parameters/weighted_avg.asciidoc new file mode 100644 index 0000000000000..280dd8ca347d4 --- /dev/null +++ b/docs/reference/esql/functions/parameters/weighted_avg.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: +A numeric value. + +`weight`:: +A numeric weight. diff --git a/docs/reference/esql/functions/round.asciidoc b/docs/reference/esql/functions/round.asciidoc deleted file mode 100644 index e792db6c1ed69..0000000000000 --- a/docs/reference/esql/functions/round.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[discrete] -[[esql-round]] -=== `ROUND` -*Syntax* - -[.text-center] -image::esql/functions/signature/round.svg[Embedded,opts=inline] - -*Parameters* - -`value`:: -Numeric expression. If `null`, the function returns `null`. - -`decimals`:: -Numeric expression. If `null`, the function returns `null`. - -*Description* - -Rounds a number to the closest number with the specified number of digits. -Defaults to 0 digits if no number of digits is provided. If the specified number -of digits is negative, rounds to the number of digits left of the decimal point. 
- -include::types/round.asciidoc[] - -*Example* - -[source.merge.styled,esql] ----- -include::{esql-specs}/docs.csv-spec[tag=round] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/docs.csv-spec[tag=round-result] -|=== diff --git a/docs/reference/esql/functions/signature/count.svg b/docs/reference/esql/functions/signature/count.svg new file mode 100644 index 0000000000000..9b19652b98788 --- /dev/null +++ b/docs/reference/esql/functions/signature/count.svg @@ -0,0 +1 @@ +COUNT(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/count_distinct.svg b/docs/reference/esql/functions/signature/count_distinct.svg new file mode 100644 index 0000000000000..a5b77da7c555a --- /dev/null +++ b/docs/reference/esql/functions/signature/count_distinct.svg @@ -0,0 +1 @@ +COUNT_DISTINCT(field,precision) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/median.svg b/docs/reference/esql/functions/signature/median.svg new file mode 100644 index 0000000000000..c61b3a9e77817 --- /dev/null +++ b/docs/reference/esql/functions/signature/median.svg @@ -0,0 +1 @@ +MEDIAN(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/median_absolute_deviation.svg b/docs/reference/esql/functions/signature/median_absolute_deviation.svg new file mode 100644 index 0000000000000..bcf01de52ac12 --- /dev/null +++ b/docs/reference/esql/functions/signature/median_absolute_deviation.svg @@ -0,0 +1 @@ +MEDIAN_ABSOLUTE_DEVIATION(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mv_percentile.svg b/docs/reference/esql/functions/signature/mv_percentile.svg new file mode 100644 index 0000000000000..b4d623636572f --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_percentile.svg @@ -0,0 +1 @@ +MV_PERCENTILE(number,percentile) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mv_pseries_weighted_sum.svg b/docs/reference/esql/functions/signature/mv_pseries_weighted_sum.svg new file mode 100644 index 0000000000000..7e3b42161e52c --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_pseries_weighted_sum.svg @@ -0,0 +1 @@ +MV_PSERIES_WEIGHTED_SUM(number,p) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/st_centroid_agg.svg b/docs/reference/esql/functions/signature/st_centroid_agg.svg new file mode 100644 index 0000000000000..45d509614a526 --- /dev/null +++ b/docs/reference/esql/functions/signature/st_centroid_agg.svg @@ -0,0 +1 @@ +ST_CENTROID_AGG(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sum.svg b/docs/reference/esql/functions/signature/sum.svg new file mode 100644 index 0000000000000..d1024edc2a5b9 --- /dev/null +++ b/docs/reference/esql/functions/signature/sum.svg @@ -0,0 +1 @@ +SUM(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/values.svg b/docs/reference/esql/functions/signature/values.svg new file mode 100644 index 0000000000000..0fa116ce1eb14 --- /dev/null +++ b/docs/reference/esql/functions/signature/values.svg @@ -0,0 +1 @@ +VALUES(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/weighted_avg.svg b/docs/reference/esql/functions/signature/weighted_avg.svg new file mode 100644 index 0000000000000..581e8cd79b0c2 --- /dev/null +++ b/docs/reference/esql/functions/signature/weighted_avg.svg @@ -0,0 +1 @@ +WEIGHTED_AVG(number,weight) \ No newline at end of file 
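The `ROUND` page removed above described rounding to a negative number of digits, i.e. to the left of the decimal point. A small sketch of that behavior (the input value is ours, not from this change):

```
ROW n = 123.45
| EVAL nearest_ten = ROUND(n, -1), nearest_int = ROUND(n)
// nearest_ten is 120.0; nearest_int defaults to 0 decimals and gives 123.0
```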
diff --git a/docs/reference/esql/functions/st_centroid_agg.asciidoc b/docs/reference/esql/functions/st_centroid_agg.asciidoc deleted file mode 100644 index c980560f8f198..0000000000000 --- a/docs/reference/esql/functions/st_centroid_agg.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -[discrete] -[[esql-agg-st-centroid]] -=== `ST_CENTROID_AGG` - -experimental::[] - -Calculate the spatial centroid over a field with spatial point geometry type. - -[source.merge.styled,esql] ----- -include::{esql-specs}/spatial.csv-spec[tag=st_centroid_agg-airports] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/spatial.csv-spec[tag=st_centroid_agg-airports-result] -|=== - -Supported types: - -[%header.monospaced.styled,format=dsv,separator=|] -|=== -v | result -geo_point | geo_point -cartesian_point | cartesian_point -|=== diff --git a/docs/reference/esql/functions/sum.asciidoc b/docs/reference/esql/functions/sum.asciidoc deleted file mode 100644 index efe65d5503ec6..0000000000000 --- a/docs/reference/esql/functions/sum.asciidoc +++ /dev/null @@ -1,41 +0,0 @@ -[discrete] -[[esql-agg-sum]] -=== `SUM` - -*Syntax* - -[source,esql] ----- -SUM(expression) ----- - -`expression`:: -Numeric expression. - -*Description* - -Returns the sum of a numeric expression. - -*Example* - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=sum] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=sum-result] -|=== - -The expression can use inline functions. For example, to calculate -the sum of each employee's maximum salary changes, apply the -`MV_MAX` function to each row and then sum the results: - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=docsStatsSumNestedExpression] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=docsStatsSumNestedExpression-result] -|=== diff --git a/docs/reference/esql/functions/types/add.asciidoc b/docs/reference/esql/functions/types/add.asciidoc index a0215a803d4e3..54d1aec463c1a 100644 --- a/docs/reference/esql/functions/types/add.asciidoc +++ b/docs/reference/esql/functions/types/add.asciidoc @@ -5,10 +5,10 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result +date | date_period | date +date | time_duration | date +date_period | date | date date_period | date_period | date_period -date_period | datetime | datetime -datetime | date_period | datetime -datetime | time_duration | datetime double | double | double double | integer | double double | long | double @@ -18,7 +18,7 @@ integer | long | long long | double | double long | integer | long long | long | long -time_duration | datetime | datetime +time_duration | date | date time_duration | time_duration | time_duration unsigned_long | unsigned_long | unsigned_long |=== diff --git a/docs/reference/esql/functions/types/bucket.asciidoc b/docs/reference/esql/functions/types/bucket.asciidoc index d1ce8e499eb07..172e84b6f7860 100644 --- a/docs/reference/esql/functions/types/bucket.asciidoc +++ b/docs/reference/esql/functions/types/bucket.asciidoc @@ -5,9 +5,17 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | buckets | from | to | result -datetime | date_period | | | datetime -datetime | integer | datetime | datetime | datetime -datetime | time_duration | | | datetime +date | date_period | | | date +date | integer | date | date | date +date | integer | date | keyword | date +date | integer | date | text | date +date 
| integer | keyword | date | date +date | integer | keyword | keyword | date +date | integer | keyword | text | date +date | integer | text | date | date +date | integer | text | keyword | date +date | integer | text | text | date +date | time_duration | | | date double | double | | | double double | integer | double | double | double double | integer | double | integer | double @@ -18,6 +26,8 @@ double | integer | integer | long | double double | integer | long | double | double double | integer | long | integer | double double | integer | long | long | double +double | integer | | | double +double | long | | | double integer | double | | | double integer | integer | double | double | double integer | integer | double | integer | double @@ -28,6 +38,8 @@ integer | integer | integer | long | double integer | integer | long | double | double integer | integer | long | integer | double integer | integer | long | long | double +integer | integer | | | double +integer | long | | | double long | double | | | double long | integer | double | double | double long | integer | double | integer | double @@ -38,4 +50,6 @@ long | integer | integer | long | double long | integer | long | double | double long | integer | long | integer | double long | integer | long | long | double +long | integer | | | double +long | long | | | double |=== diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index 85e4193b5bf2f..f6c8cfe9361d1 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -7,7 +7,7 @@ condition | trueValue | result boolean | boolean | boolean boolean | cartesian_point | cartesian_point -boolean | datetime | datetime +boolean | date | date boolean | double | double boolean | geo_point | geo_point boolean | integer | integer diff --git a/docs/reference/esql/functions/types/coalesce.asciidoc b/docs/reference/esql/functions/types/coalesce.asciidoc index 841d836f6837e..368a12db0dca4 100644 --- a/docs/reference/esql/functions/types/coalesce.asciidoc +++ b/docs/reference/esql/functions/types/coalesce.asciidoc @@ -9,7 +9,7 @@ boolean | boolean | boolean boolean | | boolean cartesian_point | cartesian_point | cartesian_point cartesian_shape | cartesian_shape | cartesian_shape -datetime | datetime | datetime +date | date | date geo_point | geo_point | geo_point geo_shape | geo_shape | geo_shape integer | integer | integer diff --git a/docs/reference/esql/functions/types/count.asciidoc b/docs/reference/esql/functions/types/count.asciidoc new file mode 100644 index 0000000000000..959c94c1ec358 --- /dev/null +++ b/docs/reference/esql/functions/types/count.asciidoc @@ -0,0 +1,20 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | result +boolean | long +cartesian_point | long +date | long +double | long +geo_point | long +integer | long +ip | long +keyword | long +long | long +text | long +unsigned_long | long +version | long +|=== diff --git a/docs/reference/esql/functions/types/count_distinct.asciidoc b/docs/reference/esql/functions/types/count_distinct.asciidoc new file mode 100644 index 0000000000000..c365c8814573c --- /dev/null +++ b/docs/reference/esql/functions/types/count_distinct.asciidoc @@ -0,0 +1,44 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | precision | result +boolean | integer | long +boolean | long | long +boolean | unsigned_long | long +boolean | | long +date | integer | long +date | long | long +date | unsigned_long | long +date | | long +double | integer | long +double | long | long +double | unsigned_long | long +double | | long +integer | integer | long +integer | long | long +integer | unsigned_long | long +integer | | long +ip | integer | long +ip | long | long +ip | unsigned_long | long +ip | | long +keyword | integer | long +keyword | long | long +keyword | unsigned_long | long +keyword | | long +long | integer | long +long | long | long +long | unsigned_long | long +long | | long +text | integer | long +text | long | long +text | unsigned_long | long +text | | long +version | integer | long +version | long | long +version | unsigned_long | long +version | | long +|=== diff --git a/docs/reference/esql/functions/types/date_diff.asciidoc b/docs/reference/esql/functions/types/date_diff.asciidoc index 98adcef51e75c..b0a4818f412ac 100644 --- a/docs/reference/esql/functions/types/date_diff.asciidoc +++ b/docs/reference/esql/functions/types/date_diff.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== unit | startTimestamp | endTimestamp | result -keyword | datetime | datetime | integer -text | datetime | datetime | integer +keyword | date | date | integer +text | date | date | integer |=== diff --git a/docs/reference/esql/functions/types/date_extract.asciidoc b/docs/reference/esql/functions/types/date_extract.asciidoc index 43702ef0671a7..ec9bf70c221cc 100644 --- a/docs/reference/esql/functions/types/date_extract.asciidoc +++ b/docs/reference/esql/functions/types/date_extract.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== datePart | date | result -keyword | datetime | long -text | datetime | long +keyword | date | long +text | date | long |=== diff --git a/docs/reference/esql/functions/types/date_format.asciidoc b/docs/reference/esql/functions/types/date_format.asciidoc index a76f38653b9b8..b2e97dfa8835a 100644 --- a/docs/reference/esql/functions/types/date_format.asciidoc +++ b/docs/reference/esql/functions/types/date_format.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== dateFormat | date | result -keyword | datetime | keyword -text | datetime | keyword +keyword | date | keyword +text | date | keyword |=== diff --git a/docs/reference/esql/functions/types/date_parse.asciidoc b/docs/reference/esql/functions/types/date_parse.asciidoc index 314d02eb06271..f3eab18309dd8 100644 --- a/docs/reference/esql/functions/types/date_parse.asciidoc +++ b/docs/reference/esql/functions/types/date_parse.asciidoc @@ -5,8 +5,8 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== datePattern | dateString | result -keyword | keyword | datetime -keyword | text | datetime -text | keyword | datetime -text | text | datetime +keyword | keyword | date +keyword | text | date +text | keyword | date +text | text | date |=== diff --git a/docs/reference/esql/functions/types/date_trunc.asciidoc b/docs/reference/esql/functions/types/date_trunc.asciidoc index 8df45cfef54a8..aa7dee99c6c44 100644 --- a/docs/reference/esql/functions/types/date_trunc.asciidoc +++ b/docs/reference/esql/functions/types/date_trunc.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== interval | date | result -date_period | datetime | datetime -time_duration | 
datetime | datetime +date_period | date | date +time_duration | date | date |=== diff --git a/docs/reference/esql/functions/types/ends_with.asciidoc b/docs/reference/esql/functions/types/ends_with.asciidoc index a0236634bbf01..3e8e77448ce89 100644 --- a/docs/reference/esql/functions/types/ends_with.asciidoc +++ b/docs/reference/esql/functions/types/ends_with.asciidoc @@ -6,5 +6,7 @@ |=== str | suffix | result keyword | keyword | boolean +keyword | text | boolean +text | keyword | boolean text | text | boolean |=== diff --git a/docs/reference/esql/functions/types/equals.asciidoc b/docs/reference/esql/functions/types/equals.asciidoc index 497c9319fedb3..ad0e46ef4b8da 100644 --- a/docs/reference/esql/functions/types/equals.asciidoc +++ b/docs/reference/esql/functions/types/equals.asciidoc @@ -8,7 +8,7 @@ lhs | rhs | result boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greater_than.asciidoc b/docs/reference/esql/functions/types/greater_than.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/greater_than.asciidoc +++ b/docs/reference/esql/functions/types/greater_than.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/in.asciidoc b/docs/reference/esql/functions/types/in.asciidoc new file mode 100644 index 0000000000000..6ed2c250ef0ac --- /dev/null +++ b/docs/reference/esql/functions/types/in.asciidoc @@ -0,0 +1,22 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | inlist | result +boolean | boolean | boolean +cartesian_point | cartesian_point | boolean +cartesian_shape | cartesian_shape | boolean +double | double | boolean +geo_point | geo_point | boolean +geo_shape | geo_shape | boolean +integer | integer | boolean +ip | ip | boolean +keyword | keyword | boolean +keyword | text | boolean +long | long | boolean +text | keyword | boolean +text | text | boolean +version | version | boolean +|=== diff --git a/docs/reference/esql/functions/types/less_than.asciidoc b/docs/reference/esql/functions/types/less_than.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/less_than.asciidoc +++ b/docs/reference/esql/functions/types/less_than.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc index 771daf1a953b2..c506328126a94 100644 --- a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc @@ -5,7 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index 5b7293d4a4293..35ce5811e0cd0 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -6,9 +6,12 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip +keyword | keyword long | long +text | text +version | version |=== diff --git a/docs/reference/esql/functions/types/median.asciidoc b/docs/reference/esql/functions/types/median.asciidoc new file mode 100644 index 0000000000000..273dae4af76c2 --- /dev/null +++ b/docs/reference/esql/functions/types/median.asciidoc @@ -0,0 +1,11 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | result +double | double +integer | double +long | double +|=== diff --git a/docs/reference/esql/functions/types/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/types/median_absolute_deviation.asciidoc new file mode 100644 index 0000000000000..273dae4af76c2 --- /dev/null +++ b/docs/reference/esql/functions/types/median_absolute_deviation.asciidoc @@ -0,0 +1,11 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | result +double | double +integer | double +long | double +|=== diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc index 5b7293d4a4293..35ce5811e0cd0 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -6,9 +6,12 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip +keyword | keyword long | long +text | text +version | version |=== diff --git a/docs/reference/esql/functions/types/mv_append.asciidoc b/docs/reference/esql/functions/types/mv_append.asciidoc index 49dcef6dc8860..a1894e429ae82 100644 --- a/docs/reference/esql/functions/types/mv_append.asciidoc +++ b/docs/reference/esql/functions/types/mv_append.asciidoc @@ -8,7 +8,7 @@ field1 | field2 | result boolean | boolean | boolean cartesian_point | cartesian_point | cartesian_point cartesian_shape | cartesian_shape | cartesian_shape -datetime | datetime | datetime +date | date | date double | double | double geo_point | geo_point | geo_point geo_shape | geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_count.asciidoc b/docs/reference/esql/functions/types/mv_count.asciidoc index 8af6b76591acb..260c531731f04 100644 --- a/docs/reference/esql/functions/types/mv_count.asciidoc +++ b/docs/reference/esql/functions/types/mv_count.asciidoc @@ -8,7 +8,7 @@ field | result boolean | integer cartesian_point | integer cartesian_shape | integer -datetime | integer +date | integer double | integer geo_point | integer geo_shape | integer diff --git a/docs/reference/esql/functions/types/mv_dedupe.asciidoc b/docs/reference/esql/functions/types/mv_dedupe.asciidoc index a6b78f781f17a..68e546451c8cb 100644 --- a/docs/reference/esql/functions/types/mv_dedupe.asciidoc +++ b/docs/reference/esql/functions/types/mv_dedupe.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index e077c57971a4a..35633544d99a0 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index e077c57971a4a..35633544d99a0 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -8,7 +8,7 @@ field | result boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape -datetime | datetime +date | date double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_max.asciidoc b/docs/reference/esql/functions/types/mv_max.asciidoc index 4e5f0a5e0ae89..8ea36aebbad37 100644 --- a/docs/reference/esql/functions/types/mv_max.asciidoc +++ b/docs/reference/esql/functions/types/mv_max.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | 
date double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_min.asciidoc b/docs/reference/esql/functions/types/mv_min.asciidoc index 4e5f0a5e0ae89..8ea36aebbad37 100644 --- a/docs/reference/esql/functions/types/mv_min.asciidoc +++ b/docs/reference/esql/functions/types/mv_min.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | boolean -datetime | datetime +date | date double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_percentile.asciidoc b/docs/reference/esql/functions/types/mv_percentile.asciidoc new file mode 100644 index 0000000000000..99a58b9c3d2e2 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_percentile.asciidoc @@ -0,0 +1,17 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | percentile | result +double | double | double +double | integer | double +double | long | double +integer | double | integer +integer | integer | integer +integer | long | integer +long | double | long +long | integer | long +long | long | long +|=== diff --git a/docs/reference/esql/functions/types/mv_pseries_weighted_sum.asciidoc b/docs/reference/esql/functions/types/mv_pseries_weighted_sum.asciidoc new file mode 100644 index 0000000000000..f28e61f17aa33 --- /dev/null +++ b/docs/reference/esql/functions/types/mv_pseries_weighted_sum.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | p | result +double | double | double +|=== diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc index 568de10f53d32..0a9dc073370c7 100644 --- a/docs/reference/esql/functions/types/mv_slice.asciidoc +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -8,7 +8,7 @@ field | start | end | result boolean | integer | integer | boolean cartesian_point | integer | integer | cartesian_point cartesian_shape | integer | integer | cartesian_shape -datetime | integer | integer | datetime +date | integer | integer | date double | integer | integer | double geo_point | integer | integer | geo_point geo_shape | integer | integer | geo_shape diff --git a/docs/reference/esql/functions/types/mv_sort.asciidoc b/docs/reference/esql/functions/types/mv_sort.asciidoc index 24925ca8a6587..93965187482ac 100644 --- a/docs/reference/esql/functions/types/mv_sort.asciidoc +++ b/docs/reference/esql/functions/types/mv_sort.asciidoc @@ -6,7 +6,7 @@ |=== field | order | result boolean | keyword | boolean -datetime | keyword | datetime +date | keyword | date double | keyword | double integer | keyword | integer ip | keyword | ip diff --git a/docs/reference/esql/functions/types/neg.asciidoc b/docs/reference/esql/functions/types/neg.asciidoc index 28d3b2a512dec..c0d0a21711552 100644 --- a/docs/reference/esql/functions/types/neg.asciidoc +++ b/docs/reference/esql/functions/types/neg.asciidoc @@ -4,7 +4,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -v | result +field | result date_period | date_period double | double integer | integer diff --git a/docs/reference/esql/functions/types/not_equals.asciidoc b/docs/reference/esql/functions/types/not_equals.asciidoc index 497c9319fedb3..ad0e46ef4b8da 100644 --- 
a/docs/reference/esql/functions/types/not_equals.asciidoc +++ b/docs/reference/esql/functions/types/not_equals.asciidoc @@ -8,7 +8,7 @@ lhs | rhs | result boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean -datetime | datetime | boolean +date | date | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/now.asciidoc b/docs/reference/esql/functions/types/now.asciidoc index 5737d98f2f7db..b474ab1042050 100644 --- a/docs/reference/esql/functions/types/now.asciidoc +++ b/docs/reference/esql/functions/types/now.asciidoc @@ -5,5 +5,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== result -datetime +date |=== diff --git a/docs/reference/esql/functions/types/rlike.asciidoc b/docs/reference/esql/functions/types/rlike.asciidoc index 436333fddf5ee..46532f2af3bf3 100644 --- a/docs/reference/esql/functions/types/rlike.asciidoc +++ b/docs/reference/esql/functions/types/rlike.asciidoc @@ -4,7 +4,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -str | pattern | caseInsensitive | result -keyword | keyword | boolean | boolean -text | text | boolean | boolean +str | pattern | result +keyword | keyword | boolean +text | text | boolean |=== diff --git a/docs/reference/esql/functions/types/st_centroid_agg.asciidoc b/docs/reference/esql/functions/types/st_centroid_agg.asciidoc new file mode 100644 index 0000000000000..da95e0b9bec1a --- /dev/null +++ b/docs/reference/esql/functions/types/st_centroid_agg.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | result +cartesian_point | cartesian_point +geo_point | geo_point +|=== diff --git a/docs/reference/esql/functions/types/starts_with.asciidoc b/docs/reference/esql/functions/types/starts_with.asciidoc index a6c77014966b4..c88378044f0db 100644 --- a/docs/reference/esql/functions/types/starts_with.asciidoc +++ b/docs/reference/esql/functions/types/starts_with.asciidoc @@ -6,5 +6,7 @@ |=== str | prefix | result keyword | keyword | boolean +keyword | text | boolean +text | keyword | boolean text | text | boolean |=== diff --git a/docs/reference/esql/functions/types/sub.asciidoc b/docs/reference/esql/functions/types/sub.asciidoc index d309f651705f0..c3ded301ebe68 100644 --- a/docs/reference/esql/functions/types/sub.asciidoc +++ b/docs/reference/esql/functions/types/sub.asciidoc @@ -5,9 +5,9 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== lhs | rhs | result +date | date_period | date +date | time_duration | date date_period | date_period | date_period -datetime | date_period | datetime -datetime | time_duration | datetime double | double | double double | integer | double double | long | double diff --git a/docs/reference/esql/functions/types/sum.asciidoc b/docs/reference/esql/functions/types/sum.asciidoc new file mode 100644 index 0000000000000..aa4c3ad0d7dd8 --- /dev/null +++ b/docs/reference/esql/functions/types/sum.asciidoc @@ -0,0 +1,11 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | result +double | double +integer | long +long | long +|=== diff --git a/docs/reference/esql/functions/types/to_datetime.asciidoc b/docs/reference/esql/functions/types/to_datetime.asciidoc index 52c4cebb661cf..80c986efca794 100644 --- a/docs/reference/esql/functions/types/to_datetime.asciidoc +++ b/docs/reference/esql/functions/types/to_datetime.asciidoc @@ -5,11 +5,11 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | result -datetime | datetime -double | datetime -integer | datetime -keyword | datetime -long | datetime -text | datetime -unsigned_long | datetime +date | date +double | date +integer | date +keyword | date +long | date +text | date +unsigned_long | date |=== diff --git a/docs/reference/esql/functions/types/to_double.asciidoc b/docs/reference/esql/functions/types/to_double.asciidoc index cff686c7bc4ca..d5f5833cd7249 100644 --- a/docs/reference/esql/functions/types/to_double.asciidoc +++ b/docs/reference/esql/functions/types/to_double.asciidoc @@ -9,7 +9,7 @@ boolean | double counter_double | double counter_integer | double counter_long | double -datetime | double +date | double double | double integer | double keyword | double diff --git a/docs/reference/esql/functions/types/to_integer.asciidoc b/docs/reference/esql/functions/types/to_integer.asciidoc index 974f3c9c82d88..d67f8f07affd9 100644 --- a/docs/reference/esql/functions/types/to_integer.asciidoc +++ b/docs/reference/esql/functions/types/to_integer.asciidoc @@ -7,7 +7,7 @@ field | result boolean | integer counter_integer | integer -datetime | integer +date | integer double | integer integer | integer keyword | integer diff --git a/docs/reference/esql/functions/types/to_long.asciidoc b/docs/reference/esql/functions/types/to_long.asciidoc index b3959c5444e34..a07990cb1cfbf 100644 --- a/docs/reference/esql/functions/types/to_long.asciidoc +++ b/docs/reference/esql/functions/types/to_long.asciidoc @@ -8,7 +8,7 @@ field | result boolean | long counter_integer | long counter_long | long -datetime | long +date | long double | long integer | long keyword | long diff --git a/docs/reference/esql/functions/types/to_string.asciidoc b/docs/reference/esql/functions/types/to_string.asciidoc index f14cfbb39929f..26a5b31a2a589 100644 --- a/docs/reference/esql/functions/types/to_string.asciidoc +++ b/docs/reference/esql/functions/types/to_string.asciidoc @@ -8,7 +8,7 @@ field | result boolean | keyword cartesian_point | keyword cartesian_shape | keyword -datetime | keyword +date | keyword double | keyword geo_point | keyword geo_shape | keyword diff --git a/docs/reference/esql/functions/types/to_unsigned_long.asciidoc b/docs/reference/esql/functions/types/to_unsigned_long.asciidoc index a271e1a19321d..87b21f3948dad 100644 --- a/docs/reference/esql/functions/types/to_unsigned_long.asciidoc +++ b/docs/reference/esql/functions/types/to_unsigned_long.asciidoc @@ -6,7 +6,7 @@ |=== field | result boolean | unsigned_long -datetime | unsigned_long +date | unsigned_long double | unsigned_long integer | unsigned_long keyword | unsigned_long diff --git a/docs/reference/esql/functions/types/top.asciidoc b/docs/reference/esql/functions/types/top.asciidoc index 6cfee3dfaf941..0eb329c10b9ed 100644 --- a/docs/reference/esql/functions/types/top.asciidoc +++ b/docs/reference/esql/functions/types/top.asciidoc @@ -6,8 +6,9 @@ |=== field | limit | order | result boolean | integer | keyword | boolean -datetime | integer | keyword | datetime +date | 
integer | keyword | date double | integer | keyword | double integer | integer | keyword | integer +ip | integer | keyword | ip long | integer | keyword | long |=== diff --git a/docs/reference/esql/functions/types/values.asciidoc b/docs/reference/esql/functions/types/values.asciidoc new file mode 100644 index 0000000000000..35ce5811e0cd0 --- /dev/null +++ b/docs/reference/esql/functions/types/values.asciidoc @@ -0,0 +1,17 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | result +boolean | boolean +date | date +double | double +integer | integer +ip | ip +keyword | keyword +long | long +text | text +version | version +|=== diff --git a/docs/reference/esql/functions/types/weighted_avg.asciidoc b/docs/reference/esql/functions/types/weighted_avg.asciidoc new file mode 100644 index 0000000000000..55cc14eb453c3 --- /dev/null +++ b/docs/reference/esql/functions/types/weighted_avg.asciidoc @@ -0,0 +1,17 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | weight | result +double | double | double +double | integer | double +double | long | double +integer | double | double +integer | integer | double +integer | long | double +long | double | double +long | integer | double +long | long | double +|=== diff --git a/docs/reference/esql/functions/values.asciidoc b/docs/reference/esql/functions/values.asciidoc deleted file mode 100644 index f13338a572b36..0000000000000 --- a/docs/reference/esql/functions/values.asciidoc +++ /dev/null @@ -1,38 +0,0 @@ -[discrete] -[[esql-agg-values]] -=== `VALUES` - -preview::["Do not use `VALUES` on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] - -*Syntax* - -[source,esql] ----- -VALUES(expression) ----- - -`expression`:: -Expression of any type except `geo_point`, `cartesian_point`, `geo_shape`, or `cartesian_shape`. - -*Description* - -Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. -If you need the values returned in order use <>. - -WARNING: This can use a significant amount of memory and ES|QL doesn't yet - grow aggregations beyond memory. So this aggregation will work until - it is used to collect more values than can fit into memory. Once it - collects too many values it will fail the query with - a <>. - -*Example* - -[source.merge.styled,esql] ----- -include::{esql-specs}/string.csv-spec[tag=values-grouped] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/string.csv-spec[tag=values-grouped-result] -|=== - diff --git a/docs/reference/esql/functions/weighted-avg.asciidoc b/docs/reference/esql/functions/weighted-avg.asciidoc deleted file mode 100644 index 4f166801641df..0000000000000 --- a/docs/reference/esql/functions/weighted-avg.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -[discrete] -[[esql-agg-weighted-avg]] -=== `WEIGHTED_AVG` - -*Syntax* - -[source,esql] ----- -WEIGHTED_AVG(expression, weight) ----- - -`expression`:: -Numeric expression. - -`weight`:: -Numeric weight. - -*Description* - -The weighted average of a numeric expression. 
- -*Supported types* - -The result is always a `double` no matter the input type. - -*Examples* - -[source.merge.styled,esql] ----- -include::{esql-specs}/stats.csv-spec[tag=weighted-avg] ----- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats.csv-spec[tag=weighted-avg-result] -|=== diff --git a/docs/reference/esql/processing-commands/inlinestats.asciidoc b/docs/reference/esql/processing-commands/inlinestats.asciidoc new file mode 100644 index 0000000000000..0b8d38c7b280f --- /dev/null +++ b/docs/reference/esql/processing-commands/inlinestats.asciidoc @@ -0,0 +1,102 @@ +[discrete] +[[esql-inlinestats-by]] +=== `INLINESTATS ... BY` + +experimental::["INLINESTATS is highly experimental and only available in SNAPSHOT versions."] + +The `INLINESTATS` command calculates an aggregate result and adds new columns +with the result to the stream of input data. + +**Syntax** + +[source,esql] +---- +INLINESTATS [column1 =] expression1[, ..., [columnN =] expressionN] +[BY grouping_expression1[, ..., grouping_expressionN]] +---- + +*Parameters* + +`columnX`:: +The name by which the aggregated value is returned. If omitted, the name is +equal to the corresponding expression (`expressionX`). If multiple columns +have the same name, all but the rightmost column with this name will be ignored. + +`expressionX`:: +An expression that computes an aggregated value. If its name coincides with one +of the computed columns, that column will be ignored. + +`grouping_expressionX`:: +An expression that outputs the values to group by. + +NOTE: Individual `null` values are skipped when computing aggregations. + +*Description* + +The `INLINESTATS` command calculates an aggregate result and merges that result +back into the stream of input data. Without the optional `BY` clause this will +produce a single result which is appended to each row. With a `BY` clause this +will produce one result per grouping and merge the result into the stream based on +matching group keys. + +All of the <> are supported. + +*Examples* + +Find the employees that speak the most languages (it's a tie!): + +[source.merge.styled,esql] +---- +include::{esql-specs}/inlinestats.csv-spec[tag=max-languages] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/inlinestats.csv-spec[tag=max-languages-result] +|=== + +Find the longest tenured employee whose last name starts with each letter of the alphabet: + +[source.merge.styled,esql] +---- +include::{esql-specs}/inlinestats.csv-spec[tag=longest-tenured-by-first] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/inlinestats.csv-spec[tag=longest-tenured-by-first-result] +|=== + +Find the northernmost and southernmost airports: + +[source.merge.styled,esql] +---- +include::{esql-specs}/inlinestats.csv-spec[tag=extreme-airports] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/inlinestats.csv-spec[tag=extreme-airports-result] +|=== + +NOTE: Our test data doesn't have many "small" airports.
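The `INLINESTATS` examples above are pulled in through `inlinestats.csv-spec` include directives, so the queries themselves are not visible in the diff. The following is a minimal illustrative sketch of the syntax documented above, not one of the spec-file examples: it assumes a hypothetical `employees` index with `emp_no`, `languages`, and `salary` fields, and it only runs on SNAPSHOT builds where `INLINESTATS` is available.

[source,console]
----
POST /_query?format=txt
{
  "query": """
    FROM employees
    | KEEP emp_no, languages, salary
    | INLINESTATS avg_salary = AVG(salary) BY languages
    | WHERE salary > avg_salary
    | SORT salary DESC
    | LIMIT 5
  """
}
----

Because `INLINESTATS` appends the per-group `avg_salary` to every row instead of collapsing the rows, the subsequent `WHERE` can compare each employee's salary against the average of their own language group.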
+ +If a `BY` field is multivalued then `INLINESTATS` will put the row in *each* +bucket like <>: + +[source.merge.styled,esql] +---- +include::{esql-specs}/inlinestats.csv-spec[tag=mv-group] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/inlinestats.csv-spec[tag=mv-group-result] +|=== + +To treat each group key as its own row use <> before `INLINESTATS`: + +[source.merge.styled,esql] +---- +include::{esql-specs}/inlinestats.csv-spec[tag=mv-expand] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/inlinestats.csv-spec[tag=mv-expand-result] +|=== diff --git a/docs/reference/esql/processing-commands/lookup.asciidoc b/docs/reference/esql/processing-commands/lookup.asciidoc index 7bb3a5791deef..ca456d8e70eed 100644 --- a/docs/reference/esql/processing-commands/lookup.asciidoc +++ b/docs/reference/esql/processing-commands/lookup.asciidoc @@ -2,7 +2,7 @@ [[esql-lookup]] === `LOOKUP` -experimental::["LOOKUP is a highly experimental and only available in SNAPSHOT versions."] +experimental::["LOOKUP is highly experimental and only available in SNAPSHOT versions."] `LOOKUP` matches values from the input against a `table` provided in the request, adding the other fields from the `table` to the output. diff --git a/docs/reference/geospatial-analysis.asciidoc b/docs/reference/geospatial-analysis.asciidoc index 7577bb222127f..6760040e14bc7 100644 --- a/docs/reference/geospatial-analysis.asciidoc +++ b/docs/reference/geospatial-analysis.asciidoc @@ -2,7 +2,7 @@ [[geospatial-analysis]] = Geospatial analysis -Did you know that {es} has geospatial capabilities? https://www.elastic.co/blog/geo-location-and-search[{es} and geo] go way back, to 2010. A lot has happened since then and today {es} provides robust geospatial capabilities with speed, all with a stack that scales automatically. +Did you know that {es} has geospatial capabilities? https://www.elastic.co/blog/geo-location-and-search[{es} and geo] go way back, to 2010. A lot has happened since then and today {es} provides robust geospatial capabilities with speed, all with a stack that scales automatically. Not sure where to get started with {es} and geo? Then, you have come to the right place. @@ -18,8 +18,10 @@ Have an index with lat/lon pairs but no geo_point mapping? Use <> lets you clean, transform, and augment your data before indexing. +Data is often messy and incomplete. <> lets you clean, transform, and augment your data before indexing. +* Use <> together with <> to index CSV files with geo data. + Kibana's {kibana-ref}/import-geospatial-data.html[Import CSV] feature can help with this. * Use <> to add geographical location of an IPv4 or IPv6 address. * Use <> to convert grid tiles or hexagonal cell ids to bounding boxes or polygons which describe their shape. * Use <> for reverse geocoding. For example, use {kibana-ref}/reverse-geocoding-tutorial.html[reverse geocoding] to visualize metropolitan areas by web traffic. @@ -30,6 +32,18 @@ Data is often messy and incomplete. <> lets you clean, <> answer location-driven questions. Find documents that intersect with, are within, are contained by, or do not intersect your query geometry. Combine geospatial queries with full text search queries for unparalleled searching experience. For example, "Show me all subscribers that live within 5 miles of our new gym location, that joined in the last year and have running mentioned in their profile". 
+[discrete] +[[esql-query]] +=== ES|QL + +<> has support for <> functions, enabling efficient index searching for documents that intersect with, are within, are contained by, or are disjoint from a query geometry. In addition, the `ST_DISTANCE` function calculates the distance between two points. + +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> +* experimental:[] <> + [discrete] [[geospatial-aggregate]] === Aggregate @@ -42,12 +56,12 @@ Geospatial bucket aggregations: * <> groups geo_point and geo_shape values into buckets that represent a grid. * <> groups geo_point and geo_shape values into buckets that represent an H3 hexagonal cell. * <> groups geo_point and geo_shape values into buckets that represent a grid. Each cell corresponds to a {wikipedia}/Tiled_web_map[map tile] as used by many online map sites. - + Geospatial metric aggregations: * <> computes the geographic bounding box containing all values for a Geopoint or Geoshape field. * <> computes the weighted centroid from all coordinate values for geo fields. -* <> aggregates all geo_point values within a bucket into a LineString ordered by the chosen sort field. Use geo_line aggregation to create {kibana-ref}/asset-tracking-tutorial.html[vehicle tracks]. +* <> aggregates all geo_point values within a bucket into a LineString ordered by the chosen sort field. Use geo_line aggregation to create {kibana-ref}/asset-tracking-tutorial.html[vehicle tracks]. Combine aggregations to perform complex geospatial analysis. For example, to calculate the most recent GPS tracks per flight, use a <> to group documents into buckets per aircraft. Then use geo-line aggregation to compute a track for each aircraft. In another example, use geotile grid aggregation to group documents into a grid. Then use geo-centroid aggregation to find the weighted centroid of each grid cell. @@ -79,4 +93,4 @@ Put machine learning to work for you and find the data that should stand out wit Let your location data drive insights and action with {kibana-ref}/geo-alerting.html[geographic alerts]. Commonly referred to as geo-fencing, track moving objects as they enter or exit a boundary to receive notifications through common business systems (email, Slack, Teams, PagerDuty, and more). -Interested in learning more? Follow {kibana-ref}/asset-tracking-tutorial.html[step-by-step instructions] for setting up tracking containment alerts to monitor moving vehicles. \ No newline at end of file +Interested in learning more? Follow {kibana-ref}/asset-tracking-tutorial.html[step-by-step instructions] for setting up tracking containment alerts to monitor moving vehicles. diff --git a/docs/reference/health/health.asciidoc b/docs/reference/health/health.asciidoc index 6ac7bd2001d45..34714e80e1b18 100644 --- a/docs/reference/health/health.asciidoc +++ b/docs/reference/health/health.asciidoc @@ -204,9 +204,8 @@ for health status set `verbose` to `false` to disable the more expensive analysi `help_url` field. `affected_resources`:: - (Optional, array of strings) If the root cause pertains to multiple resources in the - cluster (like indices, shards, nodes, etc...) this will hold all resources that this - diagnosis is applicable for. + (Optional, object) An object where the keys represent resource types (for example, indices, shards), + and the values are lists of the specific resources affected by the issue. `help_url`:: (string) A link to the troubleshooting guide that'll fix the health problem. 
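To illustrate the ES|QL spatial support added to `geospatial-analysis.asciidoc` above, here is a small sketch of an `ST_DISTANCE` query; the `airports` index and its `location`, `abbrev`, and `name` fields are assumptions made for the example and are not part of this change.

[source,console]
----
POST /_query?format=txt
{
  "query": """
    FROM airports
    | EVAL distance = ST_DISTANCE(location, TO_GEOPOINT("POINT(12.4923 41.8903)"))
    | WHERE distance < 100000
    | KEEP abbrev, name, distance
    | SORT distance ASC
    | LIMIT 10
  """
}
----

`ST_DISTANCE` returns meters for `geo_point` inputs, so the filter above keeps airports within roughly 100 km of the given point.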
diff --git a/docs/reference/high-availability/cluster-design.asciidoc b/docs/reference/high-availability/cluster-design.asciidoc index 6c17a494f36ae..105c8b236b0b1 100644 --- a/docs/reference/high-availability/cluster-design.asciidoc +++ b/docs/reference/high-availability/cluster-design.asciidoc @@ -246,7 +246,8 @@ accumulate into a noticeable performance penalty. An unreliable network may have frequent network partitions. {es} will automatically recover from a network partition as quickly as it can but your cluster may be partly unavailable during a partition and will need to spend time and resources to -resynchronize any missing data and rebalance itself once the partition heals. +<> and <> +itself once the partition heals. Recovering from a failure may involve copying a large amount of data between nodes so the recovery time is often determined by the available bandwidth. diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 31f4039bcfaca..5f67014d5bb4a 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -152,16 +152,18 @@ same data. However, very large shards can also cause slower searches and will take longer to recover after a failure. There is no hard limit on the physical size of a shard, and each shard can in -theory contain up to just over two billion documents. However, experience shows -that shards between 10GB and 50GB typically work well for many use cases, as -long as the per-shard document count is kept below 200 million. +theory contain up to <>. However, experience shows that shards between 10GB and 50GB +typically work well for many use cases, as long as the per-shard document count +is kept below 200 million. You may be able to use larger shards depending on your network and use case, and smaller shards may be appropriate for {enterprise-search-ref}/index.html[Enterprise Search] and similar use cases. If you use {ilm-init}, set the <>'s -`max_primary_shard_size` threshold to `50gb` to avoid shards larger than 50GB. +`max_primary_shard_size` threshold to `50gb` to avoid shards larger than 50GB +and `min_primary_shard_size` threshold to `10gb` to avoid shards smaller than 10GB. To see the current size of your shards, use the <>. @@ -184,6 +186,29 @@ index prirep shard store // TESTRESPONSE[s/\.ds-my-data-stream-2099\.05\.06-000001/my-index-000001/] // TESTRESPONSE[s/50gb/.*/] +If an index's shards are performing poorly because they have grown past the +recommended 50GB size, you may need to correct their sizing. +Because shards are immutable, their size cannot be changed in place, +so the index must be copied into a new index with corrected settings. This requires first ensuring +sufficient disk space to copy the data. Afterwards, you can copy the index's data +with corrected settings via one of the following options: + +* running <> to increase the number of primary +shards + +* creating a destination index with corrected settings and then running +<> + +Note that performing a <> and/or +<> alone is not sufficient to correct the shards' +sizing. + +Once the source index's data has been copied into the destination index, the source +index can be <>. You may then consider adding an +<> to the destination index under the source +index's name to preserve continuity for existing clients. + + [discrete] [[shard-count-recommendation]] ==== Master-eligible nodes should have at least 1GB of heap per 3000 indices @@ -501,6 +526,7 @@ POST _reindex Here's how to resolve common shard-related errors.
[discrete] +[[troubleshooting-max-shards-open]] ==== this action would add [x] total shards, but this cluster currently has [y]/[z] maximum shards open; The <> cluster diff --git a/docs/reference/ilm/actions/ilm-delete.asciidoc b/docs/reference/ilm/actions/ilm-delete.asciidoc index eac3b9804709a..beed60105ed96 100644 --- a/docs/reference/ilm/actions/ilm-delete.asciidoc +++ b/docs/reference/ilm/actions/ilm-delete.asciidoc @@ -15,6 +15,18 @@ Deletes the searchable snapshot created in a previous phase. Defaults to `true`. This option is applicable when the <> action is used in any previous phase. ++ +If you set this option to `false`, use the <> to remove {search-snaps} from your snapshot repository when +they are no longer needed. ++ +If you manually delete an index before the {ilm-cap} delete phase runs, then +{ilm-init} will not delete the underlying {search-snap}. Use the +<> to remove the {search-snap} from +your snapshot repository when it is no longer needed. ++ +See <> for +further information about deleting {search-snaps}. WARNING: If a policy with a searchable snapshot action is applied on an existing searchable snapshot index, the snapshot backing this index will NOT be deleted because it was not created by this policy. If you want diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index 632cb982b3968..fc9a35e4ef570 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -5,7 +5,7 @@ Delete policy ++++ -Deletes an index lifecycle policy. +Deletes an index <> policy. [[ilm-delete-lifecycle-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 348a9e7f99e78..a1ddde8c9f2d9 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -5,7 +5,7 @@ Explain lifecycle ++++ -Retrieves the current lifecycle status for one or more indices. For data +Retrieves the current <> status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 7443610065487..b4e07389a9fb7 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -5,7 +5,7 @@ Get policy ++++ -Retrieves a lifecycle policy. +Retrieves a <> policy. [[ilm-get-lifecycle-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index 7e9e963f6f369..f2ab8d65ec9a1 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -7,7 +7,7 @@ Get {ilm} status ++++ -Retrieves the current {ilm} ({ilm-init}) status. +Retrieves the current <> ({ilm-init}) status. You can start or stop {ilm-init} with the <> and <> APIs. diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index 19cc9f7088867..f3441fa997cff 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -5,7 +5,7 @@ Move to step ++++ -Triggers execution of a specific step in the lifecycle policy. +Triggers execution of a specific step in the <> policy. 
[[ilm-move-to-step-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index ffd59a14d8c25..390f6b1bb4d15 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -5,7 +5,7 @@ Create or update lifecycle policy ++++ -Creates or updates lifecycle policy. See <> for +Creates or updates <> policy. See <> for definitions of policy components. [[ilm-put-lifecycle-request]] diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc index 711eccc298df1..107cab4d5aa19 100644 --- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc +++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc @@ -5,7 +5,7 @@ Remove policy ++++ -Removes assigned lifecycle policies from an index or a data stream's backing +Removes assigned <> policies from an index or a data stream's backing indices. [[ilm-remove-policy-request]] diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc index cb2587fbb151b..8f01f15e0c3ad 100644 --- a/docs/reference/ilm/apis/retry-policy.asciidoc +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -5,7 +5,7 @@ Retry policy ++++ -Retry executing the policy for an index that is in the ERROR step. +Retry executing the <> policy for an index that is in the ERROR step. [[ilm-retry-policy-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index 32db585c6b14c..c38b3d9ca8831 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -7,7 +7,7 @@ Start {ilm} ++++ -Start the {ilm} ({ilm-init}) plugin. +Start the <> ({ilm-init}) plugin. [[ilm-start-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index 1e9cfb94d0b1f..a6100d794c2d3 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -7,7 +7,7 @@ Stop {ilm} ++++ -Stop the {ilm} ({ilm-init}) plugin. +Stop the <> ({ilm-init}) plugin. [[ilm-stop-request]] ==== {api-request-title} diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index d922fa6687823..f810afc6c2b5f 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -2,7 +2,7 @@ [[index-lifecycle-error-handling]] == Troubleshooting {ilm} errors -When {ilm-init} executes a lifecycle policy, it's possible for errors to occur +When <> executes a lifecycle policy, it's possible for errors to occur while performing the necessary index operations for a step. When this happens, {ilm-init} moves the index to an `ERROR` step. If {ilm-init} cannot resolve the error automatically, execution is halted diff --git a/docs/reference/ilm/ilm-index-lifecycle.asciidoc b/docs/reference/ilm/ilm-index-lifecycle.asciidoc index acf59645dae13..040e02742f5e7 100644 --- a/docs/reference/ilm/ilm-index-lifecycle.asciidoc +++ b/docs/reference/ilm/ilm-index-lifecycle.asciidoc @@ -5,7 +5,7 @@ Index lifecycle ++++ -{ilm-init} defines five index lifecycle _phases_: +<> defines five index lifecycle _phases_: * **Hot**: The index is actively being updated and queried. * **Warm**: The index is no longer being updated but is still being queried. 
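Since the lifecycle API pages above now all cross-link the {ilm-init} overview, a small end-to-end sketch of a policy that could be created with the create-or-update lifecycle API may help; the policy name and thresholds below are illustrative only and are not taken from this change.

[source,console]
----
PUT _ilm/policy/my-timeseries-policy
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_primary_shard_size": "50gb",
            "max_age": "30d"
          }
        }
      },
      "warm": {
        "min_age": "30d",
        "actions": {
          "shrink": { "number_of_shards": 1 }
        }
      },
      "delete": {
        "min_age": "90d",
        "actions": {
          "delete": {}
        }
      }
    }
  }
}
----

The hot, warm, and delete blocks correspond to three of the five phases listed in `ilm-index-lifecycle.asciidoc`, and the `50gb` rollover threshold matches the shard-sizing guidance in `size-your-shards.asciidoc`.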
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 24149afe802a2..7232de12c8c50 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -113,7 +113,7 @@ Index mode supports the following values: `time_series`::: Index mode optimized for storage of metrics documented in <>. -`logs`::: Index mode optimized for storage of logs. It applies default sort settings on the `hostname` and `timestamp` fields and uses <>. <> on different fields is still allowed. +`logsdb`::: Index mode optimized for storage of logs. It applies default sort settings on the `hostname` and `timestamp` fields and uses <>. <> on different fields is still allowed. preview:[] [[routing-partition-size]] `index.routing_partition_size`:: diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index b4e4bd33f819a..06b4d9d92e49f 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -35,21 +35,7 @@ index, or alias. Use the index recovery API to get information about ongoing and completed shard recoveries. -// tag::shard-recovery-desc[] -Shard recovery is the process of initializing a shard copy, such as restoring a -primary shard from a snapshot or syncing a replica shard from a primary shard. -When a shard recovery completes, the recovered shard is available for search -and indexing. - -Recovery automatically occurs during the following processes: - -* Node startup. This type of recovery is called a local store recovery. -* Primary shard replication. -* Relocation of a shard to a different node in the same cluster. -* <> operation. -* <>, <>, or -<> operation. -// end::shard-recovery-desc[] +include::{es-ref-dir}/modules/shard-recovery-desc.asciidoc[] The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last @@ -360,7 +346,7 @@ The API returns the following response: "index1" : { "shards" : [ { "id" : 0, - "type" : "STORE", + "type" : "EXISTING_STORE", "stage" : "DONE", "primary" : true, "start_time" : "2014-02-24T12:38:06.349", diff --git a/docs/reference/inference/images/inference-landscape.png b/docs/reference/inference/images/inference-landscape.png new file mode 100644 index 0000000000000..a35d1370fd09b Binary files /dev/null and b/docs/reference/inference/images/inference-landscape.png differ diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 9c75820a8f92b..8fdf8aecc2ae5 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -5,14 +5,16 @@ experimental[] IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in -{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio or -Hugging Face. For built-in models and models uploaded through Eland, the {infer} -APIs offer an alternative way to use and manage trained models. However, if you -do not plan to use the {infer} APIs to use these models or if you want to use -non-NLP models, use the <>. +{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, +Google AI Studio or Hugging Face. For built-in models and models uploaded +through Eland, the {infer} APIs offer an alternative way to use and manage +trained models. 
However, if you do not plan to use the {infer} APIs to use these +models or if you want to use non-NLP models, use the +<>. The {infer} APIs enable you to create {infer} endpoints and use {ml} models of -different providers - such as Cohere, OpenAI, or HuggingFace - as a service. Use +different providers - such as Amazon Bedrock, Anthropic, Azure AI Studio, +Cohere, Google AI, Mistral, OpenAI, or HuggingFace - as a service. Use the following APIs to manage {infer} models and perform {infer}: * <> @@ -20,12 +22,26 @@ the following APIs to manage {infer} models and perform {infer}: * <> * <> +[[inference-landscape]] +.A representation of the Elastic inference landscape +image::images/inference-landscape.png[A representation of the Elastic inference landscape,align="center"] + +An {infer} endpoint enables you to use the corresponding {ml} model without +manual deployment and apply it to your data at ingestion time through +<>. + +Choose a model from your provider or use ELSER – a retrieval model trained by +Elastic –, then create an {infer} endpoint by the <>. +Now use <> to perform +<> on your data. include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] include::put-inference.asciidoc[] +include::service-alibabacloud-ai-search.asciidoc[] include::service-amazon-bedrock.asciidoc[] +include::service-anthropic.asciidoc[] include::service-azure-ai-studio.asciidoc[] include::service-azure-openai.asciidoc[] include::service-cohere.asciidoc[] diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index b809a96b8f81a..ba26a563541fc 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -6,10 +6,17 @@ experimental[] Creates an {infer} endpoint to perform an {infer} task. -IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in -{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI or Hugging Face. -For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. -However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. +[IMPORTANT] +==== +* The {infer} APIs enable you to use certain services, such as built-in +{ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, +Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic or Hugging Face. +* For built-in models and models uploaded through Eland, the {infer} APIs offer an +alternative way to use and manage trained models. However, if you do not plan to +use the {infer} APIs to use these models or if you want to use non-NLP models, +use the <>. +==== + [discrete] [[put-inference-api-request]] @@ -32,7 +39,9 @@ The create {infer} API enables you to create an {infer} endpoint and configure a The following services are available through the {infer} API, click the links to review the configuration details of the services: +* <> * <> +* <> * <> * <> * <> @@ -44,10 +53,5 @@ The following services are available through the {infer} API, click the links to * <> * <> -[NOTE] -==== -You might see a 502 bad gateway error in the response when using the {kib} Console. -This error usually just reflects a timeout, while the model downloads in the background. -You can check the download progress in the {ml-app} UI. 
-If using the Python client, you can set the `timeout` parameter to a higher value. -==== \ No newline at end of file +The {es} and ELSER services run on a {ml} node in your {es} cluster. The rest of +the services connect to external providers. \ No newline at end of file diff --git a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc new file mode 100644 index 0000000000000..23a3d532635ac --- /dev/null +++ b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc @@ -0,0 +1,184 @@ +[[infer-service-alibabacloud-ai-search]] +=== AlibabaCloud AI Search {infer} service + +Creates an {infer} endpoint to perform an {infer} task with the `alibabacloud-ai-search` service. + +[discrete] +[[infer-service-alibabacloud-ai-search-api-request]] +==== {api-request-title} + +`PUT /_inference//` + +[discrete] +[[infer-service-alibabacloud-ai-search-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Available task types: + +* `text_embedding`, +* `sparse_embedding`, +* `rerank`. +-- + +[discrete] +[[infer-service-alibabacloud-ai-search-api-request-body]] +==== {api-request-body-title} + +`service`:: +(Required, string) The type of service supported for the specified task type. +In this case, +`alibabacloud-ai-search`. + +`service_settings`:: +(Required, object) +include::inference-shared.asciidoc[tag=service-settings] ++ +-- +These settings are specific to the `alibabacloud-ai-search` service. +-- + +`api_key`::: +(Required, string) +A valid API key for the AlibabaCloud AI Search API. + +`service_id`::: +(Required, string) +The name of the model service to use for the {infer} task. ++ +-- +Available service_ids for the `text_embedding` task: + +* `ops-text-embedding-001` +* `ops-text-embedding-zh-001` +* `ops-text-embedding-en-001` +* `ops-text-embedding-002` + +For the supported `text_embedding` service_ids, refer to the https://help.aliyun.com/zh/open-search/search-platform/developer-reference/text-embedding-api-details[documentation]. + +Available service_id for the `sparse_embedding` task: + +* `ops-text-sparse-embedding-001` + +For the supported `sparse_embedding` service_id, refer to the https://help.aliyun.com/zh/open-search/search-platform/developer-reference/text-sparse-embedding-api-details[documentation]. + +Available service_id for the `rerank` task is: + +* `ops-bge-reranker-larger` + +For the supported `rerank` service_id, refer to the https://help.aliyun.com/zh/open-search/search-platform/developer-reference/ranker-api-details[documentation]. +-- + +`host`::: +(Required, string) +The name of the host address used for the {infer} task. You can find the host address at https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[ the API keys section] of the documentation. + +`workspace`::: +(Required, string) +The name of the workspace used for the {infer} task. + +`rate_limit`::: +(Optional, object) +By default, the `alibabacloud-ai-search` service sets the number of requests allowed per minute to `1000`. +This helps to minimize the number of rate limit errors returned from AlibabaCloud AI Search. 
+To modify this, set the `requests_per_minute` setting of this object in your service settings: ++ +-- +include::inference-shared.asciidoc[tag=request-per-minute-example] +-- + + +`task_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=task-settings] ++ +.`task_settings` for the `text_embedding` task type +[%collapsible%closed] +===== +`input_type`::: +(Optional, string) +Specifies the type of input passed to the model. +Valid values are: +* `ingest`: for storing document embeddings in a vector database. +* `search`: for storing embeddings of search queries run against a vector database to find relevant documents. +===== ++ +.`task_settings` for the `sparse_embedding` task type +[%collapsible%closed] +===== +`input_type`::: +(Optional, string) +Specifies the type of input passed to the model. +Valid values are: +* `ingest`: for storing document embeddings in a vector database. +* `search`: for storing embeddings of search queries run against a vector database to find relevant documents. + +`return_token`::: +(Optional, boolean) +If `true`, the token name will be returned in the response. Defaults to `false` which means only the token ID will be returned in the response. +===== + +[discrete] +[[inference-example-alibabacloud-ai-search]] +==== AlibabaCloud AI Search service examples + +The following example shows how to create an {infer} endpoint called `alibabacloud_ai_search_embeddings` to perform a `text_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/alibabacloud_ai_search_embeddings +{ + "service": "alibabacloud-ai-search", + "service_settings": { + "api_key": "", + "service_id": "ops-text-embedding-001", + "host": "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + "workspace": "default" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The following example shows how to create an {infer} endpoint called +`alibabacloud_ai_search_sparse` to perform a `sparse_embedding` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/alibabacloud_ai_search_sparse +{ + "service": "alibabacloud-ai-search", + "service_settings": { + "api_key": "", + "service_id": "ops-text-sparse-embedding-001", + "host": "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + "workspace": "default" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +The next example shows how to create an {infer} endpoint called +`alibabacloud_ai_search_rerank` to perform a `rerank` task type. + +[source,console] +------------------------------------------------------------ +PUT _inference/rerank/alibabacloud_ai_search_rerank +{ + "service": "alibabacloud-ai-search", + "service_settings": { + "api_key": "", + "service_id": "ops-bge-reranker-larger", + "host": "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + "workspace": "default" + } +} +------------------------------------------------------------ +// TEST[skip:TBD] diff --git a/docs/reference/inference/service-amazon-bedrock.asciidoc b/docs/reference/inference/service-amazon-bedrock.asciidoc index 4ffa368613a0e..dbffd5c26fbcc 100644 --- a/docs/reference/inference/service-amazon-bedrock.asciidoc +++ b/docs/reference/inference/service-amazon-bedrock.asciidoc @@ -122,14 +122,6 @@ Only available for `anthropic`, `cohere`, and `mistral` providers. Alternative to `temperature`. 
Limits samples to the top-K most likely words, balancing coherence and variability. Should not be used if `temperature` is specified. -===== -+ -.`task_settings` for the `text_embedding` task type -[%collapsible%closed] -===== - -There are no `task_settings` available for the `text_embedding` task type. - ===== [discrete] diff --git a/docs/reference/inference/service-cohere.asciidoc b/docs/reference/inference/service-cohere.asciidoc index 52d71e0bc02a5..84eae6e880617 100644 --- a/docs/reference/inference/service-cohere.asciidoc +++ b/docs/reference/inference/service-cohere.asciidoc @@ -131,6 +131,7 @@ Specify whether to return doc text within the results. `top_n`:: (Optional, integer) The number of most relevant documents to return, defaults to the number of the documents. +If this {infer} endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. ===== + .`task_settings` for the `text_embedding` task type diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index 3b9b5b1928d7b..572cad591fba6 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -1,7 +1,12 @@ [[infer-service-elasticsearch]] === Elasticsearch {infer} service -Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service. +Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` +service. + +NOTE: If you use the E5 model through the `elasticsearch` service, the API +request will automatically download and deploy the model if it isn't downloaded +yet. [discrete] @@ -26,6 +31,7 @@ include::inference-shared.asciidoc[tag=task-type] Available task types: * `rerank`, +* `sparse_embedding`, * `text_embedding`. -- @@ -35,7 +41,7 @@ Available task types: `service`:: (Required, string) -The type of service supported for the specified task type. In this case, +The type of service supported for the specified task type. In this case, `elasticsearch`. `service_settings`:: @@ -46,6 +52,22 @@ include::inference-shared.asciidoc[tag=service-settings] These settings are specific to the `elasticsearch` service. -- +`adaptive_allocations`::: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + +`enabled`:::: +(Optional, Boolean) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] + +`max_number_of_allocations`:::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-max-number] + +`min_number_of_allocations`:::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] + `model_id`::: (Required, string) The name of the model to use for the {infer} task. @@ -54,11 +76,13 @@ It can be the ID of either a built-in model (for example, `.multilingual-e5-smal `num_allocations`::: (Required, integer) -The total number of allocations this model is assigned across machine learning nodes. Increasing this value generally increases the throughput. +The total number of allocations this model is assigned across machine learning nodes. +Increasing this value generally increases the throughput. +If `adaptive_allocations` is enabled, do not set this value, because it's automatically set. `num_threads`::: (Required, integer) -Sets the number of threads used by each model allocation during inference. 
This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. +Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. Must be a power of 2. Max allowed value is 32. `task_settings`:: @@ -81,6 +105,9 @@ Returns the document instead of only the index. Defaults to `true`. The following example shows how to create an {infer} endpoint called `my-e5-model` to perform a `text_embedding` task type. +The API request below will automatically download the E5 model if it isn't +already downloaded and then deploy the model. + [source,console] ------------------------------------------------------------ PUT _inference/text_embedding/my-e5-model @@ -98,6 +125,14 @@ PUT _inference/text_embedding/my-e5-model Valid values are `.multilingual-e5-small` and `.multilingual-e5-small_linux-x86_64`. For further details, refer to the {ml-docs}/ml-nlp-e5.html[E5 model documentation]. +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. +==== + [discrete] [[inference-example-eland]] ==== Models uploaded by Eland via the elasticsearch service @@ -107,16 +142,45 @@ The following example shows how to create an {infer} endpoint called [source,console] ------------------------------------------------------------ -PUT _inference/text_embedding/my-msmarco-minilm-model +PUT _inference/text_embedding/my-msmarco-minilm-model <1> { "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1, - "model_id": "msmarco-MiniLM-L12-cos-v5" <1> + "model_id": "msmarco-MiniLM-L12-cos-v5" <2> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> Provide an unique identifier for the inference endpoint. The `inference_id` must be unique and must not match the `model_id`. +<2> The `model_id` must be the ID of a text embedding model which has already been +{ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. + +[discrete] +[[inference-example-adaptive-allocation]] +==== Setting adaptive allocation for E5 via the `elasticsearch` service + +The following example shows how to create an {infer} endpoint called +`my-e5-model` to perform a `text_embedding` task type and configure adaptive +allocations. + +The API request below will automatically download the E5 model if it isn't +already downloaded and then deploy the model. + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/my-e5-model +{ + "service": "elasticsearch", + "service_settings": { + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + }, + "model_id": ".multilingual-e5-small" } } ------------------------------------------------------------ // TEST[skip:TBD] -<1> The `model_id` must be the ID of a text embedding model which has already been -{ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. 
\ No newline at end of file diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 829ff4968c5be..fdce94901984b 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -3,6 +3,9 @@ Creates an {infer} endpoint to perform an {infer} task with the `elser` service. +NOTE: The API request will automatically download and deploy the ELSER model if +it isn't already downloaded. + [discrete] [[infer-service-elser-api-request]] @@ -34,7 +37,7 @@ Available task types: `service`:: (Required, string) -The type of service supported for the specified task type. In this case, +The type of service supported for the specified task type. In this case, `elser`. `service_settings`:: @@ -45,13 +48,31 @@ include::inference-shared.asciidoc[tag=service-settings] These settings are specific to the `elser` service. -- +`adaptive_allocations`::: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + +`enabled`:::: +(Optional, Boolean) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] + +`max_number_of_allocations`:::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-max-number] + +`min_number_of_allocations`:::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] + `num_allocations`::: (Required, integer) -The total number of allocations this model is assigned across machine learning nodes. Increasing this value generally increases the throughput. +The total number of allocations this model is assigned across machine learning nodes. +Increasing this value generally increases the throughput. +If `adaptive_allocations` is enabled, do not set this value, because it's automatically set. `num_threads`::: (Required, integer) -Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. +Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. Must be a power of 2. Max allowed value is 32. @@ -63,6 +84,9 @@ The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type. Refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation] for more info. +The request below will automatically download the ELSER model if it isn't +already downloaded and then deploy the model. + [source,console] ------------------------------------------------------------ PUT _inference/sparse_embedding/my-elser-model @@ -92,4 +116,39 @@ Example response: "task_settings": {} } ------------------------------------------------------------ -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE + +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. 
+==== + +[discrete] +[[inference-example-elser-adaptive-allocation]] +==== Setting adaptive allocation for the ELSER service + +The following example shows how to create an {infer} endpoint called +`my-elser-model` to perform a `sparse_embedding` task type and configure +adaptive allocations. + +The request below will automatically download the ELSER model if it isn't +already downloaded and then deploy the model. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-model +{ + "service": "elser", + "service_settings": { + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + } + } +} +------------------------------------------------------------ +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/ingest/apis/delete-geoip-database.asciidoc b/docs/reference/ingest/apis/delete-geoip-database.asciidoc new file mode 100644 index 0000000000000..957e59f0f0de4 --- /dev/null +++ b/docs/reference/ingest/apis/delete-geoip-database.asciidoc @@ -0,0 +1,55 @@ +[[delete-geoip-database-api]] +=== Delete geoip database configuration API +++++ +Delete geoip database configuration +++++ + +Deletes a geoip database configuration. + +[source,console] +---- +DELETE /_ingest/geoip/database/my-database-id +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + + +[[delete-geoip-database-api-request]] +==== {api-request-title} + +`DELETE /_ingest/geoip/database/` + +[[delete-geoip-database-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`manage` <> to use this API. + +[[delete-geoip-database-api-path-params]] +==== {api-path-parms-title} + +``:: ++ +-- +(Required, string) Database configuration ID used to limit the request. + +-- + + +[[delete-geoip-database-api-query-params]] +==== {api-query-parms-title} + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + + +[[delete-geoip-database-api-example]] +==== {api-examples-title} + + +[[delete-geoip-database-api-specific-ex]] +===== Delete a specific geoip database configuration + +[source,console] +---- +DELETE /_ingest/geoip/database/example-database-id +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] diff --git a/docs/reference/ingest/apis/delete-pipeline.asciidoc b/docs/reference/ingest/apis/delete-pipeline.asciidoc index 6f50251dbf1cd..94ac87c61b56b 100644 --- a/docs/reference/ingest/apis/delete-pipeline.asciidoc +++ b/docs/reference/ingest/apis/delete-pipeline.asciidoc @@ -62,7 +62,7 @@ use a value of `*`. 
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] -[[delete-pipeline-api-api-example]] +[[delete-pipeline-api-example]] ==== {api-examples-title} diff --git a/docs/reference/ingest/apis/geoip-stats-api.asciidoc b/docs/reference/ingest/apis/geoip-stats.asciidoc similarity index 100% rename from docs/reference/ingest/apis/geoip-stats-api.asciidoc rename to docs/reference/ingest/apis/geoip-stats.asciidoc diff --git a/docs/reference/ingest/apis/get-geoip-database.asciidoc b/docs/reference/ingest/apis/get-geoip-database.asciidoc new file mode 100644 index 0000000000000..f055e3e759db8 --- /dev/null +++ b/docs/reference/ingest/apis/get-geoip-database.asciidoc @@ -0,0 +1,80 @@ +[[get-geoip-database-api]] +=== Get geoip database configuration API +++++ +Get geoip database configuration +++++ + +Returns information about one or more geoip database configurations. + +[source,console] +---- +GET /_ingest/geoip/database/my-database-id +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + + + +[[get-geoip-database-api-request]] +==== {api-request-title} + +`GET /_ingest/geoip/database/` + +`GET /_ingest/geoip/database` + +[[get-geoip-database-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the + `manage` <> to use this API. + +[[get-geoip-database-api-path-params]] +==== {api-path-parms-title} + +``:: +(Optional, string) +Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are +supported. ++ +To get all database configurations, omit this parameter or use `*`. + + +[[get-geoip-database-api-query-params]] +==== {api-query-parms-title} + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] + + +[[get-geoip-database-api-example]] +==== {api-examples-title} + + +[[get-geoip-database-api-specific-ex]] +===== Get information for a specific geoip database configuration + +[source,console] +---- +GET /_ingest/geoip/database/my-database-id +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + +The API returns the following response: + +[source,console-result] +---- +{ + "databases" : [ + { + "id" : "my-database-id", + "version" : 1, + "modified_date_millis" : 1723040276114, + "database" : { + "name" : "GeoIP2-Domain", + "maxmind" : { + "account_id" : "1234567" + } + } + } + ] +} +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] diff --git a/docs/reference/ingest/apis/get-pipeline.asciidoc b/docs/reference/ingest/apis/get-pipeline.asciidoc index 71a261d97bdeb..f2a1155bca12b 100644 --- a/docs/reference/ingest/apis/get-pipeline.asciidoc +++ b/docs/reference/ingest/apis/get-pipeline.asciidoc @@ -65,7 +65,7 @@ To get all ingest pipelines, omit this parameter or use `*`. 
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] -[[get-pipeline-api-api-example]] +[[get-pipeline-api-example]] ==== {api-examples-title} diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index 04fcd500a9721..e068f99ea0ad3 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -13,7 +13,7 @@ Use the following APIs to create, manage, and test ingest pipelines: * <> to create or update a pipeline * <> to retrieve a pipeline configuration * <> to delete a pipeline -* <> to test a pipeline +* <> and <> to test ingest pipelines [discrete] [[ingest-stat-apis]] @@ -21,12 +21,27 @@ Use the following APIs to create, manage, and test ingest pipelines: Use the following APIs to get statistics about ingest processing: -* <> to get download statistics for GeoIP2 databases used with +* <> to get download statistics for IP geolocation databases used with the <>. +[discrete] +[[ingest-geoip-database-apis]] +=== Ingest GeoIP Database APIs + +preview::["The commercial IP geolocation database download management APIs are in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +Use the following APIs to configure and manage commercial IP geolocation database downloads: + +* <> to create or update a database configuration +* <> to retrieve a database configuration +* <> to delete a database configuration + include::put-pipeline.asciidoc[] -include::delete-pipeline.asciidoc[] -include::geoip-stats-api.asciidoc[] include::get-pipeline.asciidoc[] +include::delete-pipeline.asciidoc[] include::simulate-pipeline.asciidoc[] include::simulate-ingest.asciidoc[] +include::geoip-stats.asciidoc[] +include::put-geoip-database.asciidoc[] +include::get-geoip-database.asciidoc[] +include::delete-geoip-database.asciidoc[] diff --git a/docs/reference/ingest/apis/put-geoip-database.asciidoc b/docs/reference/ingest/apis/put-geoip-database.asciidoc new file mode 100644 index 0000000000000..311c303002387 --- /dev/null +++ b/docs/reference/ingest/apis/put-geoip-database.asciidoc @@ -0,0 +1,72 @@ +[[put-geoip-database-api]] +=== Create or update geoip database configuration API +++++ +Create or update geoip database configuration +++++ + +Creates or updates an IP geolocation database configuration. + +IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, +it is stored as a <> in {es} named `ingest.geoip.downloader.maxmind.license_key`. Only +one Maxmind license key is currently allowed per {es} cluster. A valid license key must be in the secure settings in order +to download from Maxmind. The license key setting does not take effect until all nodes are restarted. + +[source,console] +---- +PUT _ingest/geoip/database/my-database-id +{ + "name": "GeoIP2-Domain", + "maxmind": { + "account_id": "1025402" + } +} +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + +[[put-geoip-database-api-request]] +==== {api-request-title} + +`PUT /_ingest/geoip/database/` + +[[put-geoip-database-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`manage` <> to use this API. 
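+
+NOTE: The following keystore command is an illustrative sketch, not part of this API. It assumes the archive installation layout and shows one way the Maxmind license key mentioned above could be added as a secure setting on each node before configuring a database; the command prompts for the key value.
+
+[source,sh]
+----
+bin/elasticsearch-keystore add ingest.geoip.downloader.maxmind.license_key
+----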
+
+
+[[put-geoip-database-api-path-params]]
+==== {api-path-parms-title}
+
+``::
++
+__
+(Required, string) ID of the database configuration to create or update.
+
+[[put-geoip-database-api-query-params]]
+==== {api-query-parms-title}
+
+include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+
+[[put-geoip-database-api-request-body]]
+==== {api-request-body-title}
+
+// tag::geoip-database-object[]
+`name`::
+(Required, string)
+The provider-assigned name of the IP geolocation database to download.
+
+``::
+(Required, a provider object and its associated configuration)
+The configuration necessary to identify which IP geolocation provider to use to download
+the database, as well as any provider-specific configuration necessary for such downloading.
++
+At present, the only supported provider is `maxmind`, and the maxmind provider
+requires that an `account_id` (string) is configured.
+// end::geoip-database-object[]
+
+[[geoip-database-configuration-licensing]]
+==== Licensing
+
+Downloading databases from third party providers is a commercial feature that requires an
+appropriate license. For more information, refer to https://www.elastic.co/subscriptions.
diff --git a/docs/reference/ingest/enrich.asciidoc b/docs/reference/ingest/enrich.asciidoc
index 6642cdc2a74ce..4bd50641149c0 100644
--- a/docs/reference/ingest/enrich.asciidoc
+++ b/docs/reference/ingest/enrich.asciidoc
@@ -230,12 +230,12 @@ Instead, you can:
[[ingest-enrich-components]]
==== Enrich components
-The enrich coordinator is a component that manages and performs the searches
+The enrich coordinator is a component that manages and performs the searches
required to enrich documents on each ingest node. It combines searches from
all enrich processors in all pipelines into bulk <>.
-The enrich policy executor is a component that manages the executions of all
-enrich policies. When an enrich policy is executed, this component creates
+The enrich policy executor is a component that manages the executions of all
+enrich policies. When an enrich policy is executed, this component creates
a new enrich index and removes the previous enrich index. The enrich policy
executions are managed from the elected master node. The execution of these
policies occurs on a different node.
@@ -249,9 +249,15 @@ enrich policy executor.
The enrich coordinator supports the following node settings:
`enrich.cache_size`::
-Maximum number of searches to cache for enriching documents. Defaults to `1000`.
-There is a single cache for all enrich processors in the cluster. This setting
-determines the size of that cache.
+Maximum size of the cache that caches searches for enriching documents.
+The size can be specified in three units: the raw number of
+cached searches (e.g. `1000`), an absolute size in bytes (e.g. `100Mb`),
+or a percentage of the max heap space of the node (e.g. `1%`).
+Both for the absolute byte size and the percentage of heap space,
+{es} does not guarantee that the enrich cache size will adhere exactly to that maximum,
+as {es} uses the byte size of the serialized search response
+which is a good representation of the used space on the heap, but not an exact match.
+Defaults to `1%`. There is a single cache for all enrich processors in the cluster.
`enrich.coordinator_proxy.max_concurrent_requests`::
Maximum number of concurrent <> to
@@ -280,4 +286,4 @@ Maximum number of enrich policies to execute concurrently. Defaults to `50`.
include::geo-match-enrich-policy-type-ex.asciidoc[] include::match-enrich-policy-type-ex.asciidoc[] -include::range-enrich-policy-type-ex.asciidoc[] \ No newline at end of file +include::range-enrich-policy-type-ex.asciidoc[] diff --git a/docs/reference/ingest/processors/community-id.asciidoc b/docs/reference/ingest/processors/community-id.asciidoc index 03e65ac04a209..2d86bd21fa1e9 100644 --- a/docs/reference/ingest/processors/community-id.asciidoc +++ b/docs/reference/ingest/processors/community-id.asciidoc @@ -23,11 +23,12 @@ configuration is required. | `source_port` | no | `source.port` | Field containing the source port. | `destination_ip` | no | `destination.ip` | Field containing the destination IP address. | `destination_port` | no | `destination.port` | Field containing the destination port. -| `iana_number` | no | `network.iana_number` | Field containing the IANA number. The following protocol numbers are currently supported: `1` ICMP, `2` IGMP, `6` TCP, `17` UDP, `47` GRE, `58` ICMP IPv6, `88` EIGRP, `89` OSPF, `103` PIM, and `132` SCTP. +| `iana_number` | no | `network.iana_number` | Field containing the IANA number. | `icmp_type` | no | `icmp.type` | Field containing the ICMP type. | `icmp_code` | no | `icmp.code` | Field containing the ICMP code. -| `transport` | no | `network.transport` | Field containing the transport protocol. -Used only when the `iana_number` field is not present. +| `transport` | no | `network.transport` | Field containing the transport protocol name or number. +Used only when the `iana_number` field is not present. The following protocol names are currently supported: +`ICMP`, `IGMP`, `TCP`, `UDP`, `GRE`, `ICMP IPv6`, `EIGRP`, `OSPF`, `PIM`, and `SCTP`. | `target_field` | no | `network.community_id` | Output field for the community ID. | `seed` | no | `0` | Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The seed can prevent hash collisions between network domains, such as diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 738ac234d6162..3a9ba58dedbf0 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -24,6 +24,9 @@ stats API>>. If your cluster can't connect to the Elastic GeoIP endpoint or you want to manage your own updates, see <>. +If you would like to have {es} download database files directly from Maxmind using your own provided +license key, see <>. + If {es} can't connect to the endpoint for 30 days all updated databases will become invalid. {es} will stop enriching documents with geoip data and will add `tags: ["_geoip_expired_database"]` field instead. @@ -36,9 +39,9 @@ field instead. [options="header"] |====== | Name | Required | Default | Description -| `field` | yes | - | The field to get the ip address from for the geographical lookup. +| `field` | yes | - | The field to get the IP address from for the geographical lookup. | `target_field` | no | geoip | The field that will hold the geographical information looked up from the MaxMind database. -| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or the name of a supported database file in the `ingest-geoip` config directory. 
+| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended).
| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the geoip lookup.
| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document
| `first_only` | no | `true` | If `true` only first found geoip data will be returned, even if `field` contains array
@@ -64,12 +67,12 @@ depend on what has been found and which properties were configured in `propertie
* If the GeoIP2 Domain database is used, then the following fields may be added under the `target_field`: `ip`, and `domain`.
The fields actually added depend on what has been found and which properties were configured in `properties`.
* If the GeoIP2 ISP database is used, then the following fields may be added under the `target_field`: `ip`, `asn`,
-`organization_name`, `network`, `isp`, `isp_organization`, `mobile_country_code`, and `mobile_network_code`. The fields actually added
+`organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added
depend on what has been found and which properties were configured in `properties`.
* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`,
`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`,
`location`, `asn`, `organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`,
-`residential_proxy`, `domain`, `isp`, `isp_organization`, `mobile_country_code`, `mobile_network_code`, `user_type`, and
+`residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and
`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`.
preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc
index 88d97d9422d5e..982da1fe17f7a 100644
--- a/docs/reference/ingest/processors/inference.asciidoc
+++ b/docs/reference/ingest/processors/inference.asciidoc
@@ -40,6 +40,11 @@ include::common-options.asciidoc[]
Select the `content` field for inference and write the result to
`content_embedding`.
+IMPORTANT: If the specified `output_field` already exists in the ingest document, it won't be overwritten.
+The {infer} results will be appended to the existing fields within `output_field`, which could lead to duplicate fields and potential errors.
+To avoid this, use a unique `output_field` field name that does not clash with any existing fields.
+ + [source,js] -------------------------------------------------- { diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index 3fc23b44994a7..cd9c126e7b1fd 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -1,42 +1,70 @@ [[elasticsearch-intro]] == What is {es}? -_**You know, for search (and analysis)**_ - -{es} is the distributed search and analytics engine at the heart of -the {stack}. {ls} and {beats} facilitate collecting, aggregating, and -enriching your data and storing it in {es}. {kib} enables you to -interactively explore, visualize, and share insights into your data and manage -and monitor the stack. {es} is where the indexing, search, and analysis -magic happens. - -{es} provides near real-time search and analytics for all types of data. Whether you -have structured or unstructured text, numerical data, or geospatial data, -{es} can efficiently store and index it in a way that supports fast searches. -You can go far beyond simple data retrieval and aggregate information to discover -trends and patterns in your data. And as your data and query volume grows, the -distributed nature of {es} enables your deployment to grow seamlessly right -along with it. - -While not _every_ problem is a search problem, {es} offers speed and flexibility -to handle data in a wide variety of use cases: - -* Add a search box to an app or website -* Store and analyze logs, metrics, and security event data -* Use machine learning to automatically model the behavior of your data in real - time -* Use {es} as a vector database to create, store, and search vector embeddings -* Automate business workflows using {es} as a storage engine -* Manage, integrate, and analyze spatial information using {es} as a geographic - information system (GIS) -* Store and process genetic data using {es} as a bioinformatics research tool - -We’re continually amazed by the novel ways people use search. But whether -your use case is similar to one of these, or you're using {es} to tackle a new -problem, the way you work with your data, documents, and indices in {es} is -the same. + +{es-repo}[{es}] is a distributed search and analytics engine, scalable data store, and vector database built on Apache Lucene. +It's optimized for speed and relevance on production-scale workloads. +Use {es} to search, index, store, and analyze data of all shapes and sizes in near real time. + +[TIP] +==== +{es} has a lot of features. Explore the full list on the https://www.elastic.co/elasticsearch/features[product webpage^]. +==== + +{es} is the heart of the {estc-welcome-current}/stack-components.html[Elastic Stack] and powers the Elastic https://www.elastic.co/enterprise-search[Search], https://www.elastic.co/observability[Observability] and https://www.elastic.co/security[Security] solutions. + +{es} is used for a wide and growing range of use cases. Here are a few examples: + +* *Monitor log and event data*. Store logs, metrics, and event data for observability and security information and event management (SIEM). +* *Build search applications*. Add search capabilities to apps or websites, or build enterprise search engines over your organization's internal data sources. +* *Vector database*. Store and search vectorized data, and create vector embeddings with built-in and third-party natural language processing (NLP) models. +* *Retrieval augmented generation (RAG)*. Use {es} as a retrieval engine to augment Generative AI models. +* *Application and security monitoring*. 
Monitor and analyze application performance and security data effectively. +* *Machine learning*. Use {ml} to automatically model the behavior of your data in real-time. + +This is just a sample of search, observability, and security use cases enabled by {es}. +Refer to our https://www.elastic.co/customers/success-stories[customer success stories] for concrete examples across a range of industries. +// Link to demos, search labs chatbots + +[discrete] +[[elasticsearch-intro-elastic-stack]] +.What is the Elastic Stack? +******************************* +{es} is the core component of the Elastic Stack, a suite of products for collecting, storing, searching, and visualizing data. +https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current/stack-components.html[Learn more about the Elastic Stack]. +******************************* +// TODO: Remove once we've moved Stack Overview to a subpage? + +[discrete] +[[elasticsearch-intro-deploy]] +=== Deployment options + +To use {es}, you need a running instance of the {es} service. +You can deploy {es} in various ways: + +* <>. Get started quickly with a minimal local Docker setup. +* {cloud}/ec-getting-started-trial.html[*Elastic Cloud*]. {es} is available as part of our hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14 day free trial]. +* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless* (technical preview)]. Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14 day free trial]. + +**Advanced deployment options** + +* <>. Install, configure, and run {es} on your own premises. +* {ece-ref}/Elastic-Cloud-Enterprise-overview.html[*Elastic Cloud Enterprise*]. Deploy Elastic Cloud on public or private clouds, virtual machines, or your own premises. +* {eck-ref}/k8s-overview.html[*Elastic Cloud on Kubernetes*]. Deploy Elastic Cloud on Kubernetes. + +[discrete] +[[elasticsearch-next-steps]] +=== Learn more + +Here are some resources to help you get started: + +* <>. A beginner's guide to deploying your first {es} instance, indexing data, and running queries. +* https://elastic.co/webinars/getting-started-elasticsearch[Webinar: Introduction to {es}]. Register for our live webinars to learn directly from {es} experts. +* https://www.elastic.co/search-labs[Elastic Search Labs]. Tutorials and blogs that explore AI-powered search using the latest {es} features. +** Follow our tutorial https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[to build a hybrid search solution in Python]. +** Check out the https://github.com/elastic/elasticsearch-labs?tab=readme-ov-file#elasticsearch-examples--apps[`elasticsearch-labs` repository] for a range of Python notebooks and apps for various use cases. [[documents-indices]] -=== Data in: documents and indices +=== Documents and indices {es} is a distributed document store. Instead of storing information as rows of columnar data, {es} stores complex data structures that have been serialized @@ -65,8 +93,7 @@ behavior makes it easy to index and explore your data--just start indexing documents and {es} will detect and map booleans, floating point and integer values, dates, and strings to the appropriate {es} data types. -Ultimately, however, you know more about your data and how you want to use it -than {es} can. 
You can define rules to control dynamic mapping and explicitly +You can define rules to control dynamic mapping and explicitly define mappings to take full control of how fields are stored and indexed. Defining your own mappings enables you to: @@ -89,7 +116,7 @@ used at search time. When you query a full-text field, the query text undergoes the same analysis before the terms are looked up in the index. [[search-analyze]] -=== Information out: search and analyze +=== Search and analyze While you can use {es} as a document store and retrieve documents and their metadata, the real power comes from being able to easily access the full suite @@ -160,27 +187,8 @@ size 70 needles, you’re displaying a count of the size 70 needles that match your users' search criteria--for example, all size 70 _non-stick embroidery_ needles. -[discrete] -[[more-features]] -===== But wait, there’s more - -Want to automate the analysis of your time series data? You can use -{ml-docs}/ml-ad-overview.html[machine learning] features to create accurate -baselines of normal behavior in your data and identify anomalous patterns. With -machine learning, you can detect: - -* Anomalies related to temporal deviations in values, counts, or frequencies -* Statistical rarity -* Unusual behaviors for a member of a population - -And the best part? You can do this without having to specify algorithms, models, -or other data science-related configurations. - [[scalability]] -=== Scalability and resilience: clusters, nodes, and shards -++++ -Scalability and resilience -++++ +=== Scalability and resilience {es} is built to be always available and to scale with your needs. It does this by being distributed by nature. You can add servers (nodes) to a cluster to @@ -209,7 +217,7 @@ interrupting indexing or query operations. [discrete] [[it-depends]] -==== It depends... +==== Shard size and number of shards There are a number of performance considerations and trade offs with respect to shard size and the number of primary shards configured for an index. The more @@ -237,7 +245,7 @@ testing with your own data and queries]. [discrete] [[disaster-ccr]] -==== In case of disaster +==== Disaster recovery A cluster's nodes need good, reliable connections to each other. To provide better connections, you typically co-locate the nodes in the same data center or @@ -257,7 +265,7 @@ secondary clusters are read-only followers. [discrete] [[admin]] -==== Care and feeding +==== Security, management, and monitoring As with any enterprise system, you need tools to secure, manage, and monitor your {es} clusters. Security, monitoring, and administrative features @@ -265,3 +273,5 @@ that are integrated into {es} enable you to use {kibana-ref}/introduction.html[{ as a control center for managing a cluster. Features like <> and <> help you intelligently manage your data over time. + +Refer to <> for more information. \ No newline at end of file diff --git a/docs/reference/mapping/params/copy-to.asciidoc b/docs/reference/mapping/params/copy-to.asciidoc index 10eebfb027736..b26ceac349a3e 100644 --- a/docs/reference/mapping/params/copy-to.asciidoc +++ b/docs/reference/mapping/params/copy-to.asciidoc @@ -64,16 +64,104 @@ Some important points: * It is the field _value_ which is copied, not the terms (which result from the analysis process). * The original <> field will not be modified to show the copied values. 
* The same value can be copied to multiple fields, with `"copy_to": [ "field_1", "field_2" ]` -* You cannot copy recursively via intermediary fields such as a `copy_to` on -`field_1` to `field_2` and `copy_to` on `field_2` to `field_3` expecting -indexing into `field_1` will eventuate in `field_3`, instead use copy_to -directly to multiple fields from the originating field. +* You cannot copy recursively using intermediary fields. +The following configuration will not copy data from `field_1` to `field_3`: ++ +[source,console] +---- +PUT bad_example_index +{ + "mappings": { + "properties": { + "field_1": { + "type": "text", + "copy_to": "field_2" + }, + "field_2": { + "type": "text", + "copy_to": "field_3" + }, + "field_3": { + "type": "text" + } + } + } +} +---- +Instead, copy to multiple fields from the source field: ++ +[source,console] +---- +PUT good_example_index +{ + "mappings": { + "properties": { + "field_1": { + "type": "text", + "copy_to": ["field_2", "field_3"] + }, + "field_2": { + "type": "text" + }, + "field_3": { + "type": "text" + } + } + } +} +---- + +NOTE: `copy_to` is not supported for field types where values take the form of objects, e.g. `date_range`. + +[float] +[[copy-to-dynamic-mapping]] +==== Dynamic mapping + +Consider the following points when using `copy_to` with dynamic mappings: + * If the target field does not exist in the index mappings, the usual <> behavior applies. By default, with <> set to `true`, a non-existent target field will be -dynamically added to the index mappings. If `dynamic` is set to `false`, the +dynamically added to the index mappings. +* If `dynamic` is set to `false`, the target field will not be added to the index mappings, and the value will not be -copied. If `dynamic` is set to `strict`, copying to a non-existent field will +copied. +* If `dynamic` is set to `strict`, copying to a non-existent field will result in an error. ++ +** If the target field is nested, then `copy_to` fields must specify the full path to the nested field. +Omitting the full path will lead to a `strict_dynamic_mapping_exception`. +Use `"copy_to": ["parent_field.child_field"]` to correctly target a nested field. ++ +For example: ++ +[source,console] +-------------------------------------------------- +PUT /test_index +{ + "mappings": { + "dynamic": "strict", + "properties": { + "description": { + "properties": { + "notes": { + "type": "text", + "copy_to": [ "description.notes_raw"], <1> + "analyzer": "standard", + "search_analyzer": "standard" + }, + "notes_raw": { + "type": "keyword" + } + } + } + } + } +} +-------------------------------------------------- -NOTE: `copy_to` is _not_ supported for field types where values take the form of objects, e.g. `date_range` \ No newline at end of file +<1> The `notes` field is copied to the `notes_raw` field. Targeting `notes_raw` alone instead of `description.notes_raw` +would lead to a `strict_dynamic_mapping_exception`. ++ +In this example, `notes_raw` is not defined at the root of the mapping, but under the `description` field. +Without the fully qualified path, {es} would interpret the `copy_to` target as a root-level field, not as a nested field under `description`. 
\ No newline at end of file diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index e081b122355bb..32f3d13edf581 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -223,6 +223,13 @@ The following parameters are accepted by `boolean` fields: Metadata about the field. +`time_series_dimension`:: +(Optional, Boolean) ++ +-- +include::keyword.asciidoc[tag=dimension] +-- + [[boolean-synthetic-source]] ==== Synthetic `_source` diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index f2f0b3ae8bb23..0cd9ee0578b70 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -448,3 +448,63 @@ POST /my-bit-vectors/_search?filter_path=hits.hits } ---- +==== Updatable field type + +To better accommodate scaling and performance needs, updating the `type` setting in `index_options` is possible with the <>, according to the following graph (jumps allowed): + +[source,txt] +---- +flat --> int8_flat --> int4_flat --> hnsw --> int8_hnsw --> int4_hnsw +---- + +For updating all HNSW types (`hnsw`, `int8_hnsw`, `int4_hnsw`) the number of connections `m` must either stay the same or increase. For scalar quantized formats (`int8_flat`, `int4_flat`, `int8_hnsw`, `int4_hnsw`) the `confidence_interval` must always be consistent (once defined, it cannot change). + +Updating `type` in `index_options` will fail in all other scenarios. + +Switching `types` won't re-index vectors that have already been indexed (they will keep using their original `type`), vectors being indexed after the change will use the new `type` instead. + +For example, it's possible to define a dense vector field that utilizes the `flat` type (raw float32 arrays) for a first batch of data to be indexed. + +[source,console] +-------------------------------------------------- +PUT my-index-000001 +{ + "mappings": { + "properties": { + "text_embedding": { + "type": "dense_vector", + "dims": 384, + "index_options": { + "type": "flat" + } + } + } + } +} +-------------------------------------------------- + +Changing the `type` to `int4_hnsw` makes sure vectors indexed after the change will use an int4 scalar quantized representation and HNSW (e.g., for KNN queries). +That includes new segments created by <> previously created segments. + +[source,console] +-------------------------------------------------- +PUT /my-index-000001/_mapping +{ + "properties": { + "text_embedding": { + "type": "dense_vector", + "dims": 384, + "index_options": { + "type": "int4_hnsw" + } + } + } +} +-------------------------------------------------- +// TEST[setup:my_index] + +Vectors indexed before this change will keep using the `flat` type (raw float32 representation and brute force search for KNN queries). + +In order to have all the vectors updated to the new type, either reindexing or force merging should be used. + +For debugging purposes, it's possible to inspect how many segments (and docs) exist for each `type` with the <>. 
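+
+As an illustrative sketch (reusing the `my-index-000001` index from the examples above), a force merge down to a single segment is one way to rewrite existing segments so that previously indexed vectors are re-encoded with the new `type`:
+
+[source,console]
+--------------------------------------------------
+POST /my-index-000001/_forcemerge?max_num_segments=1
+--------------------------------------------------
+// TEST[skip:TBD]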
diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc
index 6ee30e6b9f831..a006f288dc66d 100644
--- a/docs/reference/mapping/types/semantic-text.asciidoc
+++ b/docs/reference/mapping/types/semantic-text.asciidoc
@@ -7,8 +7,8 @@
beta[]
-The `semantic_text` field type automatically generates embeddings for text
-content using an inference endpoint.
+The `semantic_text` field type automatically generates embeddings for text content using an inference endpoint.
+Long passages are <> to smaller sections to enable the processing of larger corpuses of text.
The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings.
You can create the inference endpoint by using the <>.
@@ -52,8 +52,8 @@ Use the <> to create the endpoint.
The `inference_id` will not be validated when the mapping is created, but when documents are ingested into the index.
When the first document is indexed, the `inference_id` will be used to generate underlying indexing structures for the field.
-WARNING: Removing an inference endpoint will cause ingestion of documents and semantic queries to fail on indices that define `semantic_text` fields with that inference endpoint as their `inference_id`.
-Please check that inference endpoints are not used in `semantic_text` fields before removal.
+WARNING: Removing an {infer} endpoint will cause ingestion of documents and semantic queries to fail on indices that define `semantic_text` fields with that {infer} endpoint as their `inference_id`.
+Trying to <> that is used on a `semantic_text` field will result in an error.
[discrete]
[[auto-text-chunking]]
@@ -65,6 +65,9 @@ To allow for large amounts of text to be used in semantic search, `semantic_text`
Each chunk will include the text subpassage and the corresponding embedding generated from it.
When querying, the individual passages will be automatically searched for each document, and the most relevant passage will be used to compute a score.
+Documents are split into 250-word sections with a 100-word overlap so that each section shares 100 words with the previous section.
+This overlap ensures continuity and prevents vital contextual information in the input text from being lost by a hard break.
+
[discrete]
[[semantic-text-structure]]
@@ -118,13 +121,19 @@ In case you want to customize data indexing, use the
<> or <> field types and
create an ingest pipeline with an <> to generate the embeddings.
-<> walks you through the process.
+<> walks you through the process. In
+these cases - when you use `sparse_vector` or `dense_vector` field types instead
+of the `semantic_text` field type to customize indexing - using the
+<> is not supported for querying the
+field data.
+
[discrete]
[[update-script]]
==== Updates to `semantic_text` fields
-Updates that use scripts are not supported when the index contains a `semantic_text` field.
+Updates that use scripts are not supported for an index that contains a `semantic_text` field.
+Even if the script targets non-`semantic_text` fields, the update will fail when the index contains a `semantic_text` field.
[discrete]
diff --git a/docs/reference/migration/migrate_8_15.asciidoc b/docs/reference/migration/migrate_8_15.asciidoc
index a183e68a50693..1961230da1bbf 100644
--- a/docs/reference/migration/migrate_8_15.asciidoc
+++ b/docs/reference/migration/migrate_8_15.asciidoc
@@ -16,5 +16,125 @@ coming::[8.15.0]
[[breaking-changes-8.15]]
=== Breaking changes
-There are no breaking changes in {es} 8.15.
+The following changes in {es} 8.15 might affect your applications
+and prevent them from operating normally.
+Before upgrading to 8.15, review these changes and take the described steps
+to mitigate the impact.
+
+[discrete]
+[[breaking_815_cluster_and_node_setting_changes]]
+==== Cluster and node setting changes
+
+[[change_skip_unavailable_remote_cluster_setting_default_value_to_true]]
+.Change `skip_unavailable` remote cluster setting default value to true
+[%collapsible]
+====
+*Details* +
+The default value of the `skip_unavailable` setting is now set to true. All existing and future remote clusters that do not define this setting will use the new default. This setting only affects cross-cluster searches using the _search or _async_search API.
+
+*Impact* +
+Unavailable remote clusters in a cross-cluster search will no longer cause the search to fail unless skip_unavailable is configured to be `false` in elasticsearch.yml or via the `_cluster/settings` API. Unavailable clusters with `skip_unavailable`=`true` (either explicitly or by using the new default) are marked as SKIPPED in the search response metadata section and do not fail the entire search. If users want to ensure that a search returns a failure when a particular remote cluster is not available, `skip_unavailable` must now be set explicitly.
+====
+
+[discrete]
+[[breaking_815_rollup_changes]]
+==== Rollup changes
+
+[[disallow_new_rollup_jobs_in_clusters_with_no_rollup_usage]]
+.Disallow new rollup jobs in clusters with no rollup usage
+[%collapsible]
+====
+*Details* +
+The put rollup API will fail with an error when a rollup job is created in a cluster with no rollup usage.
+
+*Impact* +
+Clusters with no rollup usage (either no rollup job or index) cannot create new rollup jobs.
+====
+
+[discrete]
+[[breaking_815_rest_api_changes]]
+==== REST API changes
+
+[[interpret_timeout_1_as_infinite_ack_timeout]]
+.Interpret `?timeout=-1` as infinite ack timeout
+[%collapsible]
+====
+*Details* +
+Today {es} accepts the parameter `?timeout=-1` in many APIs, but interprets
+this to mean the same as `?timeout=0`. From 8.15 onwards `?timeout=-1` will
+mean to wait indefinitely, aligning the behaviour of this parameter with
+other similar parameters such as `?master_timeout`.
+
+*Impact* +
+Use `?timeout=0` to force relevant operations to time out immediately
+instead of `?timeout=-1`.
+====
+
+[[replace_model_id_with_inference_id]]
+.Replace `model_id` with `inference_id` in GET inference API
+[%collapsible]
+====
+*Details* +
+From 8.15 onwards the <> response will return an
+`inference_id` field instead of a `model_id`.
+
+*Impact* +
+If your application uses the `model_id` in a GET inference API response,
+switch it to use `inference_id` instead.
+====
+
+
+[discrete]
+[[deprecated-8.15]]
+=== Deprecations
+
+The following functionality has been deprecated in {es} 8.15
+and will be removed in a future version.
+While this won't have an immediate impact on your applications,
+we strongly encourage you to take the described steps to update your code
+after upgrading to 8.15.
+
+To find out if you are using any deprecated functionality,
+enable <>.
+
+[discrete]
+[[deprecations_815_cluster_and_node_setting]]
+==== Cluster and node setting deprecations
+
+[[deprecate_absolute_size_values_for_indices_breaker_total_limit_setting]]
+.Deprecate absolute size values for `indices.breaker.total.limit` setting
+[%collapsible]
+====
+*Details* +
+Previously, the value of `indices.breaker.total.limit` could be specified as an absolute size in bytes. This setting controls the overall amount of memory the server is allowed to use before taking remedial actions. Setting this to a specific number of bytes led to strange behaviour when the node maximum heap size changed because the circuit breaker limit would remain unchanged. This would either leave the value too low, causing part of the heap to remain unused; or it would leave the value too high, causing the circuit breaker to be ineffective at preventing OOM errors. The only reasonable behaviour for this setting is that it scales with the size of the heap, and so absolute byte limits are now deprecated.
+
+*Impact* +
+Users must change their configuration to specify a percentage instead of an absolute number of bytes for `indices.breaker.total.limit`, or else accept the default, which is already specified as a percentage.
+====
+
+[discrete]
+[[deprecations_815_rest_api]]
+==== REST API deprecations
+
+[[deprecate_text_expansion_weighted_tokens_queries]]
+.Deprecate `text_expansion` and `weighted_tokens` queries
+[%collapsible]
+====
+*Details* +
+The `text_expansion` and `weighted_tokens` queries have been replaced by `sparse_vector`.
+
+*Impact* +
+Please update your existing `text_expansion` and `weighted_tokens` queries to use `sparse_vector`.
+====
+
+[[deprecate_using_slm_privileges_to_access_ilm]]
+.Deprecate using slm privileges to access ilm
+[%collapsible]
+====
+*Details* +
+The `read_slm` privilege can get the ILM status, and the `manage_slm` privilege can start and stop ILM. Access to these APIs should be granted using the `read_ilm` and `manage_ilm` privileges instead. Access to ILM APIs will be removed from SLM privileges in a future major release, and is now deprecated.
+
+*Impact* +
+Users that need access to the ILM status API should now use the `read_ilm` privilege. Users that need to start and stop ILM should use the `manage_ilm` privilege.
+==== diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index 012904a9affa7..7bf02e7a0dd6e 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -105,6 +105,20 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=custom-rules] (array) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=custom-rules-actions] +//Begin analysis_config.detectors.custom_rules.params +`params`::: +(object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=custom-rules-params] ++ +.Properties of `params` +[%collapsible%open] +======= +`force_time_shift`:::: +(object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=custom-rules-params-force-time-shift] +======= +//End analysis_config.detectors.custom_rules.params + //Begin analysis_config.detectors.custom_rules.conditions `conditions`::: (array) diff --git a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc index 6953235c854cb..ee13247fc8838 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc @@ -15,7 +15,7 @@ Updates certain properties of an {anomaly-job}. [[ml-update-job-prereqs]] == {api-prereq-title} -Requires the `manage_ml` cluster privilege. This privilege is included in the +Requires the `manage_ml` cluster privilege. This privilege is included in the `machine_learning_admin` built-in role. [[ml-update-job-path-parms]] @@ -51,7 +51,7 @@ You can update the `analysis_limits` only while the job is closed. [%collapsible%open] ==== `model_memory_limit`::: -(long or string) +(long or string) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=model-memory-limit-ad] + -- @@ -61,8 +61,8 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=model-memory-limit-ad] determine the current usage, refer to the `model_bytes` value in the <> API. * If the `memory_status` property in the -<> has a value of -`hard_limit`, this means that it was unable to process some data. You might want +<> has a value of +`hard_limit`, this means that it was unable to process some data. You might want to re-run the job with an increased `model_memory_limit`. ======= -- @@ -111,6 +111,21 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=custom-rules] (array) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=custom-rules-actions] +//Begin analysis_config.detectors.custom_rules.params +`params`::: +(object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=custom-rules-params] ++ +.Properties of `params` +[%collapsible%open] +======= +[[force-time-shift-params]] +`force_time_shift`:::: +(object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=custom-rules-params-force-time-shift] +======= +//End analysis_config.detectors.custom_rules.params + // Begin detectors.custom_rules.conditions `conditions`::: (array) diff --git a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc index 5c061daa1cd44..63a0f047db647 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc @@ -52,6 +52,12 @@ detects anomalies where the geographic location of a credit card transaction is unusual for a particular customer’s credit card. An anomaly might indicate fraud. 
+A "typical" value indicates a centroid of a cluster of previously observed +locations that is closest to the "actual" location at that time. For example, +there may be one centroid near the person's home that is associated with the +cluster of local grocery stores and restaurants, and another centroid near the +person's work associated with the cluster of lunch and coffee places. + IMPORTANT: The `field_name` that you supply must be a single string that contains two comma-separated numbers of the form `latitude,longitude`, a `geo_point` field, a `geo_shape` field that contains point values, or a diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index a69fd2f1812e9..44c2012f502e1 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -1,3 +1,27 @@ +tag::adaptive-allocation[] +Adaptive allocations configuration object. +If enabled, the number of allocations of the model is set based on the current load the process gets. +When the load is high, a new model allocation is automatically created (respecting the value of `max_number_of_allocations` if it's set). +When the load is low, a model allocation is automatically removed (respecting the value of `min_number_of_allocations` if it's set). +The number of model allocations cannot be scaled down to less than `1` this way. +If `adaptive_allocations` is enabled, do not set the number of allocations manually. +end::adaptive-allocation[] + +tag::adaptive-allocation-enabled[] +If `true`, `adaptive_allocations` is enabled. +Defaults to `false`. +end::adaptive-allocation-enabled[] + +tag::adaptive-allocation-max-number[] +Specifies the maximum number of allocations to scale to. +If set, it must be greater than or equal to `min_number_of_allocations`. +end::adaptive-allocation-max-number[] + +tag::adaptive-allocation-min-number[] +Specifies the minimum number of allocations to scale to. +If set, it must be greater than or equal to `1`. +end::adaptive-allocation-min-number[] + tag::aggregations[] If set, the {dfeed} performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. For more information, @@ -360,6 +384,10 @@ model. Unless you also specify `skip_result`, the results will be created as usual. This action is suitable when certain values are expected to be consistently anomalous and they affect the model in a way that negatively impacts the rest of the results. +* `force_time_shift`: This action will shift the time inside the anomaly detector by a specified +amount. This is useful, e.g. to quickly adapt to the daylight saving time events that +are known beforehand. This action requires a `force_time_shift` parameter +in the `params` object. end::custom-rules-actions[] tag::custom-rules-scope[] @@ -404,6 +432,16 @@ tag::custom-rules-conditions-value[] The value that is compared against the `applies_to` field using the `operator`. end::custom-rules-conditions-value[] +tag::custom-rules-params[] +A set of parameter objects that customize the actions defined in the custom rules +actions array. The available parameters (depending on the specified actions) include: +`force_time_shift`. +end::custom-rules-params[] + +tag::custom-rules-params-force-time-shift[] +Set `time_shift_amount` to the signed number of seconds by which you want to shift the time. +end::custom-rules-params-force-time-shift[] + tag::custom-settings[] Advanced configuration option. Contains custom metadata about the job. 
For example, it can contain custom URL information as shown in diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index f1b3fffb8a9a2..6f7e2a4d9f988 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -30,7 +30,10 @@ must be unique and should not match any other deployment ID or model ID, unless it is the same as the ID of the model being deployed. If `deployment_id` is not set, it defaults to the `model_id`. -Scaling inference performance can be achieved by setting the parameters +You can enable adaptive allocations to automatically scale model allocations up +and down based on the actual resource requirement of the processes. + +Manually scaling inference performance can be achieved by setting the parameters `number_of_allocations` and `threads_per_allocation`. Increasing `threads_per_allocation` means more threads are used when an @@ -58,6 +61,46 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=model-id] [[start-trained-model-deployment-query-params]] == {api-query-parms-title} +`deployment_id`:: +(Optional, string) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=deployment-id] ++ +-- +Defaults to `model_id`. +-- + +`timeout`:: +(Optional, time) +Controls the amount of time to wait for the model to deploy. Defaults to 30 +seconds. + +`wait_for`:: +(Optional, string) +Specifies the allocation status to wait for before returning. Defaults to +`started`. The value `starting` indicates deployment is starting but not yet on +any node. The value `started` indicates the model has started on at least one +node. The value `fully_allocated` indicates the deployment has started on all +valid nodes. + +[[start-trained-model-deployment-request-body]] +== {api-request-body-title} + +`adaptive_allocations`:: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + +`enabled`::: +(Optional, Boolean) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] + +`max_number_of_allocations`::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-max-number] + +`min_number_of_allocations`::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] + `cache_size`:: (Optional, <>) The inference cache size (in memory outside the JVM heap) per node for the @@ -65,15 +108,11 @@ model. In serverless, the cache is disabled by default. Otherwise, the default v `model_size_bytes` field in the <>. To disable the cache, `0b` can be provided. -`deployment_id`:: -(Optional, string) -include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=deployment-id] -Defaults to `model_id`. - `number_of_allocations`:: (Optional, integer) The total number of allocations this model is assigned across {ml} nodes. -Increasing this value generally increases the throughput. Defaults to 1. +Increasing this value generally increases the throughput. Defaults to `1`. +If `adaptive_allocations` is enabled, do not set this value, because it's automatically set. `priority`:: (Optional, string) @@ -110,18 +149,6 @@ compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. Defaults to 1. Must be a power of 2. Max allowed value is 32. 
-`timeout`:: -(Optional, time) -Controls the amount of time to wait for the model to deploy. Defaults to 30 -seconds. - -`wait_for`:: -(Optional, string) -Specifies the allocation status to wait for before returning. Defaults to -`started`. The value `starting` indicates deployment is starting but not yet on -any node. The value `started` indicates the model has started on at least one -node. The value `fully_allocated` indicates the deployment has started on all -valid nodes. [[start-trained-model-deployment-example]] == {api-examples-title} @@ -182,3 +209,24 @@ The `my_model` trained model can be deployed again with a different ID: POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_search -------------------------------------------------- // TEST[skip:TBD] + + +[[start-trained-model-deployment-adaptive-allocation-example]] +=== Setting adaptive allocations + +The following example starts a new deployment of the `my_model` trained model +with the ID `my_model_for_search` and enables adaptive allocations with the +minimum number of 3 allocations and the maximum number of 10. + +[source,console] +-------------------------------------------------- +POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_search +{ + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + } +} +-------------------------------------------------- +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc index ea5508fac26dd..d49ee3c6e872c 100644 --- a/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc @@ -25,7 +25,11 @@ Requires the `manage_ml` cluster privilege. This privilege is included in the == {api-description-title} You can update a trained model deployment whose `assignment_state` is `started`. -You can either increase or decrease the number of allocations of such a deployment. +You can enable adaptive allocations to automatically scale model allocations up +and down based on the actual resource requirement of the processes. +Or you can manually increase or decrease the number of allocations of a model +deployment. + [[update-trained-model-deployments-path-parms]] == {api-path-parms-title} @@ -37,17 +41,34 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=deployment-id] [[update-trained-model-deployment-request-body]] == {api-request-body-title} +`adaptive_allocations`:: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + +`enabled`::: +(Optional, Boolean) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] + +`max_number_of_allocations`::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-max-number] + +`min_number_of_allocations`::: +(Optional, integer) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] + `number_of_allocations`:: (Optional, integer) The total number of allocations this model is assigned across {ml} nodes. Increasing this value generally increases the throughput. +If `adaptive_allocations` is enabled, do not set this value, because it's automatically set. 
[[update-trained-model-deployment-example]] == {api-examples-title} The following example updates the deployment for a - `elastic__distilbert-base-uncased-finetuned-conll03-english` trained model to have 4 allocations: +`elastic__distilbert-base-uncased-finetuned-conll03-english` trained model to have 4 allocations: [source,console] -------------------------------------------------- @@ -84,3 +105,21 @@ The API returns the following results: } } ---- + +The following example updates the deployment for a +`elastic__distilbert-base-uncased-finetuned-conll03-english` trained model to +enable adaptive allocations with the minimum number of 3 allocations and the +maximum number of 10: + +[source,console] +-------------------------------------------------- +POST _ml/trained_models/elastic__distilbert-base-uncased-finetuned-conll03-english/deployment/_update +{ + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + } +} +-------------------------------------------------- +// TEST[skip:TBD] \ No newline at end of file diff --git a/docs/reference/modules/cluster.asciidoc b/docs/reference/modules/cluster.asciidoc index 4b9ede5450683..b3eaa5b47c238 100644 --- a/docs/reference/modules/cluster.asciidoc +++ b/docs/reference/modules/cluster.asciidoc @@ -1,9 +1,7 @@ [[modules-cluster]] === Cluster-level shard allocation and routing settings -_Shard allocation_ is the process of allocating shards to nodes. This can -happen during initial recovery, replica allocation, rebalancing, or -when nodes are added or removed. +include::{es-ref-dir}/modules/shard-allocation-desc.asciidoc[] One of the main roles of the master is to decide which shards to allocate to which nodes, and when to move shards between nodes in order to rebalance the diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index 2308ec259da48..537783ef6ff01 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -6,7 +6,10 @@ mode are described separately. `cluster.remote..mode`:: The mode used for a remote cluster connection. The only supported modes are - `sniff` and `proxy`. + `sniff` and `proxy`. The default is `sniff`. See <> for + further information about these modes, and <> + and <> for further information about their + settings. `cluster.remote.initial_connect_timeout`:: @@ -97,6 +100,11 @@ you configure the remotes. [[remote-cluster-sniff-settings]] ==== Sniff mode remote cluster settings +To use <> to connect to a remote cluster, set +`cluster.remote..mode: sniff` and then configure the following +settings. You may also leave `cluster.remote..mode` unset since +`sniff` is the default mode. + `cluster.remote..seeds`:: The list of seed nodes used to sniff the remote cluster state. @@ -117,6 +125,10 @@ you configure the remotes. [[remote-cluster-proxy-settings]] ==== Proxy mode remote cluster settings +To use <> to connect to a remote cluster, set +`cluster.remote..mode: proxy` and then configure the following +settings. + `cluster.remote..proxy_address`:: The address used for all remote connections. 
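+
+As an illustrative sketch (the alias `cluster_two` and the address are placeholders, not part of this change), a proxy-mode remote cluster could be configured dynamically with the cluster settings API:
+
+[source,console]
+----
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster.remote.cluster_two.mode": "proxy",
+    "cluster.remote.cluster_two.proxy_address": "my.proxy.host:9300"
+  }
+}
+----
+// TEST[skip:TBD]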
diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc index dc53837125ee9..dab5d61a792cb 100644 --- a/docs/reference/modules/cluster/shards_allocation.asciidoc +++ b/docs/reference/modules/cluster/shards_allocation.asciidoc @@ -15,7 +15,8 @@ Enable or disable allocation for specific kinds of shards: * `new_primaries` - Allows shard allocation only for primary shards for new indices. * `none` - No shard allocations of any kind are allowed for any indices. -This setting does not affect the recovery of local primary shards when +This setting only affects future allocations, and does not re-allocate or un-allocate currently allocated shards. +It also does not affect the recovery of local primary shards when restarting a node. A restarted node that has a copy of an unassigned primary shard will recover that primary immediately, assuming that its allocation id matches one of the active allocation ids in the cluster state. diff --git a/docs/reference/modules/discovery/fault-detection.asciidoc b/docs/reference/modules/discovery/fault-detection.asciidoc index dfa49e5b0d9af..21f4ae2317e6a 100644 --- a/docs/reference/modules/discovery/fault-detection.asciidoc +++ b/docs/reference/modules/discovery/fault-detection.asciidoc @@ -35,268 +35,30 @@ starting from the beginning of the cluster state update. Refer to [[cluster-fault-detection-troubleshooting]] ==== Troubleshooting an unstable cluster -//tag::troubleshooting[] -Normally, a node will only leave a cluster if deliberately shut down. If a node -leaves the cluster unexpectedly, it's important to address the cause. A cluster -in which nodes leave unexpectedly is unstable and can create several issues. -For instance: -* The cluster health may be yellow or red. - -* Some shards will be initializing and other shards may be failing. - -* Search, indexing, and monitoring operations may fail and report exceptions in -logs. - -* The `.security` index may be unavailable, blocking access to the cluster. - -* The master may appear busy due to frequent cluster state updates. - -To troubleshoot a cluster in this state, first ensure the cluster has a -<>. Next, focus on the nodes -unexpectedly leaving the cluster ahead of all other issues. It will not be -possible to solve other issues until the cluster has a stable master node and -stable node membership. - -Diagnostics and statistics are usually not useful in an unstable cluster. These -tools only offer a view of the state of the cluster at a single point in time. -Instead, look at the cluster logs to see the pattern of behaviour over time. -Focus particularly on logs from the elected master. When a node leaves the -cluster, logs for the elected master include a message like this (with line -breaks added to make it easier to read): - -[source,text] ----- -[2022-03-21T11:02:35,513][INFO ][o.e.c.c.NodeLeftExecutor] [instance-0000000000] - node-left: [{instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{aNlyORLASam1ammv2DzYXA}{172.27.47.21}{172.27.47.21:19054}{m}] - with reason [disconnected] ----- - -This message says that the `NodeLeftExecutor` on the elected master -(`instance-0000000000`) processed a `node-left` task, identifying the node that -was removed and the reason for its removal. 
When the node joins the cluster -again, logs for the elected master will include a message like this (with line -breaks added to make it easier to read): - -[source,text] ----- -[2022-03-21T11:02:59,892][INFO ][o.e.c.c.NodeJoinExecutor] [instance-0000000000] - node-join: [{instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{UNw_RuazQCSBskWZV8ID_w}{172.27.47.21}{172.27.47.21:19054}{m}] - with reason [joining after restart, removed [24s] ago with reason [disconnected]] ----- - -This message says that the `NodeJoinExecutor` on the elected master -(`instance-0000000000`) processed a `node-join` task, identifying the node that -was added to the cluster and the reason for the task. - -Other nodes may log similar messages, but report fewer details: - -[source,text] ----- -[2020-01-29T11:02:36,985][INFO ][o.e.c.s.ClusterApplierService] - [instance-0000000001] removed { - {instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{aNlyORLASam1ammv2DzYXA}{172.27.47.21}{172.27.47.21:19054}{m} - {tiebreaker-0000000003}{UNw_RuazQCSBskWZV8ID_w}{bltyVOQ-RNu20OQfTHSLtA}{172.27.161.154}{172.27.161.154:19251}{mv} - }, term: 14, version: 1653415, reason: Publication{term=14, version=1653415} ----- - -These messages are not especially useful for troubleshooting, so focus on the -ones from the `NodeLeftExecutor` and `NodeJoinExecutor` which are only emitted -on the elected master and which contain more details. If you don't see the -messages from the `NodeLeftExecutor` and `NodeJoinExecutor`, check that: - -* You're looking at the logs for the elected master node. - -* The logs cover the correct time period. - -* Logging is enabled at `INFO` level. - -Nodes will also log a message containing `master node changed` whenever they -start or stop following the elected master. You can use these messages to -determine each node's view of the state of the master over time. - -If a node restarts, it will leave the cluster and then join the cluster again. -When it rejoins, the `NodeJoinExecutor` will log that it processed a -`node-join` task indicating that the node is `joining after restart`. If a node -is unexpectedly restarting, look at the node's logs to see why it is shutting -down. - -The <> API on the affected node will also provide some useful -information about the situation. - -If the node did not restart then you should look at the reason for its -departure more closely. Each reason has different troubleshooting steps, -described below. There are three possible reasons: - -* `disconnected`: The connection from the master node to the removed node was -closed. - -* `lagging`: The master published a cluster state update, but the removed node -did not apply it within the permitted timeout. By default, this timeout is 2 -minutes. Refer to <> for information about the -settings which control this mechanism. - -* `followers check retry count exceeded`: The master sent a number of -consecutive health checks to the removed node. These checks were rejected or -timed out. By default, each health check times out after 10 seconds and {es} -removes the node removed after three consecutively failed health checks. Refer -to <> for information about the settings which -control this mechanism. +See <>. [discrete] ===== Diagnosing `disconnected` nodes -Nodes typically leave the cluster with reason `disconnected` when they shut -down, but if they rejoin the cluster without restarting then there is some -other problem. - -{es} is designed to run on a fairly reliable network. 
It opens a number of TCP -connections between nodes and expects these connections to remain open forever. -If a connection is closed then {es} will try and reconnect, so the occasional -blip should have limited impact on the cluster even if the affected node -briefly leaves the cluster. In contrast, repeatedly-dropped connections will -severely affect its operation. - -The connections from the elected master node to every other node in the cluster -are particularly important. The elected master never spontaneously closes its -outbound connections to other nodes. Similarly, once a connection is fully -established, a node never spontaneously close its inbound connections unless -the node is shutting down. - -If you see a node unexpectedly leave the cluster with the `disconnected` -reason, something other than {es} likely caused the connection to close. A -common cause is a misconfigured firewall with an improper timeout or another -policy that's <>. It could also -be caused by general connectivity issues, such as packet loss due to faulty -hardware or network congestion. If you're an advanced user, you can get more -detailed information about network exceptions by configuring the following -loggers: - -[source,yaml] ----- -logger.org.elasticsearch.transport.TcpTransport: DEBUG -logger.org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport: DEBUG ----- - -In extreme cases, you may need to take packet captures using `tcpdump` to -determine whether messages between nodes are being dropped or rejected by some -other device on the network. +See <>. [discrete] ===== Diagnosing `lagging` nodes -{es} needs every node to process cluster state updates reasonably quickly. If a -node takes too long to process a cluster state update, it can be harmful to the -cluster. The master will remove these nodes with the `lagging` reason. Refer to -<> for information about the settings which control -this mechanism. - -Lagging is typically caused by performance issues on the removed node. However, -a node may also lag due to severe network delays. To rule out network delays, -ensure that `net.ipv4.tcp_retries2` is <>. Log messages that contain `warn threshold` may provide more -information about the root cause. - -If you're an advanced user, you can get more detailed information about what -the node was doing when it was removed by configuring the following logger: - -[source,yaml] ----- -logger.org.elasticsearch.cluster.coordination.LagDetector: DEBUG ----- - -When this logger is enabled, {es} will attempt to run the -<> API on the faulty node and report the results in -the logs on the elected master. The results are compressed, encoded, and split -into chunks to avoid truncation: - -[source,text] ----- -[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 1]: H4sIAAAAAAAA/x... -[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 2]: p7x3w1hmOQVtuV... -[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 3]: v7uTboMGDbyOy+... 
-[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 4]: 4tse0RnPnLeDNN... -[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] (gzip compressed, base64-encoded, and split into 4 parts on preceding log lines) ----- - -To reconstruct the output, base64-decode the data and decompress it using -`gzip`. For instance, on Unix-like systems: - -[source,sh] ----- -cat lagdetector.log | sed -e 's/.*://' | base64 --decode | gzip --decompress ----- +See <>. [discrete] ===== Diagnosing `follower check retry count exceeded` nodes -Nodes sometimes leave the cluster with reason `follower check retry count -exceeded` when they shut down, but if they rejoin the cluster without -restarting then there is some other problem. - -{es} needs every node to respond to network messages successfully and -reasonably quickly. If a node rejects requests or does not respond at all then -it can be harmful to the cluster. If enough consecutive checks fail then the -master will remove the node with reason `follower check retry count exceeded` -and will indicate in the `node-left` message how many of the consecutive -unsuccessful checks failed and how many of them timed out. Refer to -<> for information about the settings which control -this mechanism. - -Timeouts and failures may be due to network delays or performance problems on -the affected nodes. Ensure that `net.ipv4.tcp_retries2` is -<> to eliminate network delays as -a possible cause for this kind of instability. Log messages containing -`warn threshold` may give further clues about the cause of the instability. - -If the last check failed with an exception then the exception is reported, and -typically indicates the problem that needs to be addressed. If any of the -checks timed out then narrow down the problem as follows. - -include::../../troubleshooting/network-timeouts.asciidoc[tag=troubleshooting-network-timeouts-gc-vm] - -include::../../troubleshooting/network-timeouts.asciidoc[tag=troubleshooting-network-timeouts-packet-capture-fault-detection] - -include::../../troubleshooting/network-timeouts.asciidoc[tag=troubleshooting-network-timeouts-threads] - -By default the follower checks will time out after 30s, so if node departures -are unpredictable then capture stack dumps every 15s to be sure that at least -one stack dump was taken at the right time. +See <>. [discrete] ===== Diagnosing `ShardLockObtainFailedException` failures -If a node leaves and rejoins the cluster then {es} will usually shut down and -re-initialize its shards. If the shards do not shut down quickly enough then -{es} may fail to re-initialize them due to a `ShardLockObtainFailedException`. +See <>. -To gather more information about the reason for shards shutting down slowly, -configure the following logger: - -[source,yaml] ----- -logger.org.elasticsearch.env.NodeEnvironment: DEBUG ----- - -When this logger is enabled, {es} will attempt to run the -<> API whenever it encounters a -`ShardLockObtainFailedException`. The results are compressed, encoded, and -split into chunks to avoid truncation: - -[source,text] ----- -[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 1]: H4sIAAAAAAAA/x... 
-[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 2]: p7x3w1hmOQVtuV... -[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 3]: v7uTboMGDbyOy+... -[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 4]: 4tse0RnPnLeDNN... -[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] (gzip compressed, base64-encoded, and split into 4 parts on preceding log lines) ----- - -To reconstruct the output, base64-decode the data and decompress it using -`gzip`. For instance, on Unix-like systems: +[discrete] +===== Diagnosing other network disconnections -[source,sh] ----- -cat shardlock.log | sed -e 's/.*://' | base64 --decode | gzip --decompress ----- -//end::troubleshooting[] \ No newline at end of file +See <>. diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index d6ee730d5021c..bf7e6de64f093 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -4,11 +4,11 @@ The local gateway stores the cluster state and shard data across full cluster restarts. -The following _static_ settings, which must be set on every master node, +The following _static_ settings, which must be set on every <>, control how long a freshly elected master should wait before it tries to -recover the cluster state and the cluster's data. +recover the <> and the cluster's data. -NOTE: These settings only take effect on a full cluster restart. +NOTE: These settings only take effect during a <>. `gateway.expected_data_nodes`:: (<>) diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 593aa79ded4d9..8fdc9f2e4f9cb 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -5,7 +5,9 @@ Each {es} node has two different network interfaces. Clients send requests to {es}'s REST APIs using its <>, but nodes communicate with other nodes using the <>. The transport interface is also used for communication with -<>. +<>. The transport interface uses a custom +binary protocol sent over <> TCP channels. +Both interfaces can be configured to use <>. You can configure both of these interfaces at the same time using the `network.*` settings. If you have a more complicated network, you might need to diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 25217302b7631..ca1c507aa4ed9 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -1,7 +1,7 @@ [[remote-clusters]] == Remote clusters You can connect a local cluster to other {es} clusters, known as _remote -clusters_. Remote clusters can be located in different datacenters or +clusters_. Remote clusters can be located in different datacenters or geographic regions, and contain indices or data streams that can be replicated with {ccr} or searched by a local cluster using {ccs}. @@ -30,9 +30,9 @@ capabilities, the local and remote cluster must be on the same [discrete] === Add remote clusters -NOTE: The instructions that follow describe how to create a remote connection from a -self-managed cluster. 
You can also set up {ccs} and {ccr} from an -link:https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html[{ess} deployment] +NOTE: The instructions that follow describe how to create a remote connection from a +self-managed cluster. You can also set up {ccs} and {ccr} from an +link:https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html[{ess} deployment] or from an link:https://www.elastic.co/guide/en/cloud-enterprise/current/ece-enable-ccs.html[{ece} deployment]. To add remote clusters, you can choose between @@ -52,7 +52,7 @@ controls. <>. Certificate based security model:: Uses mutual TLS authentication for cross-cluster operations. User authentication -is performed on the local cluster and a user's role names are passed to the +is performed on the local cluster and a user's role names are passed to the remote cluster. In this model, a superuser on the local cluster gains total read access to the remote cluster, so it is only suitable for clusters that are in the same security domain. <>. @@ -63,13 +63,17 @@ the same security domain. <>. [[sniff-mode]] Sniff mode:: -In sniff mode, a cluster is created using a name and a list of seed nodes. When -a remote cluster is registered, its cluster state is retrieved from one of the -seed nodes and up to three _gateway nodes_ are selected as part of remote -cluster requests. This mode requires that the gateway node's publish addresses -are accessible by the local cluster. +In sniff mode, a cluster alias is registered with a name of your choosing and a +list of addresses of _seed_ nodes specified with the +`cluster.remote..seeds` setting. When you register a remote +cluster using sniff mode, {es} retrieves from one of the seed nodes the +addresses of up to three _gateway nodes_. Each `remote_cluster_client` node in +the local {es} cluster then opens several TCP connections to the publish +addresses of the gateway nodes. This mode therefore requires that the gateway +nodes' publish addresses are accessible to nodes in the local cluster. + -Sniff mode is the default connection mode. +Sniff mode is the default connection mode. See <> +for more information about configuring sniff mode. + [[gateway-nodes-selection]] The _gateway nodes_ selection depends on the following criteria: @@ -84,13 +88,21 @@ However, such nodes still have to satisfy the two above requirements. [[proxy-mode]] Proxy mode:: -In proxy mode, a cluster is created using a name and a single proxy address. -When you register a remote cluster, a configurable number of socket connections -are opened to the proxy address. The proxy is required to route those -connections to the remote cluster. Proxy mode does not require remote cluster -nodes to have accessible publish addresses. +In proxy mode, a cluster alias is registered with a name of your choosing and +the address of a TCP (layer 4) reverse proxy specified with the +`cluster.remote..proxy_address` setting. You must configure this +proxy to route connections to one or more nodes of the remote cluster. When you +register a remote cluster using proxy mode, {es} opens several TCP connections +to the proxy address and uses these connections to communicate with the remote +cluster. In proxy mode {es} disregards the publish addresses of the remote +cluster nodes which means that the publish addresses of the remote cluster +nodes need not be accessible to the local cluster. ++ +Proxy mode is not the default connection mode, so you must set +`cluster.remote..mode: proxy` to use it. 
See +<> for more information about configuring proxy +mode. + -The proxy mode is not the default connection mode and must be configured. Proxy mode has the same <> as sniff mode. diff --git a/docs/reference/modules/shard-allocation-desc.asciidoc b/docs/reference/modules/shard-allocation-desc.asciidoc new file mode 100644 index 0000000000000..426ad0da72e1b --- /dev/null +++ b/docs/reference/modules/shard-allocation-desc.asciidoc @@ -0,0 +1,2 @@ +Shard allocation is the process of assigning shard copies to nodes. This can +happen during initial recovery, replica allocation, rebalancing, when nodes are added to or removed from the cluster, or when cluster or index settings that impact allocation are updated. \ No newline at end of file diff --git a/docs/reference/modules/shard-ops.asciidoc b/docs/reference/modules/shard-ops.asciidoc new file mode 100644 index 0000000000000..c0e5ee6a220f0 --- /dev/null +++ b/docs/reference/modules/shard-ops.asciidoc @@ -0,0 +1,75 @@ +[[shard-allocation-relocation-recovery]] +=== Shard allocation, relocation, and recovery + +Each <> in Elasticsearch is divided into one or more <>. +Each document in an index belongs to a single shard. + +A cluster can contain multiple copies of a shard. Each shard has one distinguished shard copy called the _primary_, and zero or more non-primary copies called _replicas_. The primary shard copy serves as the main entry point for all indexing operations. The operations on the primary shard copy are then forwarded to its replicas. + +Replicas maintain redundant copies of your data across the <> in your cluster, protecting against hardware failure and increasing capacity to serve read requests like searching or retrieving a document. If the primary shard copy fails, then a replica is promoted to primary and takes over the primary's responsibilities. + +Over the course of normal operation, Elasticsearch allocates shard copies to nodes, relocates shard copies across nodes to balance the cluster or satisfy new allocation constraints, and recovers shards to initialize new copies. In this topic, you'll learn how these operations work and how you can control them. + +TIP: To learn about optimizing the number and size of shards in your cluster, refer to <>. To learn about how read and write operations are replicated across shards and shard copies, refer to <>. + +[[shard-allocation]] +==== Shard allocation + +include::{es-ref-dir}/modules/shard-allocation-desc.asciidoc[] + +By default, the primary and replica shard copies for an index can be allocated to any node in the cluster, and may be relocated to rebalance the cluster. + +===== Adjust shard allocation settings + +You can control how shard copies are allocated using the following settings: + +- <>: Use these settings to control how shard copies are allocated and balanced across the entire cluster. For example, you might want to allocate shard copies across availability zones, or prevent certain nodes from being used so you can perform maintenance. + +- <>: Use these settings to control how the shard copies for a specific index are allocated. For example, you might want to allocate an index to a node in a specific data tier, or to a node with specific attributes. + +===== Monitor shard allocation + +If a shard copy is unassigned, it means that the shard copy is not allocated to any node in the cluster. This can happen if there are not enough nodes in the cluster to allocate the shard copy, or if the shard copy can't be allocated to any node that satisfies the shard allocation filtering rules.
When a shard copy is unassigned, your cluster is considered unhealthy and returns a yellow or red cluster health status. + +You can use the following APIs to monitor shard allocation: + +- <> +- <> +- <> + +<>. + +[[shard-recovery]] +==== Shard recovery + +include::{es-ref-dir}/modules/shard-recovery-desc.asciidoc[] + +===== Adjust shard recovery settings + +To control how shards are recovered, for example the resources that can be used by recovery operations, and which indices should be prioritized for recovery, you can adjust the following settings: + +- <> +- <> +- <>, including <> and <> + +Shard recovery operations also respect general shard allocation settings. + +===== Monitor shard recovery + +You can use the following APIs to monitor shard recovery: + + - View a list of in-progress and completed recoveries using the <> + - View detailed information about a specific recovery using the <> + +[[shard-relocation]] +==== Shard relocation + +Shard relocation is the process of moving shard copies from one node to another. This can happen when a node joins or leaves the cluster, or when the cluster is rebalancing. + +When a shard copy is relocated, it is created as a new shard copy on the target node. When the shard copy is fully allocated and recovered, the old shard copy is deleted. If the shard copy being relocated is a primary, then the new shard copy is marked as primary before the old shard copy is deleted. + +===== Adjust shard relocation settings + +You can control how and when shard copies are relocated. For example, you can adjust the rebalancing settings that control when shard copies are relocated to balance the cluster, or the high watermark for disk-based shard allocation that can trigger relocation. These settings are part of the <>. + +Shard relocation operations also respect shard allocation and recovery settings. \ No newline at end of file diff --git a/docs/reference/modules/shard-recovery-desc.asciidoc b/docs/reference/modules/shard-recovery-desc.asciidoc new file mode 100644 index 0000000000000..67eaceb528962 --- /dev/null +++ b/docs/reference/modules/shard-recovery-desc.asciidoc @@ -0,0 +1,16 @@ +Shard recovery is the process of initializing a shard copy, such as restoring a +primary shard from a snapshot or creating a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search +and indexing. + +Recovery automatically occurs during the following processes: + +* When creating an index for the first time. +* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. +* Creation of new replica shard copies from the primary. +* Relocation of a shard copy to a different node in the same cluster. +* A <> operation. +* A <>, <>, or +<> operation. + +You can determine the cause of a shard recovery using the <> or <> APIs. \ No newline at end of file diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 63ba4046a395d..1e3380389d861 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -397,68 +397,3 @@ This query does *not* match a document containing the phrase `hot porridge is salty porridge`, because the intervals returned by the match query for `hot porridge` only cover the initial two terms in this document, and these do not overlap the intervals covering `salty`.
- -Another restriction to be aware of is the case of `any_of` rules that contain -sub-rules which overlap. In particular, if one of the rules is a strict -prefix of the other, then the longer rule can never match, which can -cause surprises when used in combination with `max_gaps`. Consider the -following query, searching for `the` immediately followed by `big` or `big bad`, -immediately followed by `wolf`: - -[source,console] --------------------------------------------------- -POST _search -{ - "query": { - "intervals" : { - "my_text" : { - "all_of" : { - "intervals" : [ - { "match" : { "query" : "the" } }, - { "any_of" : { - "intervals" : [ - { "match" : { "query" : "big" } }, - { "match" : { "query" : "big bad" } } - ] } }, - { "match" : { "query" : "wolf" } } - ], - "max_gaps" : 0, - "ordered" : true - } - } - } - } -} --------------------------------------------------- - -Counter-intuitively, this query does *not* match the document `the big bad -wolf`, because the `any_of` rule in the middle only produces intervals -for `big` - intervals for `big bad` being longer than those for `big`, while -starting at the same position, and so being minimized away. In these cases, -it's better to rewrite the query so that all of the options are explicitly -laid out at the top level: - -[source,console] --------------------------------------------------- -POST _search -{ - "query": { - "intervals" : { - "my_text" : { - "any_of" : { - "intervals" : [ - { "match" : { - "query" : "the big bad wolf", - "ordered" : true, - "max_gaps" : 0 } }, - { "match" : { - "query" : "the big wolf", - "ordered" : true, - "max_gaps" : 0 } } - ] - } - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index 319ede7c4ac05..b45247ace3735 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -30,7 +30,7 @@ If you don't need to support a query syntax, consider using the syntax, use the <> query, which is less strict. ==== - + [[query-string-query-ex-request]] ==== Example request @@ -83,7 +83,7 @@ could be expensive. There is a limit on the number of fields times terms that can be queried at once. It is defined by the `indices.query.bool.max_clause_count` -<>, which defaults to 4096. +<>. ==== -- diff --git a/docs/reference/query-dsl/rule-query.asciidoc b/docs/reference/query-dsl/rule-query.asciidoc index cc5616c01eecd..dfedc2261bbde 100644 --- a/docs/reference/query-dsl/rule-query.asciidoc +++ b/docs/reference/query-dsl/rule-query.asciidoc @@ -13,9 +13,10 @@ The old syntax using `rule_query` and `ruleset_id` is deprecated and will be rem ==== Applies <> to the query before returning results. -This feature is used to promote documents in the manner of a <> based on matching defined rules. +Query rules can be used to promote documents in the manner of a <> based on matching defined rules, or to identify specific documents to exclude from a contextual result set. If no matching query rules are defined, the "organic" matches for the query are returned. All matching rules are applied in the order in which they appear in the query ruleset. +If the same document matches both an `exclude` rule and a `pinned` rule, the document will be excluded. 
[NOTE] ==== diff --git a/docs/reference/query-dsl/semantic-query.asciidoc b/docs/reference/query-dsl/semantic-query.asciidoc index d0eb2da95ebc6..22b5e6c5e6aad 100644 --- a/docs/reference/query-dsl/semantic-query.asciidoc +++ b/docs/reference/query-dsl/semantic-query.asciidoc @@ -128,6 +128,10 @@ If you want to fine-tune a search on a `semantic_text` field, you need to know t You can find the task type using the <>, and check the `task_type` associated with the {infer} service. Depending on the `task_type`, use either the <> or the <> query for greater flexibility and customization. +NOTE: While it is possible to use the `sparse_vector` query or the `knn` query +on a `semantic_text` field, it is not supported to use the `semantic_query` on a +`sparse_vector` or `dense_vector` field type. + [discrete] [[search-sparse-inference]] diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index aefb3e4b75eb5..5a5f0e1f5ff99 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -39,7 +39,8 @@ GET /_search -------------------------------------------------- WARNING: `span_multi` queries will hit too many clauses failure if the number of terms that match the query exceeds the -boolean query limit (defaults to 4096).To avoid an unbounded expansion you can set the <>. +To avoid an unbounded expansion you can set the <> of the multi term query to `top_terms_*` rewrite. Or, if you use `span_multi` on `prefix` query only, you can activate the <> field option of the `text` field instead. This will rewrite any prefix query on the field to a single term query that matches the indexed prefix. diff --git a/docs/reference/query-rules/apis/put-query-rule.asciidoc b/docs/reference/query-rules/apis/put-query-rule.asciidoc index 9737673be009c..714ed9b096d1d 100644 --- a/docs/reference/query-rules/apis/put-query-rule.asciidoc +++ b/docs/reference/query-rules/apis/put-query-rule.asciidoc @@ -26,7 +26,10 @@ Requires the `manage_search_query_rules` privilege. `type`:: (Required, string) The type of rule. -At this time only `pinned` query rule types are allowed. +At this time the following query rule types are allowed: + +- `pinned` will identify and pin specific documents to the top of search results. +- `exclude` will exclude specific documents from search results. `criteria`:: (Required, array of objects) The criteria that must be met for the rule to be applied. @@ -80,17 +83,18 @@ Required for all criteria types except `always`. The format of this action depends on the rule type. Actions depend on the rule type. -For `pinned` rules, actions follow the format specified by the <>. -The following actions are allowed: +The following actions are allowed for `pinned` or `exclude` rules: -- `ids` (Optional, array of strings) The unique <> of the documents to pin. +- `ids` (Optional, array of strings) The unique <> of the documents to apply the rule to. Only one of `ids` or `docs` may be specified, and at least one must be specified. -- `docs` (Optional, array of objects) The documents to pin. +- `docs` (Optional, array of objects) The documents to apply the rule to. Only one of `ids` or `docs` may be specified, and at least one must be specified. +There is a maximum value of 100 documents in a rule. You can specify the following attributes for each document: + -- -- `_index` (Required, string) The index of the document to pin. 
+- `_index` (Required, string) The index of the document. +If null, all documents with the specified `_id` will be affected across all searched indices. - `_id` (Required, string) The unique <>. -- @@ -104,7 +108,7 @@ If multiple matching rules pin more than 100 documents, only the first 100 docum The following example creates a new query rule with the ID `my-rule1` in a query ruleset called `my-ruleset`. -`my-rule1` will pin documents with IDs `id1` and `id2` when `user_query` contains `pugs` _or_ `puggles` **and** `user_country` exactly matches `us`. +- `my-rule1` will select documents to promote with IDs `id1` and `id2` when `user_query` contains `pugs` _or_ `puggles` **and** `user_country` exactly matches `us`. [source,console] ---- diff --git a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc index c164e9e140a4e..df7ec100db076 100644 --- a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc @@ -34,7 +34,7 @@ Each rule must have the following information: - `rule_id` (Required, string) A unique identifier for this rule. - `type` (Required, string) The type of rule. -At this time only `pinned` query rule types are allowed. +At this time only `pinned` and `exclude` query rule types are allowed. - `criteria` (Required, array of objects) The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. - `actions` (Required, object) The actions to take when the rule is matched. @@ -84,13 +84,13 @@ Only one value must match for the criteria to be met. Required for all criteria types except `always`. Actions depend on the rule type. -For `pinned` rules, actions follow the format specified by the <>. -The following actions are allowed: +The following actions are allowed for `pinned` or `exclude` rules: -- `ids` (Optional, array of strings) The unique <> of the documents to pin. +- `ids` (Optional, array of strings) The unique <> of the documents to apply the rule to. Only one of `ids` or `docs` may be specified, and at least one must be specified. -- `docs` (Optional, array of objects) The documents to pin. +- `docs` (Optional, array of objects) The documents to apply the rule to. Only one of `ids` or `docs` may be specified, and at least one must be specified. +There is a maximum value of 100 documents in a rule. You can specify the following attributes for each document: + -- @@ -98,7 +98,7 @@ You can specify the following attributes for each document: - `_id` (Required, string) The unique <>. -- -IMPORTANT: Due to limitations within <>, you can only pin documents using `ids` or `docs`, but cannot use both in single rule. +IMPORTANT: Due to limitations within <>, you can only select documents using `ids` or `docs`, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. @@ -111,7 +111,7 @@ The following example creates a new query ruleset called `my-ruleset`. Two rules are associated with `my-ruleset`: - `my-rule1` will pin documents with IDs `id1` and `id2` when `user_query` contains `pugs` _or_ `puggles` **and** `user_country` exactly matches `us`. 
-- `my-rule2` will pin documents from different, specified indices with IDs `id3` and `id4` when the `query_string` fuzzily matches `rescue dogs`. +- `my-rule2` will exclude documents from different, specified indices with IDs `id3` and `id4` when the `query_string` fuzzily matches `rescue dogs`. [source,console] ---- @@ -142,7 +142,7 @@ PUT _query_rules/my-ruleset }, { "rule_id": "my-rule2", - "type": "pinned", + "type": "exclude", "criteria": [ { "type": "fuzzy", diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index c13c1c95c09ff..2069c1bd96ff0 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -1,16 +1,532 @@ [[release-notes-8.15.0]] == {es} version 8.15.0 -coming[8.15.0] - Also see <>. + [[known-issues-8.15.0]] [float] === Known issues - * The `pytorch_inference` process used to run Machine Learning models can consume large amounts of memory. In environments where the available memory is limited, the OS Out of Memory Killer will kill the `pytorch_inference` process to reclaim memory. This can cause inference requests to fail. Elasticsearch will automatically restart the `pytorch_inference` process after it is killed up to four times in 24 hours. (issue: {es-issue}110530[#110530]) + +* Pipeline aggregations under `time_series` and `categorize_text` aggregations are never +returned (issue: {es-issue}111679[#111679]) + +* Elasticsearch will not start on Windows machines if +https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#bootstrap-memory_lock[`bootstrap.memory_lock` is set to `true`]. +Either downgrade to an earlier version, upgrade to 8.15.1, or else follow the +recommendation in the manual to entirely disable swap instead of using the +memory lock feature (issue: {es-issue}111847[#111847]) + +* The `took` field of the response to the <> API is incorrect and may be rather large. Clients which +<> assume that this value will be within a particular range (e.g.
that it fits into a 32-bit +signed integer) may encounter errors (issue: {es-issue}111854[#111854]) + +[[breaking-8.15.0]] +[float] +=== Breaking changes + +Cluster Coordination:: +* Interpret `?timeout=-1` as infinite ack timeout {es-pull}107675[#107675] + +Inference API:: +* Replace `model_id` with `inference_id` in GET inference API {es-pull}111366[#111366] + +Rollup:: +* Disallow new rollup jobs in clusters with no rollup usage {es-pull}108624[#108624] (issue: {es-issue}108381[#108381]) + +Search:: +* Change `skip_unavailable` remote cluster setting default value to true {es-pull}105792[#105792] + +[[bug-8.15.0]] +[float] +=== Bug fixes + +Aggregations:: +* Don't sample calls to `ReduceContext#consumeBucketsAndMaybeBreak` ins `InternalDateHistogram` and `InternalHistogram` during reduction {es-pull}110186[#110186] +* Fix `ClassCastException` in Significant Terms {es-pull}108429[#108429] (issue: {es-issue}108427[#108427]) +* Run terms concurrently when cardinality is only lower than shard size {es-pull}110369[#110369] (issue: {es-issue}105505[#105505]) + +Allocation:: +* Fix misc trappy allocation API timeouts {es-pull}109241[#109241] +* Fix trappy timeout in allocation explain API {es-pull}109240[#109240] + +Analysis:: +* Correct positioning for unique token filter {es-pull}109395[#109395] + +Authentication:: +* Add comma before charset parameter in WWW-Authenticate response header {es-pull}110906[#110906] +* Avoid NPE if `users_roles` file does not exist {es-pull}109606[#109606] +* Improve security-crypto threadpool overflow handling {es-pull}111369[#111369] + +Authorization:: +* Fix trailing slash in `security.put_privileges` specification {es-pull}110177[#110177] +* Fixes cluster state-based role mappings not recovered from disk {es-pull}109167[#109167] +* Handle unmatching remote cluster wildcards properly for `IndicesRequest.SingleIndexNoWildcards` requests {es-pull}109185[#109185] + +Autoscaling:: +* Expose `?master_timeout` in autoscaling APIs {es-pull}108759[#108759] + +CRUD:: +* Update checkpoints after post-replication actions, even on failure {es-pull}109908[#109908] + +Cluster Coordination:: +* Deserialize publish requests on generic thread-pool {es-pull}108814[#108814] (issue: {es-issue}106352[#106352]) +* Fail cluster state API if blocked {es-pull}109618[#109618] (issue: {es-issue}107503[#107503]) +* Use `scheduleUnlessShuttingDown` in `LeaderChecker` {es-pull}108643[#108643] (issue: {es-issue}108642[#108642]) + +Data streams:: +* Apm-data: set concrete values for `metricset.interval` {es-pull}109043[#109043] +* Ecs@mappings: reduce scope for `ecs_geo_point` {es-pull}108349[#108349] (issue: {es-issue}108338[#108338]) +* Include component templates in retention validaiton {es-pull}109779[#109779] + +Distributed:: +* Associate restore snapshot task to parent mount task {es-pull}108705[#108705] (issue: {es-issue}105830[#105830]) +* Don't detect `PlainActionFuture` deadlock on concurrent complete {es-pull}110361[#110361] (issues: {es-issue}110181[#110181], {es-issue}110360[#110360]) +* Handle nullable `DocsStats` and `StoresStats` {es-pull}109196[#109196] + +Downsampling:: +* Support flattened fields and multi-fields as dimensions in downsampling {es-pull}110066[#110066] (issue: {es-issue}99297[#99297]) + +ES|QL:: +* ESQL: Change "substring" function to not return null on empty string {es-pull}109174[#109174] +* ESQL: Fix Join references {es-pull}109989[#109989] +* ESQL: Fix LOOKUP attribute shadowing {es-pull}109807[#109807] (issue: {es-issue}109392[#109392]) +* ESQL: Fix 
Max doubles bug with negatives and add tests for Max and Min {es-pull}110586[#110586] +* ESQL: Fix `IpPrefix` function not handling correctly `ByteRefs` {es-pull}109205[#109205] (issue: {es-issue}109198[#109198]) +* ESQL: Fix equals `hashCode` for functions {es-pull}107947[#107947] (issue: {es-issue}104393[#104393]) +* ESQL: Fix variable shadowing when pushing down past Project {es-pull}108360[#108360] (issue: {es-issue}108008[#108008]) +* ESQL: Validate unique plan attribute names {es-pull}110488[#110488] (issue: {es-issue}110541[#110541]) +* ESQL: change from quoting from backtick to quote {es-pull}108395[#108395] +* ESQL: make named params objects truly per request {es-pull}110046[#110046] (issue: {es-issue}110028[#110028]) +* ES|QL: Fix DISSECT that overwrites input {es-pull}110201[#110201] (issue: {es-issue}110184[#110184]) +* ES|QL: limit query depth to 500 levels {es-pull}108089[#108089] (issue: {es-issue}107752[#107752]) +* ES|QL: reduce max expression depth to 400 {es-pull}111186[#111186] (issue: {es-issue}109846[#109846]) +* Fix ST_DISTANCE Lucene push-down for complex predicates {es-pull}110391[#110391] (issue: {es-issue}110349[#110349]) +* Fix `ClassCastException` with MV_EXPAND on missing field {es-pull}110096[#110096] (issue: {es-issue}109974[#109974]) +* Fix bug in union-types with type-casting in grouping key of STATS {es-pull}110476[#110476] (issues: {es-issue}109922[#109922], {es-issue}110477[#110477]) +* Fix for union-types for multiple columns with the same name {es-pull}110793[#110793] (issues: {es-issue}110490[#110490], {es-issue}109916[#109916]) +* [ESQL] Count_distinct(_source) should return a 400 {es-pull}110824[#110824] +* [ESQL] Fix parsing of large magnitude negative numbers {es-pull}110665[#110665] (issue: {es-issue}104323[#104323]) +* [ESQL] Migrate `SimplifyComparisonArithmetics` optimization {es-pull}109256[#109256] (issues: {es-issue}108388[#108388], {es-issue}108743[#108743]) + +Engine:: +* Async close of `IndexShard` {es-pull}108145[#108145] + +Highlighting:: +* Fix issue with returning incomplete fragment for plain highlighter {es-pull}110707[#110707] + +ILM+SLM:: +* Allow `read_slm` to call GET /_slm/status {es-pull}108333[#108333] + +Indices APIs:: +* Create a new `NodeRequest` for every `NodesDataTiersUsageTransport` use {es-pull}108379[#108379] + +Infra/Core:: +* Add a cluster listener to fix missing node features after upgrading from a version prior to 8.13 {es-pull}110710[#110710] (issue: {es-issue}109254[#109254]) +* Add bounds checking to parsing ISO8601 timezone offset values {es-pull}108672[#108672] +* Fix native preallocate to actually run {es-pull}110851[#110851] +* Ignore additional cpu.stat fields {es-pull}108019[#108019] (issue: {es-issue}107983[#107983]) +* Specify parse index when error occurs on multiple datetime parses {es-pull}108607[#108607] + +Infra/Metrics:: +* Provide document size reporter with `MapperService` {es-pull}109794[#109794] + +Infra/Node Lifecycle:: +* Expose `?master_timeout` on get-shutdown API {es-pull}108886[#108886] +* Fix serialization of put-shutdown request {es-pull}107862[#107862] (issue: {es-issue}107857[#107857]) +* Support wait indefinitely for search tasks to complete on node shutdown {es-pull}107426[#107426] + +Infra/REST API:: +* Add some missing timeout params to REST API specs {es-pull}108761[#108761] +* Consider `error_trace` supported by all endpoints {es-pull}109613[#109613] (issue: {es-issue}109612[#109612]) + +Ingest Node:: +* Fix Dissect with leading non-ascii characters {es-pull}111184[#111184] 
+* Fix enrich policy runner exception handling on empty segments response {es-pull}111290[#111290] +* GeoIP tasks should wait longer for master {es-pull}108410[#108410] +* Removing the use of Stream::peek from `GeoIpDownloader::cleanDatabases` {es-pull}110666[#110666] +* Simulate should succeed if `ignore_missing_pipeline` {es-pull}108106[#108106] (issue: {es-issue}107314[#107314]) + +Machine Learning:: +* Allow deletion of the ELSER inference service when reference in ingest {es-pull}108146[#108146] +* Avoid `InferenceRunner` deadlock {es-pull}109551[#109551] +* Correctly handle duplicate model ids for the `_cat` trained models api and usage statistics {es-pull}109126[#109126] +* Do not use global ordinals strategy if the leaf reader context cannot be obtained {es-pull}108459[#108459] +* Fix NPE in trained model assignment updater {es-pull}108942[#108942] +* Fix serialising inference delete response {es-pull}109384[#109384] +* Fix "stack use after scope" memory error {ml-pull}2673[#2673] +* Fix trailing slash in `ml.get_categories` specification {es-pull}110146[#110146] +* Handle any exception thrown by inference {ml-pull}2680[#2680] +* Increase response size limit for batched requests {es-pull}110112[#110112] +* Offload request to generic threadpool {es-pull}109104[#109104] (issue: {es-issue}109100[#109100]) +* Propagate accurate deployment timeout {es-pull}109534[#109534] (issue: {es-issue}109407[#109407]) +* Refactor TextEmbeddingResults to use primitives rather than objects {es-pull}108161[#108161] +* Require question to be non-null in `QuestionAnsweringConfig` {es-pull}107972[#107972] +* Start Trained Model Deployment API request query params now override body params {es-pull}109487[#109487] +* Suppress deprecation warnings from ingest pipelines when deleting trained model {es-pull}108679[#108679] (issue: {es-issue}105004[#105004]) +* Use default translog durability on AD results index {es-pull}108999[#108999] +* Use the multi node routing action for internal inference services {es-pull}109358[#109358] +* [Inference API] Extract optional long instead of integer in `RateLimitSettings#of` {es-pull}108602[#108602] +* [Inference API] Fix serialization for inference delete endpoint response {es-pull}110431[#110431] +* [Inference API] Replace `model_id` with `inference_id` in inference API except when stored {es-pull}111366[#111366] + +Mapping:: +* Fix off by one error when handling null values in range fields {es-pull}107977[#107977] (issue: {es-issue}107282[#107282]) +* Limit number of synonym rules that can be created {es-pull}109981[#109981] (issue: {es-issue}108785[#108785]) +* Propagate mapper builder context flags across nested mapper builder context creation {es-pull}109963[#109963] +* `DenseVectorFieldMapper` fixed typo {es-pull}108065[#108065] + +Network:: +* Use proper executor for failing requests when connection closes {es-pull}109236[#109236] (issue: {es-issue}109225[#109225]) +* `NoSuchRemoteClusterException` should not be thrown when a remote is configured {es-pull}107435[#107435] (issue: {es-issue}107381[#107381]) + +Packaging:: +* Adding override for lintian false positive on `libvec.so` {es-pull}108521[#108521] (issue: {es-issue}108514[#108514]) + +Ranking:: +* Fix score count validation in reranker response {es-pull}111424[#111424] (issue: {es-issue}111202[#111202]) + +Rollup:: +* Fix trailing slash in two rollup specifications {es-pull}110176[#110176] + +Search:: +* Adding score from `RankDoc` to `SearchHit` {es-pull}108870[#108870] +* Better handling of multiple 
rescorers clauses with LTR {es-pull}109071[#109071] +* Correct query profiling for conjunctions {es-pull}108122[#108122] (issue: {es-issue}108116[#108116]) +* Fix `DecayFunctions'` `toString` {es-pull}107415[#107415] (issue: {es-issue}100870[#100870]) +* Fix leak in collapsing search results {es-pull}110927[#110927] +* Fork freeing search/scroll contexts to GENERIC pool {es-pull}109481[#109481] + +Security:: +* Add permission to secure access to certain config files {es-pull}107827[#107827] +* Add permission to secure access to certain config files specified by settings {es-pull}108895[#108895] +* Fix trappy timeouts in security settings APIs {es-pull}109233[#109233] + +Snapshot/Restore:: +* Stricter failure handling in multi-repo get-snapshots request handling {es-pull}107191[#107191] + +TSDB:: +* Sort time series indices by time range in `GetDataStreams` API {es-pull}107967[#107967] (issue: {es-issue}102088[#102088]) + +Transform:: +* Always pick the user `maxPageSize` value {es-pull}109876[#109876] (issue: {es-issue}109844[#109844]) +* Exit gracefully when deleted {es-pull}107917[#107917] (issue: {es-issue}107266[#107266]) +* Fix NPE during destination index creation {es-pull}108891[#108891] (issue: {es-issue}108890[#108890]) +* Forward `indexServiceSafe` exception to listener {es-pull}108517[#108517] (issue: {es-issue}108418[#108418]) +* Halt Indexer on Stop/Abort API {es-pull}107792[#107792] +* Handle `IndexNotFoundException` {es-pull}108394[#108394] (issue: {es-issue}107263[#107263]) +* Prevent concurrent jobs during cleanup {es-pull}109047[#109047] +* Redirect `VersionConflict` to reset code {es-pull}108070[#108070] +* Reset max page size to settings value {es-pull}109449[#109449] (issue: {es-issue}109308[#109308]) + +Vector Search:: +* Ensure vector similarity correctly limits `inner_hits` returned for nested kNN {es-pull}111363[#111363] (issue: {es-issue}111093[#111093]) +* Ensure we return non-negative scores when scoring scalar dot-products {es-pull}108522[#108522] + +Watcher:: +* Avoiding running watch jobs in TickerScheduleTriggerEngine if it is paused {es-pull}110061[#110061] (issue: {es-issue}105933[#105933]) + +[[deprecation-8.15.0]] +[float] +=== Deprecations + +ILM+SLM:: +* Deprecate using slm privileges to access ilm {es-pull}110540[#110540] + +Infra/Settings:: +* `ParseHeapRatioOrDeprecatedByteSizeValue` for `indices.breaker.total.limit` {es-pull}110236[#110236] + +Machine Learning:: +* Deprecate `text_expansion` and `weighted_tokens` queries {es-pull}109880[#109880] + +[[enhancement-8.15.0]] +[float] +=== Enhancements + +Aggregations:: +* Aggs: Scripted metric allow list {es-pull}109444[#109444] +* Enable inter-segment concurrency for low cardinality numeric terms aggs {es-pull}108306[#108306] +* Increase size of big arrays only when there is an actual value in the aggregators {es-pull}107764[#107764] +* Increase size of big arrays only when there is an actual value in the aggregators (Analytics module) {es-pull}107813[#107813] +* Optimise `BinaryRangeAggregator` for single value fields {es-pull}108016[#108016] +* Optimise cardinality aggregations for single value fields {es-pull}107892[#107892] +* Optimise composite aggregations for single value fields {es-pull}107897[#107897] +* Optimise few metric aggregations for single value fields {es-pull}107832[#107832] +* Optimise histogram aggregations for single value fields {es-pull}107893[#107893] +* Optimise multiterms aggregation for single value fields {es-pull}107937[#107937] +* Optimise terms aggregations for single 
value fields {es-pull}107930[#107930] +* Speed up collecting zero document string terms {es-pull}110922[#110922] + +Allocation:: +* Log shard movements {es-pull}105829[#105829] +* Support effective watermark thresholds in node stats API {es-pull}107244[#107244] (issue: {es-issue}106676[#106676]) + +Application:: +* Add Create or update query rule API call {es-pull}109042[#109042] +* Rename rule query and add support for multiple rulesets {es-pull}108831[#108831] +* Support multiple associated groups for TopN {es-pull}108409[#108409] (issue: {es-issue}108018[#108018]) +* [Connector API] Change `UpdateConnectorFiltering` API to have better defaults {es-pull}108612[#108612] + +Authentication:: +* Expose API Key cache metrics {es-pull}109078[#109078] + +Authorization:: +* Cluster state role mapper file settings service {es-pull}107886[#107886] +* Cluster-state based Security role mapper {es-pull}107410[#107410] +* Introduce role description field {es-pull}107088[#107088] +* [Osquery] Extend `kibana_system` role with an access to new `osquery_manager` index {es-pull}108849[#108849] + +Data streams:: +* Add metrics@custom component template to metrics-*-* index template {es-pull}109540[#109540] (issue: {es-issue}109475[#109475]) +* Apm-data: enable plugin by default {es-pull}108860[#108860] +* Apm-data: ignore malformed fields, and too many dynamic fields {es-pull}108444[#108444] +* Apm-data: improve default pipeline performance {es-pull}108396[#108396] (issue: {es-issue}108290[#108290]) +* Apm-data: improve indexing resilience {es-pull}108227[#108227] +* Apm-data: increase priority above Fleet templates {es-pull}108885[#108885] +* Apm-data: increase version for templates {es-pull}108340[#108340] +* Apm-data: set codec: best_compression for logs-apm.* data streams {es-pull}108862[#108862] +* Remove `default_field: message` from metrics index templates {es-pull}110651[#110651] + +Distributed:: +* Add `wait_for_completion` parameter to delete snapshot request {es-pull}109462[#109462] (issue: {es-issue}101300[#101300]) +* Improve mechanism for extracting the result of a `PlainActionFuture` {es-pull}110019[#110019] (issue: {es-issue}108125[#108125]) + +ES|QL:: +* Add `BlockHash` for 3 `BytesRefs` {es-pull}108165[#108165] +* Allow `LuceneSourceOperator` to early terminate {es-pull}108820[#108820] +* Check if `CsvTests` required capabilities exist {es-pull}108684[#108684] +* ESQL: Add aggregates node level reduction {es-pull}107876[#107876] +* ESQL: Add more time span units {es-pull}108300[#108300] +* ESQL: Implement LOOKUP, an "inline" enrich {es-pull}107987[#107987] (issue: {es-issue}107306[#107306]) +* ESQL: Renamed `TopList` to Top {es-pull}110347[#110347] +* ESQL: Union Types Support {es-pull}107545[#107545] (issue: {es-issue}100603[#100603]) +* ESQL: add REPEAT string function {es-pull}109220[#109220] +* ES|QL Add primitive float support to the Compute Engine {es-pull}109746[#109746] (issue: {es-issue}109178[#109178]) +* ES|QL Add primitive float variants of all aggregators to the compute engine {es-pull}109781[#109781] +* ES|QL: vectorize eval {es-pull}109332[#109332] +* Optimize ST_DISTANCE filtering with Lucene circle intersection query {es-pull}110102[#110102] (issue: {es-issue}109972[#109972]) +* Optimize for single value in ordinals grouping {es-pull}108118[#108118] +* Rewrite away type converting functions that do not convert types {es-pull}108713[#108713] (issue: {es-issue}107716[#107716]) +* ST_DISTANCE Function {es-pull}108764[#108764] (issue: {es-issue}108212[#108212]) +* Support 
metrics counter types in ESQL {es-pull}107877[#107877] +* [ESQL] CBRT function {es-pull}108574[#108574] +* [ES|QL] Convert string to datetime when the other size of an arithmetic operator is `date_period` or `time_duration` {es-pull}108455[#108455] +* [ES|QL] Support Named and Positional Parameters in `EsqlQueryRequest` {es-pull}108421[#108421] (issue: {es-issue}107029[#107029]) +* [ES|QL] `weighted_avg` {es-pull}109993[#109993] + +Engine:: +* Drop shards close timeout when stopping node. {es-pull}107978[#107978] (issue: {es-issue}107938[#107938]) +* Update translog `writeLocation` for `flushListener` after commit {es-pull}109603[#109603] + +Geo:: +* Optimize `GeoBounds` and `GeoCentroid` aggregations for single value fields {es-pull}107663[#107663] + +Health:: +* Log details of non-green indicators in `HealthPeriodicLogger` {es-pull}108266[#108266] + +Highlighting:: +* Unified Highlighter to support matched_fields {es-pull}107640[#107640] (issue: {es-issue}5172[#5172]) + +Infra/Core:: +* Add allocation explain output for THROTTLING shards {es-pull}109563[#109563] +* Create custom parser for ISO-8601 datetimes {es-pull}106486[#106486] (issue: {es-issue}102063[#102063]) +* Extend ISO8601 datetime parser to specify forbidden fields, allowing it to be used on more formats {es-pull}108606[#108606] +* add Elastic-internal stable bridge api for use by Logstash {es-pull}108171[#108171] + +Infra/Metrics:: +* Add auto-sharding APM metrics {es-pull}107593[#107593] +* Add request metric to `RestController` to track success/failure (by status code) {es-pull}109957[#109957] +* Allow RA metrics to be reported upon parsing completed or accumulated {es-pull}108726[#108726] +* Provide the `DocumentSizeReporter` with index mode {es-pull}108947[#108947] +* Return noop instance `DocSizeObserver` for updates with scripts {es-pull}108856[#108856] + +Ingest Node:: +* Add `continent_code` support to the geoip processor {es-pull}108780[#108780] (issue: {es-issue}85820[#85820]) +* Add support for the 'Connection Type' database to the geoip processor {es-pull}108683[#108683] +* Add support for the 'Domain' database to the geoip processor {es-pull}108639[#108639] +* Add support for the 'ISP' database to the geoip processor {es-pull}108651[#108651] +* Adding `hits_time_in_millis` and `misses_time_in_millis` to enrich cache stats {es-pull}107579[#107579] +* Adding `user_type` support for the enterprise database for the geoip processor {es-pull}108687[#108687] +* Adding human readable times to geoip stats {es-pull}107647[#107647] +* Include doc size info in ingest stats {es-pull}107240[#107240] (issue: {es-issue}106386[#106386]) +* Make ingest byte stat names more descriptive {es-pull}108786[#108786] +* Return ingest byte stats even when 0-valued {es-pull}108796[#108796] +* Test pipeline run after reroute {es-pull}108693[#108693] + +Logs:: +* Introduce a node setting controlling the activation of the `logs` index mode in logs@settings component template {es-pull}109025[#109025] (issue: {es-issue}108762[#108762]) +* Support index sorting with nested fields {es-pull}110251[#110251] (issue: {es-issue}107349[#107349]) + +Machine Learning:: +* Add Anthropic messages integration to Inference API {es-pull}109893[#109893] +* Add `sparse_vector` query {es-pull}108254[#108254] +* Add model download progress to the download task status {es-pull}107676[#107676] +* Add rate limiting support for the Inference API {es-pull}107706[#107706] +* Add the rerank task to the Elasticsearch internal inference service {es-pull}108452[#108452] 
+* Default the HF service to cosine similarity {es-pull}109967[#109967] +* GA the update trained model action {es-pull}108868[#108868] +* Handle the "JSON memory allocator bytes" field {es-pull}109653[#109653] +* Inference Processor: skip inference when all fields are missing {es-pull}108131[#108131] +* Log 'No statistics at.. ' message as a warning {ml-pull}2684[#2684] +* Optimise frequent item sets aggregation for single value fields {es-pull}108130[#108130] +* Sentence Chunker {es-pull}110334[#110334] +* [Inference API] Add Amazon Bedrock Support to Inference API {es-pull}110248[#110248] +* [Inference API] Add Mistral Embeddings Support to Inference API {es-pull}109194[#109194] +* [Inference API] Check for related pipelines on delete inference endpoint {es-pull}109123[#109123] + +Mapping:: +* Add ignored field values to synthetic source {es-pull}107567[#107567] +* Apply FLS to the contents of `IgnoredSourceFieldMapper` {es-pull}109931[#109931] +* Binary field enables doc values by default for index mode with synthe… {es-pull}107739[#107739] (issue: {es-issue}107554[#107554]) +* Feature/annotated text store defaults {es-pull}107922[#107922] (issue: {es-issue}107734[#107734]) +* Handle `ignore_above` in synthetic source for flattened fields {es-pull}110214[#110214] +* Opt in keyword field into fallback synthetic source if needed {es-pull}110016[#110016] +* Opt in number fields into fallback synthetic source when doc values a… {es-pull}110160[#110160] +* Reflect latest changes in synthetic source documentation {es-pull}109501[#109501] +* Store source for fields in objects with `dynamic` override {es-pull}108911[#108911] +* Store source for nested objects {es-pull}108818[#108818] +* Support synthetic source for `geo_point` when `ignore_malformed` is used {es-pull}109651[#109651] +* Support synthetic source for `scaled_float` and `unsigned_long` when `ignore_malformed` is used {es-pull}109506[#109506] +* Support synthetic source for date fields when `ignore_malformed` is used {es-pull}109410[#109410] +* Support synthetic source together with `ignore_malformed` in histogram fields {es-pull}109882[#109882] +* Track source for arrays of objects {es-pull}108417[#108417] (issue: {es-issue}90708[#90708]) +* Track synthetic source for disabled objects {es-pull}108051[#108051] + +Network:: +* Detect long-running tasks on network threads {es-pull}109204[#109204] + +Ranking:: +* Enabling profiling for `RankBuilders` and adding tests for RRF {es-pull}109470[#109470] + +Relevance:: +* [Query Rules] Add API calls to get or delete individual query rules within a ruleset {es-pull}109554[#109554] +* [Query Rules] Require Enterprise License for Query Rules {es-pull}109634[#109634] + +Search:: +* Add AVX-512 optimised vector distance functions for int7 on x64 {es-pull}109084[#109084] +* Add `SparseVectorStats` {es-pull}108793[#108793] +* Add `_name` support for top level `knn` clauses {es-pull}107645[#107645] (issues: {es-issue}106254[#106254], {es-issue}107448[#107448]) +* Add a SIMD (AVX2) optimised vector distance function for int7 on x64 {es-pull}108088[#108088] +* Add min/max range of the `event.ingested` field to cluster state for searchable snapshots {es-pull}106252[#106252] +* Add per-field KNN vector format to Index Segments API {es-pull}107216[#107216] +* Add support for hiragana_uppercase & katakana_uppercase token filters in kuromoji analysis plugin {es-pull}106553[#106553] +* Adding support for explain in rrf {es-pull}108682[#108682] +* Allow rescorer with field collapsing 
{es-pull}107779[#107779] (issue: {es-issue}27243[#27243]) +* Cut over stored fields to ZSTD for compression {es-pull}103374[#103374] +* Limit the value in prefix query {es-pull}108537[#108537] (issue: {es-issue}108486[#108486]) +* Make dense vector field type updatable {es-pull}106591[#106591] +* Multivalue Sparse Vector Support {es-pull}109007[#109007] + +Security:: +* Add bulk delete roles API {es-pull}110383[#110383] +* Remote cluster - API key security model - cluster privileges {es-pull}107493[#107493] + +Snapshot/Restore:: +* Denser in-memory representation of `ShardBlobsToDelete` {es-pull}109848[#109848] +* Log repo UUID at generation/registration time {es-pull}109672[#109672] +* Make repository analysis API available to non-operators {es-pull}110179[#110179] (issue: {es-issue}100318[#100318]) +* Track `RequestedRangeNotSatisfiedException` separately in S3 Metrics {es-pull}109657[#109657] + +Stats:: +* DocsStats: Add human readable bytesize {es-pull}109720[#109720] + +TSDB:: +* Optimise `time_series` aggregation for single value fields {es-pull}107990[#107990] +* Support `ignore_above` on keyword dimensions {es-pull}110337[#110337] + +Vector Search:: +* Adding hamming distance function to painless for `dense_vector` fields {es-pull}109359[#109359] +* Support k parameter for knn query {es-pull}110233[#110233] (issue: {es-issue}108473[#108473]) + +[[feature-8.15.0]] +[float] +=== New features + +Aggregations:: +* Opt `scripted_metric` out of parallelization {es-pull}109597[#109597] + +Application:: +* [Connector API] Add claim sync job endpoint {es-pull}109480[#109480] + +ES|QL:: +* ESQL: Add `ip_prefix` function {es-pull}109070[#109070] (issue: {es-issue}99064[#99064]) +* ESQL: Introduce a casting operator, `::` {es-pull}107409[#107409] +* ESQL: `top_list` aggregation {es-pull}109386[#109386] (issue: {es-issue}109213[#109213]) +* ESQL: add Arrow dataframes output format {es-pull}109873[#109873] +* Reapply "ESQL: Expose "_ignored" metadata field" {es-pull}108871[#108871] + +Infra/REST API:: +* Add a capabilities API to check node and cluster capabilities {es-pull}106820[#106820] + +Ingest Node:: +* Directly download commercial ip geolocation databases from providers {es-pull}110844[#110844] +* Mark the Redact processor as Generally Available {es-pull}110395[#110395] + +Logs:: +* Introduce logs index mode as Tech Preview {es-pull}108896[#108896] (issue: {es-issue}108896[#108896]) + +Machine Learning:: +* Add support for Azure AI Studio embeddings and completions to the inference service. 
{es-pull}108472[#108472] + +Mapping:: +* Add `semantic_text` field type and `semantic` query {es-pull}110338[#110338] +* Add generic fallback implementation for synthetic source {es-pull}108222[#108222] +* Add synthetic source support for `geo_shape` via fallback implementation {es-pull}108881[#108881] +* Add synthetic source support for binary fields {es-pull}107549[#107549] +* Enable fallback synthetic source by default {es-pull}109370[#109370] (issue: {es-issue}106460[#106460]) +* Enable fallback synthetic source for `point` and `shape` {es-pull}109312[#109312] +* Enable fallback synthetic source for `token_count` {es-pull}109044[#109044] +* Implement synthetic source support for annotated text field {es-pull}107735[#107735] +* Implement synthetic source support for range fields {es-pull}107081[#107081] +* Support arrays in fallback synthetic source implementation {es-pull}108878[#108878] +* Support synthetic source for `aggregate_metric_double` when ignore_malf… {es-pull}108746[#108746] + +Ranking:: +* Add text similarity reranker retriever {es-pull}109813[#109813] + +Relevance:: +* Mark Query Rules as GA {es-pull}110004[#110004] + +Search:: +* Add new int4 quantization to dense_vector {es-pull}109317[#109317] +* Adding RankFeature search phase implementation {es-pull}108538[#108538] +* Adding aggregations support for the `_ignored` field {es-pull}101373[#101373] (issue: {es-issue}59946[#59946]) +* Update Lucene version to 9.11 {es-pull}109219[#109219] + +Security:: +* Query Roles API {es-pull}108733[#108733] + +Transform:: +* Introduce _transform/_node_stats API {es-pull}107279[#107279] + +Vector Search:: +* Adds new `bit` `element_type` for `dense_vectors` {es-pull}110059[#110059] + +[[upgrade-8.15.0]] +[float] +=== Upgrades + +Infra/Plugins:: +* Update ASM to 9.7 for plugin scanner {es-pull}108822[#108822] (issue: {es-issue}108776[#108776]) + +Ingest Node:: +* Bump Tika dependency to 2.9.2 {es-pull}108144[#108144] + +Network:: +* Upgrade to Netty 4.1.109 {es-pull}108155[#108155] + +Search:: +* Upgrade to Lucene-9.11.1 {es-pull}110234[#110234] + +Security:: +* Upgrade bouncy castle (non-fips) to 1.78.1 {es-pull}108223[#108223] + +Snapshot/Restore:: +* Bump jackson version in modules:repository-azure {es-pull}109717[#109717] + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 0ed01ff422700..007dd740f34cf 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -68,6 +68,31 @@ improves query and merge speed significantly when compared to raw vectors. {es-pull}109317[#109317] +[discrete] +[[esql_inlinestats]] +=== ESQL: INLINESTATS +This adds the `INLINESTATS` command to ESQL which performs a STATS and +then enriches the results into the output stream. 
So, this query: + +[source,esql] +---- +FROM test +| INLINESTATS m=MAX(a * b) BY b +| WHERE m == a * b +| SORT a DESC, b DESC +| LIMIT 3 +---- + +Produces output like: + +| a | b | m | +| --- | --- | ----- | +| 99 | 999 | 98901 | +| 99 | 998 | 98802 | +| 99 | 997 | 98703 | + +{es-pull}109583[#109583] + [discrete] [[mark_query_rules_as_ga]] === Mark Query Rules as GA @@ -112,6 +137,18 @@ The Redact processor uses the Grok rules engine to obscure text in the input doc {es-pull}110395[#110395] +[discrete] +[[always_allow_rebalancing_by_default]] +=== Always allow rebalancing by default +In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to +`indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was +appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has +better support for rebalancing a cluster that is not in `green` health, and expects to be able to rebalance some +shards away from over-full nodes to avoid allocating shards to undesirable locations in the first place. From +version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy allocator is explicitly enabled. + +{es-pull}111015[#111015] + // end::notable-highlights[] diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 7c2e42a26b923..fabd495cdc525 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -649,8 +649,9 @@ tag::level[] + -- (Optional, string) -Indicates whether statistics are aggregated -at the cluster, index, or shard level. +Indicates whether statistics are aggregated at the cluster, index, or shard level. +If the shards level is requested, some additional +<> are shown. Valid values are: @@ -1326,13 +1327,21 @@ that lower ranked documents have more influence. This value must be greater than equal to `1`. Defaults to `60`. end::rrf-rank-constant[] -tag::rrf-window-size[] -`window_size`:: +tag::rrf-rank-window-size[] +`rank_window_size`:: (Optional, integer) + This value determines the size of the individual result sets per query. A higher value will improve result relevance at the cost of performance. The final ranked result set is pruned down to the search request's <>. -`window_size` must be greater than or equal to `size` and greater than or equal to `1`. +`rank_window_size` must be greater than or equal to `size` and greater than or equal to `1`. Defaults to the `size` parameter. -end::rrf-window-size[] +end::rrf-rank-window-size[] + +tag::rrf-filter[] +`filter`:: +(Optional, <>) ++ +Applies the specified <> to all of the specified sub-retrievers, +according to each retriever's specifications. +end::rrf-filter[] diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index a8072b7ba549a..e4b6ef7f765c2 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -315,7 +315,7 @@ The result would then have the `errors` field set to `true` and hold the error f "details": { "my_admin_role": { <4> "type": "action_request_validation_exception", - "reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. 
a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_data_stream_global_retention,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_data_stream_global_retention,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;" + "reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,manage_data_stream_global_retention,monitor_data_stream_global_retention,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;" } } } diff --git a/docs/reference/search/point-in-time-api.asciidoc b/docs/reference/search/point-in-time-api.asciidoc index 2e32324cb44d9..9cd91626c7600 100644 --- a/docs/reference/search/point-in-time-api.asciidoc +++ b/docs/reference/search/point-in-time-api.asciidoc @@ -78,6 +78,44 @@ IMPORTANT: The open point in time request and each subsequent search request can return different `id`; thus always use the most recently received `id` for the next search request. +In addition to the `keep_alive` parameter, the `allow_partial_search_results` parameter +can also be defined. +This parameter determines whether the <> +should tolerate unavailable shards or <> when +initially creating the PIT. +If set to true, the PIT will be created with the available shards, along with a +reference to any missing ones. +If set to false, the operation will fail if any shard is unavailable. +The default value is false. + +The PIT response includes a summary of the total number of shards, as well as the number +of successful shards when creating the PIT. 
+ +[source,console] +-------------------------------------------------- +POST /my-index-000001/_pit?keep_alive=1m&allow_partial_search_results=true +-------------------------------------------------- +// TEST[setup:my_index] + +[source,js] +-------------------------------------------------- +{ + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=", + "_shards": { + "total": 10, + "successful": 10, + "skipped": 0, + "failed": 0 + } +} +-------------------------------------------------- +// NOTCONSOLE + +When a PIT that contains shard failures is used in a search request, the missing shards are +always reported in the search response as a NoShardAvailableActionException exception. +To get rid of these exceptions, a new PIT needs to be created so that shards missing +from the previous PIT can be handled, assuming they become available in the meantime. + [[point-in-time-keep-alive]] ==== Keeping point in time alive The `keep_alive` parameter, which is passed to a open point in time request and diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index ed39ac786880b..58cc8ce9ef459 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -77,23 +77,48 @@ Collapses the top documents by a specified key into a single top document per ke When a retriever tree contains a compound retriever (a retriever with two or more child retrievers) *only* the query element is allowed. -===== Example +[discrete] +[[standard-retriever-example]] +==== Example [source,js] ---- -GET /index/_search +GET /restaurants/_search { - "retriever": { - "standard": { - "query" { ... }, - "filter" { ... }, - "min_score": ... + "retriever": { <1> + "standard": { <2> + "query": { <3> + "bool": { <4> + "should": [ <5> + { + "match": { <6> + "region": "Austria" + } + } + ], + "filter": [ <7> + { + "term": { <8> + "year": "2019" <9> + } + } + ] } - }, - "size": ... + } + } + } } ---- // NOTCONSOLE +<1> Opens the `retriever` object. +<2> The `standard` retriever is used for defining traditional {es} queries. +<3> The entry point for defining the search query. +<4> The `bool` object allows for combining multiple query clauses logically. +<5> The `should` array indicates conditions under which a document will match. Documents matching these conditions will increase their relevancy score. +<6> The `match` object finds documents where the `region` field contains the word "Austria." +<7> The `filter` array provides filtering conditions that must be met but do not contribute to the relevancy score. +<8> The `term` object is used for exact matches, in this case, filtering documents by the `year` field. +<9> The exact value to match in the `year` field. [[knn-retriever]] ==== kNN Retriever @@ -142,29 +167,39 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-similarity] The parameters `query_vector` and `query_vector_builder` cannot be used together. -===== Example: +[discrete] +[[knn-retriever-example]] +==== Example [source,js] ---- -GET /index/_search { - "retriever": { - "knn": { - "field": ..., - "query_vector": ..., - "k": ..., - "num_candidates": ... 
- } + "retriever": { + "knn": { <1> + "field": "vector", <2> + "query_vector": [10, 22, 77], <3> + "k": 10, <4> + "num_candidates": 10 <5> } + } } ---- // NOTCONSOLE +<1> Configuration for k-nearest neighbor (knn) search, which is based on vector similarity. +<2> Specifies the field name that contains the vectors. +<3> The query vector against which document vectors are compared in the `knn` search. +<4> The number of nearest neighbors to return as top hits. This value must be fewer than or equal to `num_candidates`. +<5> The size of the initial candidate set from which the final `k` nearest neighbors are selected. + [[rrf-retriever]] ==== RRF Retriever -An <> retriever returns top documents based on the RRF formula +An <> retriever returns top documents based on the RRF formula, equally weighting two or more child retrievers. +Reciprocal rank fusion (RRF) is a method for combining multiple result +sets with different relevance indicators into a single result set. ===== Parameters @@ -172,7 +207,9 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-constant] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-window-size] +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-window-size] + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-filter] ===== Restrictions @@ -180,26 +217,103 @@ An RRF retriever is a compound retriever. Child retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. -===== Example +[discrete] +[[rrf-retriever-example-hybrid]] +==== Example: Hybrid search + +A simple hybrid search example (lexical search + dense vector search) combining a `standard` retriever with a `knn` retriever using RRF: [source,js] ---- -GET /index/_search +GET /restaurants/_search +{ + "retriever": { + "rrf": { <1> + "retrievers": [ <2> + { + "standard": { <3> + "query": { + "multi_match": { + "query": "San Francisco", + "fields": [ + "city", + "region" + ] + } + } + } + }, + { + "knn": { <4> + "field": "vector", + "query_vector": [10, 22, 77], + "k": 10, + "num_candidates": 10 + } + } + ], + "rank_constant": 1, <5> + "rank_window_size": 50 <6> + } + } +} +---- +// NOTCONSOLE +<1> Defines a retriever tree with an RRF retriever. +<2> The sub-retriever array. +<3> The first sub-retriever is a `standard` retriever. +<4> The second sub-retriever is a `knn` retriever. +<5> The rank constant for the RRF retriever. +<6> The rank window size for the RRF retriever. + +[discrete] +[[rrf-retriever-example-hybrid-sparse]] +==== Example: Hybrid search with sparse vectors + +A more complex hybrid search example (lexical search + ELSER sparse vector search + dense vector search) using RRF: + +[source,js] +---- +GET movies/_search { - "retriever": { - "rrf": { - "retrievers": [ - { - "standard" { ... } - }, - { - "knn": { ... } - } - ], - "rank_constant": ... - "rank_window_size": ... 
+ "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "sparse_vector": { + "field": "plot_embedding", + "inference_id": "my-elser-model", + "query": "films that explore psychological depths" + } + } + } + }, + { + "standard": { + "query": { + "multi_match": { + "query": "crime", + "fields": [ + "plot", + "title" + ] + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [10, 22, 77], + "k": 10, + "num_candidates": 10 + } } + ] } + } } ---- // NOTCONSOLE @@ -207,15 +321,31 @@ GET /index/_search [[text-similarity-reranker-retriever]] ==== Text Similarity Re-ranker Retriever -The `text_similarity_reranker` is a type of retriever that enhances search results by re-ranking documents based on semantic similarity to a specified inference text, using a machine learning model. +The `text_similarity_reranker` retriever uses an NLP model to improve search results by reordering the top-k documents based on their semantic similarity to the query. + +[TIP] +==== +Refer to <> for a high-level overview of semantic reranking. +==== ===== Prerequisites To use `text_similarity_reranker` you must first set up a `rerank` task using the <>. -The `rerank` task should be set up with a machine learning model that can compute text similarity. -Currently you can integrate directly with the Cohere Rerank endpoint using the <> task, or upload a model to {es} <>. +The `rerank` task should be set up with a machine learning model that can compute text similarity. Refer to {ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-similarity[the Elastic NLP model reference] for a list of third-party text similarity models supported by {es}. + +Currently you can: + +* Integrate directly with the <> using the `rerank` task type +* Integrate directly with the <> using the `rerank` task type +* Upload a model to {es} with {eland-docs}/machine-learning.html#ml-nlp-pytorch[Eland] using the `text_similarity` NLP task type. +** Then set up an <> with the `rerank` task type +** Refer to the <> on this page for a step-by-step guide. ===== Parameters +`retriever`:: +(Required, <>) ++ +The child retriever that generates the initial set of top documents to be re-ranked. `field`:: (Required, `string`) @@ -242,31 +372,128 @@ The number of top documents to consider in the re-ranking process. Defaults to ` + Sets a minimum threshold score for including documents in the re-ranked results. Documents with similarity scores below this threshold will be excluded. Note that score calculations vary depending on the model used. +`filter`:: +(Optional, <>) ++ +Applies the specified <> to the child <>. +If the child retriever already specifies any filters, then this top-level filter is applied in conjunction +with the filter defined in the child retriever. + ===== Restrictions A text similarity re-ranker retriever is a compound retriever. Child retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. -===== Example +[discrete] +[[text-similarity-reranker-retriever-example-cohere]] +==== Example: Cohere Rerank + +This example enables out-of-the-box semantic search by reranking top documents using the Cohere Rerank API. This approach eliminates the need to generate and store embeddings for all indexed documents. +This requires a <> using the `rerank` task type. [source,js] ---- GET /index/_search { - "retriever": { - "text_similarity_reranker": { - "retriever": { - "standard": { ... 
} + "retriever": { + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "match_phrase": { + "text": "landmark in Paris" + } + } } - }, - "field": "text", - "inference_id": "my-cohere-rerank-model", - "inference_text": "Most famous landmark in Paris", - "rank_window_size": 100, - "min_score": 0.5 + }, + "field": "text", + "inference_id": "my-cohere-rerank-model", + "inference_text": "Most famous landmark in Paris", + "rank_window_size": 100, + "min_score": 0.5 + } + } +} +---- +// NOTCONSOLE + +[discrete] +[[text-similarity-reranker-retriever-example-eland]] +==== Example: Semantic reranking with a Hugging Face model + +The following example uses the `cross-encoder/ms-marco-MiniLM-L-6-v2` model from Hugging Face to rerank search results based on semantic similarity. +The model must be uploaded to {es} using https://www.elastic.co/guide/en/elasticsearch/client/eland/current/machine-learning.html#ml-nlp-pytorch[Eland]. + +[TIP] +==== +Refer to {ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-similarity[the Elastic NLP model reference] for a list of third-party text similarity models supported by {es}. +==== + +Follow these steps to load the model and create a semantic reranker. + +. Install Eland using `pip` ++ +[source,sh] +---- +python -m pip install eland[pytorch] +---- ++ +. Upload the model to {es} using Eland. This example assumes you have an Elastic Cloud deployment and an API key. Refer to the https://www.elastic.co/guide/en/elasticsearch/client/eland/current/machine-learning.html#ml-nlp-pytorch-auth[Eland documentation] for more authentication options. ++ +[source,sh] +---- +eland_import_hub_model \ + --cloud-id $CLOUD_ID \ + --es-api-key $ES_API_KEY \ + --hub-model-id cross-encoder/ms-marco-MiniLM-L-6-v2 \ + --task-type text_similarity \ + --clear-previous \ + --start +---- ++ +. Create an inference endpoint for the `rerank` task ++ +[source,js] +---- +PUT _inference/rerank/my-msmarco-minilm-model +{ + "service": "elasticsearch", + "service_settings": { + "num_allocations": 1, + "num_threads": 1, + "model_id": "cross-encoder__ms-marco-minilm-l-6-v2" + } +} +---- +// NOTCONSOLE ++ +. Define a `text_similarity_reranker` retriever. ++ +[source,js] +---- +POST movies/_search +{ + "retriever": { + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "match": { + "genre": "drama" + } + } + } + }, + "field": "plot", + "inference_id": "my-msmarco-minilm-model", + "inference_text": "films that explore psychological depths" + } + } +} +---- +// NOTCONSOLE ++ +This retriever uses a standard `match` query to search the `movies` index for films tagged with the genre "drama". +It then re-ranks the results based on semantic similarity to the text in the `inference_text` parameter, using the model we uploaded to {es}. ==== Using `from` and `size` with a retriever tree diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index fb474fe6bf4e6..2525dfff23b94 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -1,9 +1,7 @@ [[rrf]] === Reciprocal rank fusion -preview::["This functionality is in technical preview and may be changed or removed in a future release. -The syntax will likely change before GA. -Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] +preview::["This functionality is in technical preview and may be changed or removed in a future release. The syntax will likely change before GA. 
Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf[Reciprocal rank fusion (RRF)] is a method for combining multiple result sets with different relevance indicators into a single result set. @@ -43,7 +41,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-constant] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-window-size] +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-window-size] An example request using RRF: diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index b9646f4d37303..49045acf4c484 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -63,6 +63,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=preference] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=routing] +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] [[search-shards-api-example]] ==== {api-examples-title} diff --git a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc index 4a5efe09ea5a0..6b9b13b124e9f 100644 --- a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc +++ b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc @@ -1,24 +1,24 @@ -[cols="^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^"] |==== -| 18+^h| Remote cluster version +| 19+^h| Remote cluster version h| Local cluster version - | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 | 8.6 | 8.7 | 8.8 | 8.9 | 8.10 | 8.11 | 8.12 | 8.13 | 8.14 -| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | 
{no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.12 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.13 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.14 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} + | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 | 8.6 | 8.7 | 8.8 | 8.9 | 8.10 | 8.11 | 8.12 | 8.13 | 8.14 | 8.15 +| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | 
{yes-icon} | {yes-icon} | {yes-icon} +| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.12 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.13 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.14 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.15 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} 
| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} |==== diff --git a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc index 6525147839412..0f4640ebdf347 100644 --- a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc +++ b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc @@ -4,8 +4,6 @@ Deploy and manage LTR models ++++ -preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] - NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. For more information, see {subscriptions}. diff --git a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc index 2e9693eff0451..f14219e24bc11 100644 --- a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc +++ b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc @@ -4,8 +4,6 @@ Search using LTR ++++ -preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] - NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. For more information, see {subscriptions}. diff --git a/docs/reference/search/search-your-data/learning-to-rank.asciidoc b/docs/reference/search/search-your-data/learning-to-rank.asciidoc index 08fad9db9c0f6..ebd6d67fe42da 100644 --- a/docs/reference/search/search-your-data/learning-to-rank.asciidoc +++ b/docs/reference/search/search-your-data/learning-to-rank.asciidoc @@ -1,8 +1,6 @@ [[learning-to-rank]] == Learning To Rank -preview::["The Learning To Rank feature is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] - NOTE: This feature was introduced in version 8.12.0 and is only available to certain subscription levels. For more information, see {subscriptions}. diff --git a/docs/reference/search/search-your-data/near-real-time.asciidoc b/docs/reference/search/search-your-data/near-real-time.asciidoc index 46a996c237c38..47618ecd9fd7a 100644 --- a/docs/reference/search/search-your-data/near-real-time.asciidoc +++ b/docs/reference/search/search-your-data/near-real-time.asciidoc @@ -2,7 +2,7 @@ [[near-real-time]] === Near real-time search -The overview of <> indicates that when a document is stored in {es}, it is indexed and fully searchable in _near real-time_--within 1 second. What defines near real-time search? +When a document is stored in {es}, it is indexed and fully searchable in _near real-time_--within 1 second. What defines near real-time search? Lucene, the Java libraries on which {es} is based, introduced the concept of per-segment search. A _segment_ is similar to an inverted index, but the word _index_ in Lucene means "a collection of segments plus a commit point". After a commit, a new segment is added to the commit point and the buffer is cleared. 
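To make the refresh behavior above concrete, here is a minimal sketch (the index name `my-index-000001` is only a placeholder, not part of the original change): an explicit `_refresh` call makes recently indexed documents searchable immediately, and the dynamic `index.refresh_interval` setting controls how often {es} refreshes automatically.

[source,console]
----
POST /my-index-000001/_refresh

PUT /my-index-000001/_settings
{
  "index": {
    "refresh_interval": "30s"
  }
}
----
// TEST[skip:illustrative sketch only]

Raising the interval reduces segment churn during heavy indexing at the cost of slower visibility of new documents; setting it back to the default `1s` (or to `null`) restores near-real-time behavior.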
diff --git a/docs/reference/search/search-your-data/paginate-search-results.asciidoc b/docs/reference/search/search-your-data/paginate-search-results.asciidoc index a81598273dfd3..f69fd60be0484 100644 --- a/docs/reference/search/search-your-data/paginate-search-results.asciidoc +++ b/docs/reference/search/search-your-data/paginate-search-results.asciidoc @@ -106,9 +106,9 @@ The search response includes an array of `sort` values for each hit: "_id" : "654322", "_score" : null, "_source" : ..., - "sort" : [ + "sort" : [ 1463538855, - "654322" + "654322" ] }, { @@ -118,7 +118,7 @@ The search response includes an array of `sort` values for each hit: "_source" : ..., "sort" : [ <1> 1463538857, - "654323" + "654323" ] } ] @@ -150,7 +150,7 @@ GET twitter/_search -------------------------------------------------- //TEST[continued] -Repeat this process by updating the `search_after` array every time you retrieve a +Repeat this process by updating the `search_after` array every time you retrieve a new page of results. If a <> occurs between these requests, the order of your results may change, causing inconsistent results across pages. To prevent this, you can create a <> to @@ -167,10 +167,12 @@ The API returns a PIT ID. [source,console-result] ---- { - "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==" + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + "_shards": ... } ---- // TESTRESPONSE[s/"id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=="/"id": $body.id/] +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards"/] To get the first page of results, submit a search request with a `sort` argument. If using a PIT, specify the PIT ID in the `pit.id` parameter and omit @@ -362,7 +364,7 @@ Perl:: Python:: - See https://elasticsearch-py.readthedocs.org/en/master/helpers.html[elasticsearch.helpers.*] + See https://elasticsearch-py.readthedocs.io/en/stable/helpers.html[elasticsearch.helpers.*] JavaScript:: diff --git a/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc index 99659ae76e092..c0fe7471946f3 100644 --- a/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc @@ -13,23 +13,23 @@ For implementation details, including notable restrictions, check out the [discrete] [[retrievers-overview-types]] -==== Retriever types +==== Retriever types Retrievers come in various types, each tailored for different search operations. The following retrievers are currently available: -* <>. Returns top documents from a -traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. -Mimics a traditional query but in the context of a retriever framework. This -ensures backward compatibility as existing `_search` requests remain supported. -That way you can transition to the new abstraction at your own pace without +* <>. 
Returns top documents from a +traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. +Mimics a traditional query but in the context of a retriever framework. This +ensures backward compatibility as existing `_search` requests remain supported. +That way you can transition to the new abstraction at your own pace without mixing syntaxes. -* <>. Returns top documents from a <>, +* <>. Returns top documents from a <>, in the context of a retriever framework. * <>. Combines and ranks multiple first-stage retrievers using -the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple result sets +the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple result sets with different relevance indicators into a single result set. -An RRF retriever is a *compound retriever*, where its `filter` element is +An RRF retriever is a *compound retriever*, where its `filter` element is propagated to its sub retrievers. + Sub retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. @@ -38,7 +38,7 @@ See the <> for detaile Requires first creating a `rerank` task using the <>. [discrete] -==== What makes retrievers useful? +==== What makes retrievers useful? Here's an overview of what makes retrievers useful and how they differ from regular queries. @@ -140,7 +140,7 @@ GET example-index/_search ], "rank":{ "rrf":{ - "window_size":50, + "rank_window_size":50, "rank_constant":20 } } @@ -155,14 +155,14 @@ GET example-index/_search Here are some important terms: -* *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to +* *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to produce top hits. * *Retriever Tree*. A hierarchical structure that defines how retrievers interact. * *First-stage Retriever*. Returns an initial set of candidate documents. -* *Compound Retriever*. Builds on one or more retrievers, +* *Compound Retriever*. Builds on one or more retrievers, enhancing document retrieval and ranking logic. -* *Combiners*. Compound retrievers that merge top hits -from multiple sub-retrievers. +* *Combiners*. Compound retrievers that merge top hits +from multiple sub-retrievers. * *Rerankers*. Special compound retrievers that reorder hits and may adjust the number of hits, with distinctions between first-stage and second-stage rerankers. [discrete] @@ -180,4 +180,4 @@ Refer to the {kibana-ref}/playground.html[Playground documentation] for more inf [[retrievers-overview-api-reference]] ==== API reference -For implementation details, including notable restrictions, check out the <> in the Search API docs. \ No newline at end of file +For implementation details, including notable restrictions, check out the <> in the Search API docs. diff --git a/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc index 75c06aa953302..add2d7455983e 100644 --- a/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc @@ -5,7 +5,7 @@ preview::[] [TIP] ==== -This overview focuses more on the high-level concepts and use cases for semantic reranking. For full implementation details on how to set up and use semantic reranking in {es}, see the <> in the Search API docs. 
+This overview focuses more on the high-level concepts and use cases for semantic reranking. For full implementation details on how to set up and use semantic reranking in {es}, see the <> in the Search API docs. ==== Rerankers improve the relevance of results from earlier-stage retrieval mechanisms. @@ -89,11 +89,16 @@ In {es}, semantic rerankers are implemented using the {es} <>. +. *Choose a reranking model*. +Currently you can: + +** Integrate directly with the <> using the `rerank` task type +** Integrate directly with the <> using the `rerank` task type +** Upload a model to {es} from Hugging Face with {eland-docs}/machine-learning.html#ml-nlp-pytorch[Eland]. You'll need to use the `text_similarity` NLP task type when loading the model using Eland. Refer to {ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-similarity[the Elastic NLP model reference] for a list of third party text similarity models supported by {es} for semantic reranking. +*** Then set up an <> with the `rerank` task type +. *Create a `rerank` task using the <>*. The Inference API creates an inference endpoint and configures your chosen machine learning model to perform the reranking task. -. Define a `text_similarity_reranker` retriever in your search request. +. *Define a `text_similarity_reranker` retriever in your search request*. The retriever syntax makes it simple to configure both the retrieval and reranking of search results in a single API call. .*Example search request* with semantic reranker @@ -127,20 +132,6 @@ POST _search // TEST[skip:TBD] ============== -[discrete] -[[semantic-reranking-types]] -==== Supported reranking types - -The following `text_similarity_reranker` model configuration options are available. - -*Text similarity with cross-encoder* - -This solution uses a hosted or 3rd party inference service which relies on a cross-encoder model. -The model receives the text fields from the _top-K_ documents, as well as the search query, and calculates scores directly, which are then used to rerank the documents. - -Used with the Cohere inference service rolled out in 8.13, turn on semantic reranking that works out of the box. -Check out our https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/cohere/cohere-elasticsearch.ipynb[Python notebook] for using Cohere with {es}. - [discrete] [[semantic-reranking-learn-more]] ==== Learn more diff --git a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc index 594c22fb65981..18be825d02376 100644 --- a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc +++ b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc @@ -37,9 +37,10 @@ When defining a rule, consider the following: ===== Rule type The type of rule we want to apply. -For the moment there is a single rule type: +We support the following rule types: * `pinned` will re-write the query into a <>, pinning specified results matching the query rule at the top of the returned result set. +* `exclude` will exclude specified results from the returned result set. [discrete] [[query-rule-criteria]] @@ -91,12 +92,11 @@ Allowed criteria types are: The actions to take when the rule matches a query: -* `ids` will pin the specified <>s. -* `docs` will pin the specified documents in the specified indices. +* `ids` will select the specified <>s. +* `docs` will select the specified documents in the specified indices. 
Use `ids` when searching over a single index, and `docs` when searching over multiple indices. `ids` and `docs` cannot be combined in the same query. -See <> for details. [discrete] [[add-query-rules]] @@ -105,10 +105,10 @@ See <> for details. You can add query rules using the <> call. This adds a ruleset containing one or more query rules that will be applied to queries that match their specified criteria. -The following command will create a query ruleset called `my-ruleset` with two pinned document rules: +The following command will create a query ruleset called `my-ruleset` with two query rules: * The first rule will generate a <> pinning the <>s `id1` and `id2` when the `query_string` metadata value is a fuzzy match to either `puggles` or `pugs` _and_ the user's location is in the US. -* The second rule will generate a <> pinning the <> of `id3` specifically from the `my-index-000001` index and `id4` from the `my-index-000002` index when the `query_string` metadata value contains `beagles`. +* The second rule will generate a query that excludes the <> `id3` specifically from the `my-index-000001` index and `id4` from the `my-index-000002` index when the `query_string` metadata value contains `beagles`. //// [source,console] @@ -147,7 +147,7 @@ PUT /_query_rules/my-ruleset }, { "rule_id": "rule2", - "type": "pinned", + "type": "exclude", "criteria": [ { "type": "contains", @@ -222,7 +222,8 @@ This rule query will match against `rule1` in the defined query ruleset, and wil Any other matches from the organic query will be returned below the pinned results. It's possible to have multiple rules in a ruleset match a single <>. -In this case, the pinned documents are returned in the following order: +In this case, the rules are applied in the following order: - Where the matching rule appears in the ruleset - If multiple documents are specified in a single rule, in the order they are specified +- If a document is matched by both a `pinned` rule and an `exclude` rule, the `exclude` rule will take precedence diff --git a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc index ae27b46d4b876..719aeb070fc7c 100644 --- a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc @@ -11,13 +11,13 @@ IMPORTANT: For the easiest way to perform semantic search in the {stack}, refer The following examples use Cohere's `embed-english-v3.0` model, the `all-mpnet-base-v2` model from HuggingFace, and OpenAI's `text-embedding-ada-002` second generation embedding model. You can use any Cohere and OpenAI models, they are all supported by the {infer} API. -For a list of supported models available on HuggingFace, refer to -<>. +For a list of recommended models available on HuggingFace, refer to <>. Azure based examples use models available through https://ai.azure.com/explore/models?selectedTask=embeddings[Azure AI Studio] or https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models[Azure OpenAI]. Mistral examples use the `mistral-embed` model from https://docs.mistral.ai/getting-started/models/[the Mistral API]. Amazon Bedrock examples use the `amazon.titan-embed-text-v1` model from https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html[the Amazon Bedrock base models]. 
+AlibabaCloud AI Search examples use the `ops-text-embedding-zh-001` model from https://help.aliyun.com/zh/open-search/search-platform/developer-reference/text-embedding-api-details[the AlibabaCloud AI Search base models]. Click the name of the service you want to use on any of the widgets below to review the corresponding instructions. @@ -40,8 +40,7 @@ include::{es-ref-dir}/tab-widgets/inference-api/infer-api-task-widget.asciidoc[] ==== Create the index mapping The mapping of the destination index - the index that contains the embeddings that the model will create based on your input text - must be created. -The destination index must have a field with the <> -field type to index the output of the used model. +The destination index must have a field with the <> field type for most models and the <> field type for the sparse vector models like in the case of the `elser` service to index the output of the used model. include::{es-ref-dir}/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc[] @@ -49,8 +48,7 @@ include::{es-ref-dir}/tab-widgets/inference-api/infer-api-mapping-widget.asciido [[infer-service-inference-ingest-pipeline]] ==== Create an ingest pipeline with an inference processor -Create an <> with an -<> and use the model you created above to infer against the data that is being ingested in the pipeline. +Create an <> with an <> and use the model you created above to infer against the data that is being ingested in the pipeline. include::{es-ref-dir}/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc[] @@ -103,8 +101,8 @@ POST _tasks//_cancel ==== Semantic search After the data set has been enriched with the embeddings, you can query the data using {ref}/knn-search.html#knn-semantic-search[semantic search]. -Pass a -`query_vector_builder` to the k-nearest neighbor (kNN) vector search API, and provide the query text and the model you have used to create the embeddings. +In case of dense vector models, pass a `query_vector_builder` to the k-nearest neighbor (kNN) vector search API, and provide the query text and the model you have used to create the embeddings. +In case of a sparse vector model like ELSER, use a `sparse_vector` query, and provide the query text with the model you have used to create the embeddings. NOTE: If you cancelled the reindexing process, you run the query only a part of the data which affects the quality of your results. diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc index ba25cebcd1e1a..b47bc2370ab10 100644 --- a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc +++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc @@ -23,6 +23,11 @@ For more information, see <>. [[searchable-snapshots-api-mount-desc]] ==== {api-description-title} +This API mounts a snapshot as a searchable snapshot index. + +Don't use this API for snapshots managed by {ilm-init}. Manually mounting +{ilm-init}-managed snapshots can <> with +<>. [[searchable-snapshots-api-mount-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc index 12b6f2477a93c..a38971a0bae6a 100644 --- a/docs/reference/searchable-snapshots/index.asciidoc +++ b/docs/reference/searchable-snapshots/index.asciidoc @@ -170,6 +170,20 @@ do not have a dedicated frozen tier, you must configure the cache on one or more nodes. 
Partially mounted indices are only allocated to nodes that have a shared cache. +[[manually-mounting-snapshots]] +[WARNING] +.Manual snapshot mounting +==== +Manually mounting snapshots captured by an Index Lifecycle Management ({ilm-init}) policy can +interfere with {ilm-init}'s automatic management. This may lead to issues such as data loss +or complications with snapshot handling. + +For optimal results, allow {ilm-init} to manage +snapshots automatically. + +<>. +==== + [[searchable-snapshots-shared-cache]] `xpack.searchable.snapshot.shared_cache.size`:: (<>) @@ -325,6 +339,11 @@ cluster has write access then you must make sure that the other cluster does not delete these snapshots. The snapshot contains the sole full copy of your data. If you delete it then the data cannot be recovered from elsewhere. +* The data in a searchable snapshot index are cached in local storage, so if you +delete the underlying searchable snapshot {es} will continue to operate normally +until the first cache miss. This may be much later, for instance when a shard +relocates to a different node, or when the node holding the shard restarts. + * If the repository fails or corrupts the contents of the snapshot and you cannot restore it to its previous healthy state then the data is permanently lost. diff --git a/docs/reference/security/authorization/field-and-document-access-control.asciidoc b/docs/reference/security/authorization/field-and-document-access-control.asciidoc index f4d4fcd49a35f..7c7ea75ece161 100644 --- a/docs/reference/security/authorization/field-and-document-access-control.asciidoc +++ b/docs/reference/security/authorization/field-and-document-access-control.asciidoc @@ -54,8 +54,11 @@ specify any field restrictions. If you assign a user both roles, `role_a` gives the user access to all documents and `role_b` gives the user access to all fields. +[IMPORTANT] +=========== If you need to restrict access to both documents and fields, consider splitting documents by index instead. +=========== include::role-templates.asciidoc[] include::set-security-user.asciidoc[] diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index 145bd8ebc06bb..747b1eef40441 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -102,9 +102,7 @@ deprecated[7.5] Use `manage_transform` instead. This privilege is not available in {serverless-full}. `manage_data_stream_global_retention`:: -All operations related to managing the data stream global retention settings. -+ -This privilege is not available in {serverless-full}. +This privilege has no effect.deprecated[8.16] `manage_enrich`:: All operations related to managing and executing enrich policies. @@ -229,9 +227,7 @@ All cluster read-only operations, like cluster health and state, hot threads, node info, node and cluster stats, and pending cluster tasks. `monitor_data_stream_global_retention`:: -Allows the retrieval of the data stream global retention settings. -+ -This privilege is not available in {serverless-full}. +This privilege has no effect.deprecated[8.16] `monitor_enrich`:: All read-only operations related to managing and executing enrich policies. 
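To make the field- and document-level security note above concrete, the two roles could be defined with the create role API along the following lines. This is a minimal sketch: the index name, granted field, and query are illustrative and are not taken from the original example.

[source,console]
----
PUT _security/role/role_a
{
  "indices": [
    {
      "names": [ "my-index-000001" ],
      "privileges": [ "read" ],
      "field_security": { "grant": [ "title" ] } <1>
    }
  ]
}

PUT _security/role/role_b
{
  "indices": [
    {
      "names": [ "my-index-000001" ],
      "privileges": [ "read" ],
      "query": { "match": { "department": "engineering" } } <2>
    }
  ]
}
----
// TEST[skip:TBD]
<1> `role_a` restricts which fields are visible but places no restriction on documents.
<2> `role_b` restricts which documents are visible but places no restriction on fields.

Assigning both roles to the same user combines the permissions, which is why splitting documents by index is the safer option when both kinds of restriction are needed.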
diff --git a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc index 0f00e956472d0..4b055525d4e6c 100644 --- a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc +++ b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc @@ -10,6 +10,18 @@ These are the settings available for configuring <>, <>) +The maximum retention period that will apply to all user data streams managed by the data stream lifecycle. The max retention will also +override the retention of a data stream whose configured retention exceeds the max retention. It should be greater than `10s`. + +[[data-streams-lifecycle-retention-default]] +`data_streams.lifecycle.retention.default`:: +(<>, <>) +The retention period that will apply to all user data streams managed by the data stream lifecycle that do not have retention configured. +It should be greater than `10s` and less or equals than <>. + [[data-streams-lifecycle-poll-interval]] `data_streams.lifecycle.poll_interval`:: (<>, <>) diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index 4a48c26974084..145112ef4d27c 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -42,6 +42,11 @@ Specifies the path to a file that contains a key for encrypting sensitive data. If `xpack.watcher.encrypt_sensitive_data` is set to `true`, this setting is required. For more information, see <>. +`xpack.watcher.max.history.record.size`:: +(<>) +The maximum size watcher history record that can be written into the watcher history index. Any larger history record will have some of +its larger fields removed. Defaults to 10mb. + `xpack.http.proxy.host`:: (<>) Specifies the address of the proxy server to use to connect to HTTP services. diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index 64626aafb2441..b346fddc5e5a1 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -33,7 +33,6 @@ include::setup/configuration.asciidoc[] include::setup/important-settings.asciidoc[] - include::setup/secure-settings.asciidoc[] include::settings/audit-settings.asciidoc[] @@ -82,6 +81,8 @@ include::modules/indices/search-settings.asciidoc[] include::settings/security-settings.asciidoc[] +include::modules/shard-ops.asciidoc[] + include::modules/indices/request_cache.asciidoc[] include::settings/snapshot-settings.asciidoc[] @@ -93,7 +94,9 @@ include::modules/threadpool.asciidoc[] include::settings/notification-settings.asciidoc[] include::setup/advanced-configuration.asciidoc[] + include::setup/sysconfig.asciidoc[] + include::setup/bootstrap-checks.asciidoc[] include::setup/bootstrap-checks-xes.asciidoc[] diff --git a/docs/reference/setup/restart-cluster.asciidoc b/docs/reference/setup/restart-cluster.asciidoc index 9488c6632836b..a3bf7723cb5a9 100644 --- a/docs/reference/setup/restart-cluster.asciidoc +++ b/docs/reference/setup/restart-cluster.asciidoc @@ -11,7 +11,7 @@ time, so the service remains uninterrupted. [WARNING] ==== Nodes exceeding the low watermark threshold will be slow to restart. Reduce the disk -usage below the <> before to restarting nodes. +usage below the <> before restarting nodes. 
==== [discrete] diff --git a/docs/reference/slm/apis/slm-put.asciidoc b/docs/reference/slm/apis/slm-put.asciidoc index be265554deef5..51ad571ee12e7 100644 --- a/docs/reference/slm/apis/slm-put.asciidoc +++ b/docs/reference/slm/apis/slm-put.asciidoc @@ -100,13 +100,19 @@ Minimum number of snapshots to retain, even if the snapshots have expired. ==== `schedule`:: -(Required, <>) +(Required, <> or <>) Periodic or absolute schedule at which the policy creates snapshots. {slm-init} applies `schedule` changes immediately. +Schedule may be either a Cron schedule or a time unit describing the interval between snapshots. +When using a time unit interval, the first snapshot is scheduled one interval after the policy modification time, and then again every interval after. + [[slm-api-put-example]] ==== {api-examples-title} + +[[slm-api-put-daily-policy]] +===== Create a policy Create a `daily-snapshots` lifecycle policy: [source,console] @@ -138,4 +144,25 @@ PUT /_slm/policy/daily-snapshots <6> Optional retention configuration <7> Keep snapshots for 30 days <8> Always keep at least 5 successful snapshots, even if they're more than 30 days old -<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old \ No newline at end of file +<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old + + +[[slm-api-put-hourly-policy]] +===== Use Interval Scheduling +Create an `hourly-snapshots` lifecycle policy using interval scheduling: + +[source,console] +-------------------------------------------------- +PUT /_slm/policy/hourly-snapshots +{ + "schedule": "1h", + "name": "", + "repository": "my_repository", + "config": { + "indices": ["data-*", "important"] + } +} +-------------------------------------------------- +// TEST[setup:setup-repository] +Creates a snapshot once every hour. The first snapshot will be created one hour after the policy is modified, +with subsequent snapshots being created every hour afterward. diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc index d8b03cbc0e880..e677408da3f25 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc @@ -4,7 +4,7 @@ Get snapshot status ++++ -Retrieves a detailed description of the current state for each shard participating in the snapshot. +Retrieves a detailed description of the current state for each shard participating in the snapshot. Note that this API should only be used to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed, or you want to obtain information about one or more existing snapshots, use the <>. //// [source,console] @@ -172,13 +172,8 @@ Indicates the current snapshot state. `STARTED`:: The snapshot is currently running. -`PARTIAL`:: - The global cluster state was stored, but data of at least one shard was not stored successfully. - The <> section of the response contains more detailed information about shards - that were not processed correctly. - `SUCCESS`:: - The snapshot finished and all shards were stored successfully. + The snapshot completed. 
==== -- diff --git a/docs/reference/snapshot-restore/repository-azure.asciidoc b/docs/reference/snapshot-restore/repository-azure.asciidoc index f3d04159bc025..c361414052e14 100644 --- a/docs/reference/snapshot-restore/repository-azure.asciidoc +++ b/docs/reference/snapshot-restore/repository-azure.asciidoc @@ -1,56 +1,72 @@ [[repository-azure]] === Azure repository -You can use https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction[Azure Blob storage] as a repository for <>. +You can use +https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction[Azure +Blob storage] as a repository for <>. [[repository-azure-usage]] ==== Setup -To enable Azure repositories, you have first to define your Azure storage settings as -{ref}/secure-settings.html[secure settings]. +To enable Azure repositories, first configure an Azure repository client by +specifying one or more settings of the form +`azure.client.CLIENT_NAME.SETTING_NAME`. By default, `azure` repositories use a +client named `default`, but you may specify a different client name when +registering each repository. -You can define these settings before the node is started, or call the <> after the settings are defined to apply them to a running node. +The only mandatory Azure repository client setting is `account`, which is a +{ref}/secure-settings.html[secure setting] defined in the <>. To provide this setting, use the `elasticsearch-keystore` tool on +each node: [source,sh] ---------------------------------------------------------------- bin/elasticsearch-keystore add azure.client.default.account -bin/elasticsearch-keystore add azure.client.default.key ---------------------------------------------------------------- -Note that you can also define more than one account: +If you adjust this setting after a node has started, call the +<> to +reload the new value. + +You may define more than one client by setting their `account` values. For +instance, to set the `default` client and another client called `secondary`, run +the following commands on each node: [source,sh] ---------------------------------------------------------------- bin/elasticsearch-keystore add azure.client.default.account -bin/elasticsearch-keystore add azure.client.default.key bin/elasticsearch-keystore add azure.client.secondary.account -bin/elasticsearch-keystore add azure.client.secondary.sas_token ---------------------------------------------------------------- -For more information about these settings, see -<>. +The `key` and `sas_token` settings are also secure settings and can be set using +commands like the following: -[IMPORTANT] -.Supported Azure Storage Account types -=============================================== -The Azure repository type works with all Standard storage accounts +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore add azure.client.default.key +bin/elasticsearch-keystore add azure.client.secondary.sas_token +---------------------------------------------------------------- -* Standard Locally Redundant Storage - `Standard_LRS` -* Standard Zone-Redundant Storage - `Standard_ZRS` -* Standard Geo-Redundant Storage - `Standard_GRS` -* Standard Read Access Geo-Redundant Storage - `Standard_RAGRS` +Other Azure repository client settings must be set in `elasticsearch.yml` before +the node starts. 
For example: -https://azure.microsoft.com/en-gb/documentation/articles/storage-premium-storage[Premium Locally Redundant Storage] (`Premium_LRS`) is **not supported** as it is only usable as VM disk storage, not as general storage. -=============================================== +[source,yaml] +---- +azure.client.default.timeout: 10s +azure.client.default.max_retries: 7 +azure.client.default.endpoint_suffix: core.chinacloudapi.cn +azure.client.secondary.timeout: 30s +---- -[[repository-azure-client-settings]] -==== Client settings +In this example, the client side timeout is `10s` per try for repositories which +use the `default` client, with `7` retries before failing and an endpoint +suffix of `core.chinacloudapi.cn`. Repositories which use the `secondary` client +will have a timeout of `30s` per try, but will use the default endpoint and will +fail after the default number of retries. -The client that you use to connect to Azure has a number of settings available. -The settings have the form `azure.client.CLIENT_NAME.SETTING_NAME`. By default, -`azure` repositories use a client named `default`, but this can be modified using -the <> `client`. -For example: +Once an Azure repository client is configured correctly, register an Azure +repository as follows, providing the client name using the `client` +<>: [source,console] ---- @@ -64,48 +80,45 @@ PUT _snapshot/my_backup ---- // TEST[skip:we don't have azure setup while testing this] -Most client settings can be added to the `elasticsearch.yml` configuration file. -For example: +If you are using the `default` client, you may omit the `client` repository +setting: -[source,yaml] +[source,console] ---- -azure.client.default.timeout: 10s -azure.client.default.max_retries: 7 -azure.client.default.endpoint_suffix: core.chinacloudapi.cn -azure.client.secondary.timeout: 30s +PUT _snapshot/my_backup +{ + "type": "azure" +} ---- - -In this example, the client side timeout is `10s` per try for the `default` -account with `7` retries before failing. The endpoint suffix is -`core.chinacloudapi.cn` and `30s` per try for the `secondary` account with `3` -retries. - -The `account`, `key`, and `sas_token` storage settings are reloadable secure -settings, which you add to the {es} keystore. For more information about -creating and updating the {es} keystore, see -{ref}/secure-settings.html[Secure settings]. After you reload the settings, the -internal Azure clients, which are used to transfer the snapshot, utilize the -latest settings from the keystore. +// TEST[skip:we don't have azure setup while testing this] NOTE: In progress snapshot or restore jobs will not be preempted by a *reload* of the storage secure settings. They will complete using the client as it was built when the operation started. -The following list contains the available client settings. Those that must be -stored in the keystore are marked as "secure"; the other settings belong in the -`elasticsearch.yml` file. +[[repository-azure-client-settings]] +==== Client settings + +The following list describes the available client settings. Those that must be +stored in the keystore are marked as ({ref}/secure-settings.html[Secure], +{ref}/secure-settings.html#reloadable-secure-settings[reloadable]); the other +settings must be stored in the `elasticsearch.yml` file. The default +`CLIENT_NAME` is `default` but you may configure a client with a different name +and specify that client by name when registering a repository. 
-`account` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - The Azure account name, which is used by the repository's internal Azure client. +`azure.client.CLIENT_NAME.account` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: + The Azure account name, which is used by the repository's internal Azure + client. This setting is required for all clients. -`endpoint_suffix`:: +`azure.client.CLIENT_NAME.endpoint_suffix`:: The Azure endpoint suffix to connect to. The default value is `core.windows.net`. -`key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - The Azure secret key, which is used by the repository's internal Azure client. Alternatively, use `sas_token`. +`azure.client.CLIENT_NAME.key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: + The Azure secret key, which is used by the repository's internal Azure client. + Alternatively, use `sas_token`. -`max_retries`:: +`azure.client.CLIENT_NAME.max_retries`:: The number of retries to use when an Azure request fails. This setting helps control the exponential backoff policy. It specifies the number of retries that must occur before the snapshot fails. The default value is `3`. The @@ -113,19 +126,20 @@ stored in the keystore are marked as "secure"; the other settings belong in the of wait time before retrying after a first timeout or failure. The maximum backoff period is defined by Azure SDK as `90s`. -`proxy.host`:: - The host name of a proxy to connect to Azure through. For example: `azure.client.default.proxy.host: proxy.host`. +`azure.client.CLIENT_NAME.proxy.host`:: + The host name of a proxy to connect to Azure through. By default, no proxy is + used. -`proxy.port`:: - The port of a proxy to connect to Azure through. For example, `azure.client.default.proxy.port: 8888`. +`azure.client.CLIENT_NAME.proxy.port`:: + The port of a proxy to connect to Azure through. By default, no proxy is used. -`proxy.type`:: +`azure.client.CLIENT_NAME.proxy.type`:: Register a proxy type for the client. Supported values are `direct`, `http`, and `socks`. For example: `azure.client.default.proxy.type: http`. When `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` must also be provided. The default value is `direct`. -`sas_token` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: +`azure.client.CLIENT_NAME.sas_token` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: A shared access signatures (SAS) token, which the repository's internal Azure client uses for authentication. The SAS token must have read (r), write (w), list (l), and delete (d) permissions for the repository base path and all its @@ -133,28 +147,78 @@ stored in the keystore are marked as "secure"; the other settings belong in the to resource types service (s), container (c), and object (o). Alternatively, use `key`. -`timeout`:: - The client side timeout for any single request to Azure. The value should - specify the time unit. For example, a value of `5s` specifies a 5 second +`azure.client.CLIENT_NAME.timeout`:: + The client side timeout for any single request to Azure, as a + <>. For example, a value of `5s` specifies a 5 second timeout. 
There is no default value, which means that {es} uses the - https://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)[default value] - set by the Azure client (known as 5 minutes). This setting can be defined - globally, per account, or both. + https://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)[default + value] set by the Azure client. + +`azure.client.CLIENT_NAME.endpoint`:: + The Azure endpoint to connect to. It must include the protocol used to connect + to Azure. + +`azure.client.CLIENT_NAME.secondary_endpoint`:: + The Azure secondary endpoint to connect to. It must include the protocol used + to connect to Azure. + +[[repository-azure-default-credentials]] +[NOTE] +.Obtaining credentials from the environment +====================================================== +If you specify neither the `key` nor the `sas_token` settings for a client then +{es} will attempt to automatically obtain credentials from the environment in +which it is running using mechanisms built into the Azure SDK. This is ideal +for when running {es} on the Azure platform. + +When running {es} on an +https://azure.microsoft.com/en-gb/products/virtual-machines[Azure Virtual +Machine], you should use +https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview[Azure +Managed Identity] to provide credentials to {es}. To use Azure Managed Identity, +assign a suitably authorized identity to the Azure Virtual Machine on which {es} +is running. + +When running {es} in +https://azure.microsoft.com/en-gb/products/kubernetes-service[Azure Kubernetes +Service], for instance using {eck-ref}[{eck}], you should use +https://azure.github.io/azure-workload-identity/docs/introduction.html[Azure +Workload Identity] to provide credentials to {es}. To use Azure Workload +Identity, mount the `azure-identity-token` volume as a subdirectory of the +<> and set the +`AZURE_FEDERATED_TOKEN_FILE` environment variable to point to a file called +`azure-identity-token` within the mounted volume. + +The Azure SDK has several other mechanisms to automatically obtain credentials +from its environment, but the two methods described above are the only ones +that are tested and supported for use in {es}. +====================================================== -`endpoint`:: - The Azure endpoint to connect to. It must include the protocol used to connect to Azure. -`secondary_endpoint`:: - The Azure secondary endpoint to connect to. It must include the protocol used to connect to Azure. [[repository-azure-repository-settings]] ==== Repository settings -The Azure repository supports following settings: +The Azure repository supports the following settings, which may be specified +when registering an Azure repository as follows: + +[source,console] +---- +PUT _snapshot/my_backup +{ + "type": "azure", + "settings": { + "client": "secondary", + "container": "my_container", + "base_path": "snapshots_prefix" + } +} +---- +// TEST[skip:we don't have azure setup while testing this] `client`:: - Azure named client to use. Defaults to `default`. + The name of the Azure repository client to use. Defaults to `default`. `container`:: @@ -172,13 +236,15 @@ multiple deployments may share the same bucket. `chunk_size`:: - Big files can be broken down into multiple smaller blobs in the blob store during snapshotting. 
- It is not recommended to change this value from its default unless there is an explicit reason for limiting the - size of blobs in the repository. Setting a value lower than the default can result in an increased number of API - calls to the Azure blob store during snapshot create as well as restore operations compared to using the default - value and thus make both operations slower as well as more costly. - Specify the chunk size as a value and unit, for example: - `10MB`, `5KB`, `500B`. Defaults to the maximum size of a blob in the Azure blob store which is `5TB`. + Big files can be broken down into multiple smaller blobs in the blob store + during snapshotting. It is not recommended to change this value from its + default unless there is an explicit reason for limiting the size of blobs in + the repository. Setting a value lower than the default can result in an + increased number of API calls to the Azure blob store during snapshot create + as well as restore operations compared to using the default value and thus + make both operations slower as well as more costly. Specify the chunk size + as a <>, for example: `10MB`, `5KB`, `500B`. Defaults + to the maximum size of a blob in the Azure blob store which is `5TB`. `compress`:: @@ -193,59 +259,6 @@ include::repository-shared-settings.asciidoc[] `primary_only` or `secondary_only`. Defaults to `primary_only`. Note that if you set it to `secondary_only`, it will force `readonly` to true. -Some examples, using scripts: - -[source,console] ----- -# The simplest one -PUT _snapshot/my_backup1 -{ - "type": "azure" -} - -# With some settings -PUT _snapshot/my_backup2 -{ - "type": "azure", - "settings": { - "container": "backup-container", - "base_path": "backups", - "chunk_size": "32MB", - "compress": true - } -} - - -# With two accounts defined in elasticsearch.yml (my_account1 and my_account2) -PUT _snapshot/my_backup3 -{ - "type": "azure", - "settings": { - "client": "secondary" - } -} -PUT _snapshot/my_backup4 -{ - "type": "azure", - "settings": { - "client": "secondary", - "location_mode": "primary_only" - } -} ----- -// TEST[skip:we don't have azure setup while testing this] - -Example using Java: - -[source,java] ----- -client.admin().cluster().preparePutRepository("my_backup_java1") - .setType("azure").setSettings(Settings.builder() - .put(Storage.CONTAINER, "backup-container") - .put(Storage.CHUNK_SIZE, new ByteSizeValue(32, ByteSizeUnit.MB)) - ).get(); ----- - [[repository-azure-validation]] ==== Repository validation rules @@ -260,6 +273,19 @@ permitted in container names. * All letters in a container name must be lowercase. * Container names must be from 3 through 63 characters long. +[IMPORTANT] +.Supported Azure Storage Account types +=============================================== +The Azure repository type works with all Standard storage accounts + +* Standard Locally Redundant Storage - `Standard_LRS` +* Standard Zone-Redundant Storage - `Standard_ZRS` +* Standard Geo-Redundant Storage - `Standard_GRS` +* Standard Read Access Geo-Redundant Storage - `Standard_RAGRS` + +https://azure.microsoft.com/en-gb/documentation/articles/storage-premium-storage[Premium Locally Redundant Storage] (`Premium_LRS`) is **not supported** as it is only usable as VM disk storage, not as general storage. 
+=============================================== + [[repository-azure-linearizable-registers]] ==== Linearizable register implementation diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index d757a74110ca9..3a9c12caebad9 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -317,6 +317,15 @@ include::repository-shared-settings.asciidoc[] https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html[AWS DeleteObjects API]. +`max_multipart_upload_cleanup_size`:: + + (<>) Sets the maximum number of possibly-dangling multipart + uploads to clean up in each batch of snapshot deletions. Defaults to `1000` + which is the maximum number supported by the + https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html[AWS + ListMultipartUploads API]. If set to `0`, {es} will not attempt to clean up + dangling multipart uploads. + NOTE: The option of defining client settings in the repository settings as documented below is considered deprecated, and will be removed in a future version. @@ -492,33 +501,6 @@ by the `elasticsearch` user. By default, {es} runs as user `elasticsearch` using If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. -==== Cleaning up multi-part uploads - -{es} uses S3's multi-part upload process to upload larger blobs to the -repository. The multi-part upload process works by dividing each blob into -smaller parts, uploading each part independently, and then completing the -upload in a separate step. This reduces the amount of data that {es} must -re-send if an upload fails: {es} only needs to re-send the part that failed -rather than starting from the beginning of the whole blob. The storage for each -part is charged independently starting from the time at which the part was -uploaded. - -If a multi-part upload cannot be completed then it must be aborted in order to -delete any parts that were successfully uploaded, preventing further storage -charges from accumulating. {es} will automatically abort a multi-part upload on -failure, but sometimes the abort request itself fails. For example, if the -repository becomes inaccessible or the instance on which {es} is running is -terminated abruptly then {es} cannot complete or abort any ongoing uploads. - -You must make sure that failed uploads are eventually aborted to avoid -unnecessary storage costs. You can use the -https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html[List -multipart uploads API] to list the ongoing uploads and look for any which are -unusually long-running, or you can -https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpu-abort-incomplete-mpu-lifecycle-config.html[configure -a bucket lifecycle policy] to automatically abort incomplete uploads once they -reach a certain age. - [[repository-s3-aws-vpc]] ==== AWS VPC bandwidth settings diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc index 6039d1de5345b..3a686e27cf580 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc @@ -7,6 +7,12 @@ id="infer-api-ingest-cohere"> Cohere + +
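Relating this back to the `repository-s3` change above: `max_multipart_upload_cleanup_size` is an ordinary repository setting, so it can be supplied when registering the repository. The sketch below uses illustrative repository and bucket names.

[source,console]
----
PUT _snapshot/my_s3_repository
{
  "type": "s3",
  "settings": {
    "bucket": "my-bucket",
    "max_multipart_upload_cleanup_size": 500 <1>
  }
}
----
// TEST[skip:we don't have S3 set up while testing this]
<1> Clean up at most 500 possibly-dangling multipart uploads per batch of snapshot deletions. Setting this to `0` disables the cleanup.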
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc index f95c4a6dbc8c8..6678b60fabc40 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc @@ -35,6 +35,32 @@ and the `output_field` that will contain the {infer} results. // end::cohere[] +// tag::elser[] + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/elser_embeddings +{ + "processors": [ + { + "inference": { + "model_id": "elser_embeddings", <1> + "input_output": { <2> + "input_field": "content", + "output_field": "content_embedding" + } + } + } + ] +} +-------------------------------------------------- +<1> The name of the inference endpoint you created by using the +<>, it's referred to as `inference_id` in that step. +<2> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +// end::elser[] + // tag::hugging-face[] [source,console] @@ -190,3 +216,29 @@ PUT _ingest/pipeline/amazon_bedrock_embeddings and the `output_field` that will contain the {infer} results. // end::amazon-bedrock[] + +// tag::alibabacloud-ai-search[] + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/alibabacloud_ai_search_embeddings +{ + "processors": [ + { + "inference": { + "model_id": "alibabacloud_ai_search_embeddings", <1> + "input_output": { <2> + "input_field": "content", + "output_field": "content_embedding" + } + } + } + ] +} +-------------------------------------------------- +<1> The name of the inference endpoint you created by using the +<>, it's referred to as `inference_id` in that step. +<2> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +// end::alibabacloud-ai-search[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc index 66b0cde549545..66b790bdd57a5 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc @@ -7,6 +7,12 @@ id="infer-api-mapping-cohere"> Cohere + +
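As a quick sanity check of an {infer} pipeline such as the `elser_embeddings` pipeline defined above, you can run it against a sample document with the simulate pipeline API before reindexing any data. The document body below is illustrative.

[source,console]
----
POST _ingest/pipeline/elser_embeddings/_simulate
{
  "docs": [
    {
      "_source": {
        "content": "How to avoid muscle soreness after running?" <1>
      }
    }
  ]
}
----
// TEST[skip:TBD]
<1> The response shows the document as it would be indexed, including the `content_embedding` field produced by the {infer} processor.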
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc index 72c648e63871d..c86538ceb9c87 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc @@ -31,6 +31,34 @@ the {infer} pipeline configuration in the next step. // end::cohere[] +// tag::elser[] + +[source,console] +-------------------------------------------------- +PUT elser-embeddings +{ + "mappings": { + "properties": { + "content_embedding": { <1> + "type": "sparse_vector" <2> + }, + "content": { <3> + "type": "text" <4> + } + } + } +} +-------------------------------------------------- +<1> The name of the field to contain the generated tokens. It must be refrenced +in the {infer} pipeline configuration in the next step. +<2> The field to contain the tokens is a `sparse_vector` field for ELSER. +<3> The name of the field from which to create the dense vector representation. +In this example, the name of the field is `content`. It must be referenced in +the {infer} pipeline configuration in the next step. +<4> The field type which is text in this example. + +// end::elser[] + // tag::hugging-face[] [source,console] @@ -242,3 +270,35 @@ the {infer} pipeline configuration in the next step. <6> The field type which is text in this example. // end::amazon-bedrock[] + +// tag::alibabacloud-ai-search[] + +[source,console] +-------------------------------------------------- +PUT alibabacloud-ai-search-embeddings +{ + "mappings": { + "properties": { + "content_embedding": { <1> + "type": "dense_vector", <2> + "dims": 1024, <3> + "element_type": "float" + }, + "content": { <4> + "type": "text" <5> + } + } + } +} +-------------------------------------------------- +<1> The name of the field to contain the generated tokens. It must be referenced +in the {infer} pipeline configuration in the next step. +<2> The field to contain the tokens is a `dense_vector` field. +<3> The output dimensions of the model. This value may be different depending on the underlying model used. +See the https://help.aliyun.com/zh/open-search/search-platform/developer-reference/text-embedding-api-details[AlibabaCloud AI Search embedding model] documentation. +<4> The name of the field from which to create the dense vector representation. +In this example, the name of the field is `content`. It must be referenced in +the {infer} pipeline configuration in the next step. +<5> The field type which is text in this example. + +// end::alibabacloud-ai-search[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc index 9a8028e2b3c6c..86f52fee2063c 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc @@ -7,6 +7,12 @@ id="infer-api-reindex-cohere"> Cohere + +
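After creating an index such as the `elser-embeddings` or `alibabacloud-ai-search-embeddings` index above, you can confirm that the embedding field has the expected `sparse_vector` or `dense_vector` type with the get mapping API:

[source,console]
----
GET elser-embeddings/_mapping
----
// TEST[skip:TBD]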
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc index 995189f1309aa..25d4023c650c0 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc @@ -25,6 +25,29 @@ may affect the throughput of the reindexing process. // end::cohere[] +// tag::elser[] + +[source,console] +---- +POST _reindex?wait_for_completion=false +{ + "source": { + "index": "test-data", + "size": 50 <1> + }, + "dest": { + "index": "elser-embeddings", + "pipeline": "elser_embeddings" + } +} +---- +// TEST[skip:TBD] +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller +number makes the update of the reindexing process quicker which enables you to +follow the progress closely and detect errors early. + +// end::elser[] + // tag::hugging-face[] [source,console] @@ -177,3 +200,26 @@ number makes the update of the reindexing process quicker which enables you to follow the progress closely and detect errors early. // end::amazon-bedrock[] + +// tag::alibabacloud-ai-search[] + +[source,console] +---- +POST _reindex?wait_for_completion=false +{ + "source": { + "index": "test-data", + "size": 50 <1> + }, + "dest": { + "index": "alibabacloud-ai-search-embeddings", + "pipeline": "alibabacloud_ai_search_embeddings" + } +} +---- +// TEST[skip:TBD] +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller +number makes the update of the reindexing process quicker which enables you to +follow the progress closely and detect errors early. + +// end::alibabacloud-ai-search[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc index cf2e4994279d9..fb686a2d8be12 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc @@ -7,6 +7,12 @@ id="infer-api-requirements-cohere"> Cohere + +
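Because the reindex requests above are submitted with `wait_for_completion=false`, they return immediately with a task ID. One way to follow the progress of the embedding generation is the task management API; the request below lists all running reindex tasks.

[source,console]
----
GET _tasks?detailed=true&actions=*reindex
----
// TEST[skip:TBD]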
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc index 856e4d5f0fe47..c9e7ca8b80ba6 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc @@ -5,6 +5,13 @@ the Cohere service. // end::cohere[] +// tag::elser[] + +ELSER is a model trained by Elastic. If you have an {es} deployment, there is no +further requirement for using the {infer} API with the `elser` service. + +// end::elser[] + // tag::hugging-face[] A https://huggingface.co/[HuggingFace account] is required to use the {infer} @@ -45,3 +52,9 @@ You can apply for access to Azure OpenAI by completing the form at https://aka.m * A pair of access and secret keys used to access Amazon Bedrock // end::amazon-bedrock[] + +// tag::alibabacloud-ai-search[] +* An AlibabaCloud Account with https://console.aliyun.com[AlibabaCloud] access +* An API key generated for your account from the https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[API keys section] + +// end::alibabacloud-ai-search[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc index 52cf65c4a1509..996148d80a4bd 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc @@ -7,6 +7,12 @@ id="infer-api-search-cohere"> Cohere + +
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc index 5e23afeb19a9f..fe1f58b6bd1a9 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc @@ -72,6 +72,67 @@ query from the `cohere-embeddings` index sorted by their proximity to the query: // end::cohere[] +// tag::elser[] + +[source,console] +-------------------------------------------------- +GET elser-embeddings/_search +{ + "query":{ + "sparse_vector":{ + "field": "content_embedding", + "inference_id": "elser_embeddings", + "query": "How to avoid muscle soreness after running?" + } + }, + "_source": [ + "id", + "content" + ] +} +-------------------------------------------------- +// TEST[skip:TBD] + +As a result, you receive the top 10 documents that are closest in meaning to the +query from the `cohere-embeddings` index sorted by their proximity to the query: + +[source,consol-result] +-------------------------------------------------- +"hits": [ + { + "_index": "elser-embeddings", + "_id": "ZLGc_pABZbBmsu5_eCoH", + "_score": 21.472063, + "_source": { + "id": 2258240, + "content": "You may notice some muscle aches while you are exercising. This is called acute soreness. More often, you may begin to feel sore about 12 hours after exercising, and the discomfort usually peaks at 48 to 72 hours after exercise. This is called delayed-onset muscle soreness.It is thought that, during this time, your body is repairing the muscle, making it stronger and bigger.You may also notice the muscles feel better if you exercise lightly. This is normal.his is called delayed-onset muscle soreness. It is thought that, during this time, your body is repairing the muscle, making it stronger and bigger. You may also notice the muscles feel better if you exercise lightly. This is normal." + } + }, + { + "_index": "elser-embeddings", + "_id": "ZbGc_pABZbBmsu5_eCoH", + "_score": 21.421381, + "_source": { + "id": 2258242, + "content": "Photo Credit Jupiterimages/Stockbyte/Getty Images. That stiff, achy feeling you get in the days after exercise is a normal physiological response known as delayed onset muscle soreness. You can take it as a positive sign that your muscles have felt the workout, but the pain may also turn you off to further exercise.ou are more likely to develop delayed onset muscle soreness if you are new to working out, if you’ve gone a long time without exercising and start up again, if you have picked up a new type of physical activity or if you have recently boosted the intensity, length or frequency of your exercise sessions." + } + }, + { + "_index": "elser-embeddings", + "_id": "ZrGc_pABZbBmsu5_eCoH", + "_score": 20.542095, + "_source": { + "id": 2258248, + "content": "They found that stretching before and after exercise has no effect on muscle soreness. Exercise might cause inflammation, which leads to an increase in the production of immune cells (comprised mostly of macrophages and neutrophils). Levels of these immune cells reach a peak 24-48 hours after exercise.These cells, in turn, produce bradykinins and prostaglandins, which make the pain receptors in your body more sensitive. Whenever you move, these pain receptors are stimulated.hey found that stretching before and after exercise has no effect on muscle soreness. 
Exercise might cause inflammation, which leads to an increase in the production of immune cells (comprised mostly of macrophages and neutrophils). Levels of these immune cells reach a peak 24-48 hours after exercise." + } + }, + (...) + ] +-------------------------------------------------- +// NOTCONSOLE + +// end::elser[] + // tag::hugging-face[] [source,console] @@ -470,3 +531,68 @@ query from the `amazon-bedrock-embeddings` index sorted by their proximity to th // NOTCONSOLE // end::amazon-bedrock[] + +// tag::alibabacloud-ai-search[] + +[source,console] +-------------------------------------------------- +GET alibabacloud-ai-search-embeddings/_search +{ + "knn": { + "field": "content_embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "alibabacloud_ai_search_embeddings", + "model_text": "Calculate fuel cost" + } + }, + "k": 10, + "num_candidates": 100 + }, + "_source": [ + "id", + "content" + ] +} +-------------------------------------------------- +// TEST[skip:TBD] + +As a result, you receive the top 10 documents that are closest in meaning to the +query from the `alibabacloud-ai-search-embeddings` index sorted by their proximity to the query: + +[source,consol-result] +-------------------------------------------------- +"hits": [ + { + "_index": "alibabacloud-ai-search-embeddings", + "_id": "DDd5OowBHxQKHyc3TDSC", + "_score": 0.83704096, + "_source": { + "id": 862114, + "body": "How to calculate fuel cost for a road trip. By Tara Baukus Mello • Bankrate.com. Dear Driving for Dollars, My family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost.It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes.y family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost. It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes." + } + }, + { + "_index": "alibabacloud-ai-search-embeddings", + "_id": "ajd5OowBHxQKHyc3TDSC", + "_score": 0.8345704, + "_source": { + "id": 820622, + "body": "Home Heating Calculator. Typically, approximately 50% of the energy consumed in a home annually is for space heating. When deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important.This calculator can help you estimate the cost of fuel for different heating appliances.hen deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important. This calculator can help you estimate the cost of fuel for different heating appliances." + } + }, + { + "_index": "alibabacloud-ai-search-embeddings", + "_id": "Djd5OowBHxQKHyc3TDSC", + "_score": 0.8327426, + "_source": { + "id": 8202683, + "body": "Fuel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel.If you are paying $4 per gallon, the trip would cost you $200.Most boats have much larger gas tanks than cars.uel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel." 
+ } + }, + (...) + ] +-------------------------------------------------- +// NOTCONSOLE + +// end::alibabacloud-ai-search[] diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc index d13301b64a871..1dfa6077553fe 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc @@ -7,7 +7,13 @@ id="infer-api-task-cohere"> Cohere - + +
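If you want to inspect the raw output of an {infer} endpoint used in the searches above (for example, the sparse embedding that ELSER produces for a query string), you can call the endpoint directly. This sketch assumes the `elser_embeddings` endpoint used in the earlier examples.

[source,console]
----
POST _inference/sparse_embedding/elser_embeddings
{
  "input": "How to avoid muscle soreness after running?"
}
----
// TEST[skip:TBD]

The response contains the token-weight pairs that the `sparse_vector` query uses when it expands the query text.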
diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc index c6ef2a46a8731..2b4aa1a200102 100644 --- a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc +++ b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc @@ -28,9 +28,29 @@ NOTE: When using this model the recommended similarity measure to use in the embeddings are normalized to unit length in which case the `dot_product` and the `cosine` measures are equivalent. +// end::cohere[] +// tag::elser[] -// end::cohere[] +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/elser_embeddings <1> +{ + "service": "elser", + "service_settings": { + "num_allocations": 1, + "num_threads": 1 + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `sparse_embedding` in the path and the `inference_id` which +is the unique identifier of the {infer} endpoint is `elser_embeddings`. + +You don't need to download and deploy the ELSER model upfront, the API request +above will download the model if it's not downloaded yet and then deploy it. + +// end::elser[] // tag::hugging-face[] @@ -203,3 +223,32 @@ PUT _inference/text_embedding/amazon_bedrock_embeddings <1> <6> The model ID or ARN of the model to use. // end::amazon-bedrock[] + +// tag::alibabacloud-ai-search[] + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/alibabacloud_ai_search_embeddings <1> +{ + "service": "alibabacloud-ai-search", + "service_settings": { + "api_key": "", <2> + "service_id": "", <3> + "host": "", <4> + "workspace": "" <5> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `text_embedding` in the path and the `inference_id` which is the unique identifier of the {infer} endpoint is `alibabacloud_ai_search_embeddings`. +<2> The API key for accessing the AlibabaCloud AI Search API. You can find your API keys in +your AlibabaCloud account under the +https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key[API keys section]. You need to provide +your API key only once. The <> does not return your API +key. +<3> The AlibabaCloud AI Search embeddings model name, for example `ops-text-embedding-zh-001`. +<4> The name our your AlibabaCloud AI Search host address. +<5> The name our your AlibabaCloud AI Search workspace. + +// end::alibabacloud-ai-search[] + diff --git a/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc b/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc index 2b3fcc1b6df9f..7eb27d5428956 100644 --- a/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc +++ b/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc @@ -4,31 +4,39 @@ ++++ Watermark errors ++++ -:keywords: {es}, high watermark, low watermark, full disk +:keywords: {es}, high watermark, low watermark, full disk, flood stage watermark When a data node is critically low on disk space and has reached the <>, the following error is logged: `Error: disk usage exceeded flood-stage watermark, index has read-only-allow-delete block`. -To prevent a full disk, when a node reaches this watermark, {es} blocks writes +To prevent a full disk, when a node reaches this watermark, {es} <> to any index with a shard on the node. 
If the block affects related system -indices, {kib} and other {stack} features may become unavailable. +indices, {kib} and other {stack} features may become unavailable. For example, +this could induce {kib}'s `Kibana Server is not Ready yet` +{kibana-ref}/access.html#not-ready[error message]. {es} will automatically remove the write block when the affected node's disk -usage goes below the <>. To -achieve this, {es} automatically moves some of the affected node's shards to -other nodes in the same data tier. +usage falls below the <>. +To achieve this, {es} attempts to rebalance some of the affected node's shards +to other nodes in the same data tier. -To verify that shards are moving off the affected node, use the <>. +[[fix-watermark-errors-rebalance]] +==== Monitor rebalancing + +To verify that shards are moving off the affected node until it falls below high +watermark., use the <> and <>: [source,console] ---- GET _cat/shards?v=true + +GET _cat/recovery?v=true&active_only=true ---- -If shards remain on the node, use the <> to get an explanation for their allocation status. +If shards remain on the node keeping it about high watermark, use the +<> to get an +explanation for their allocation status. [source,console] ---- @@ -36,16 +44,18 @@ GET _cluster/allocation/explain { "index": "my-index", "shard": 0, - "primary": false, - "current_node": "my-node" + "primary": false } ---- // TEST[s/^/PUT my-index\n/] // TEST[s/"primary": false,/"primary": false/] -// TEST[s/"current_node": "my-node"//] -To immediately restore write operations, you can temporarily increase the disk -watermarks and remove the write block. +[[fix-watermark-errors-temporary]] +==== Temporary Relief + +To immediately restore write operations, you can temporarily increase the +<> and remove the +<>. [source,console] ---- @@ -70,18 +80,7 @@ PUT */_settings?expand_wildcards=all ---- // TEST[s/^/PUT my-index\n/] -As a long-term solution, we recommend you add nodes to the affected data tiers -or upgrade existing nodes to increase disk space. To free up additional disk -space, you can delete unneeded indices using the <>. - -[source,console] ----- -DELETE my-index ----- -// TEST[s/^/PUT my-index\n/] - -When a long-term solution is in place, reset or reconfigure the disk watermarks. +When a long-term solution is in place, to reset or reconfigure the disk watermarks: [source,console] ---- @@ -99,3 +98,21 @@ PUT _cluster/settings } } ---- + +[[fix-watermark-errors-resolve]] +==== Resolve + +As a long-term solution, we recommend you do one of the following best suited +to your use case: + +* add nodes to the affected <> + +* upgrade existing nodes to increase disk space ++ +TIP: On {ess}, https://support.elastic.co[Elastic Support] intervention may +become necessary if <> reaches `status:red`. + +* delete unneeded indices using the <> + +* update related <> to push indices +through to later <> diff --git a/docs/reference/troubleshooting/network-timeouts.asciidoc b/docs/reference/troubleshooting/network-timeouts.asciidoc index ef942ac1d268d..ef666c09f87db 100644 --- a/docs/reference/troubleshooting/network-timeouts.asciidoc +++ b/docs/reference/troubleshooting/network-timeouts.asciidoc @@ -16,20 +16,22 @@ end::troubleshooting-network-timeouts-gc-vm[] tag::troubleshooting-network-timeouts-packet-capture-elections[] * Packet captures will reveal system-level and network-level faults, especially -if you capture the network traffic simultaneously at all relevant nodes. 
You -should be able to observe any retransmissions, packet loss, or other delays on -the connections between the nodes. +if you capture the network traffic simultaneously at all relevant nodes and +analyse it alongside the {es} logs from those nodes. You should be able to +observe any retransmissions, packet loss, or other delays on the connections +between the nodes. end::troubleshooting-network-timeouts-packet-capture-elections[] tag::troubleshooting-network-timeouts-packet-capture-fault-detection[] * Packet captures will reveal system-level and network-level faults, especially if you capture the network traffic simultaneously at the elected master and the -faulty node. The connection used for follower checks is not used for any other -traffic so it can be easily identified from the flow pattern alone, even if TLS -is in use: almost exactly every second there will be a few hundred bytes sent -each way, first the request by the master and then the response by the -follower. You should be able to observe any retransmissions, packet loss, or -other delays on such a connection. +faulty node and analyse it alongside the {es} logs from those nodes. The +connection used for follower checks is not used for any other traffic so it can +be easily identified from the flow pattern alone, even if TLS is in use: almost +exactly every second there will be a few hundred bytes sent each way, first the +request by the master and then the response by the follower. You should be able +to observe any retransmissions, packet loss, or other delays on such a +connection. end::troubleshooting-network-timeouts-packet-capture-fault-detection[] tag::troubleshooting-network-timeouts-threads[] diff --git a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc index 387ebcdcd43c0..cbb35f7731034 100644 --- a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc +++ b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc @@ -1,4 +1,316 @@ [[troubleshooting-unstable-cluster]] == Troubleshooting an unstable cluster -include::../modules/discovery/fault-detection.asciidoc[tag=troubleshooting,leveloffset=-2] \ No newline at end of file +Normally, a node will only leave a cluster if deliberately shut down. If a node +leaves the cluster unexpectedly, it's important to address the cause. A cluster +in which nodes leave unexpectedly is unstable and can create several issues. +For instance: + +* The cluster health may be yellow or red. + +* Some shards will be initializing and other shards may be failing. + +* Search, indexing, and monitoring operations may fail and report exceptions in +logs. + +* The `.security` index may be unavailable, blocking access to the cluster. + +* The master may appear busy due to frequent cluster state updates. + +To troubleshoot a cluster in this state, first ensure the cluster has a +<>. Next, focus on the nodes +unexpectedly leaving the cluster ahead of all other issues. It will not be +possible to solve other issues until the cluster has a stable master node and +stable node membership. + +Diagnostics and statistics are usually not useful in an unstable cluster. These +tools only offer a view of the state of the cluster at a single point in time. +Instead, look at the cluster logs to see the pattern of behaviour over time. +Focus particularly on logs from the elected master. 
When a node leaves the +cluster, logs for the elected master include a message like this (with line +breaks added to make it easier to read): + +[source,text] +---- +[2022-03-21T11:02:35,513][INFO ][o.e.c.c.NodeLeftExecutor] [instance-0000000000] + node-left: [{instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{aNlyORLASam1ammv2DzYXA}{172.27.47.21}{172.27.47.21:19054}{m}] + with reason [disconnected] +---- + +This message says that the `NodeLeftExecutor` on the elected master +(`instance-0000000000`) processed a `node-left` task, identifying the node that +was removed and the reason for its removal. When the node joins the cluster +again, logs for the elected master will include a message like this (with line +breaks added to make it easier to read): + +[source,text] +---- +[2022-03-21T11:02:59,892][INFO ][o.e.c.c.NodeJoinExecutor] [instance-0000000000] + node-join: [{instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{UNw_RuazQCSBskWZV8ID_w}{172.27.47.21}{172.27.47.21:19054}{m}] + with reason [joining after restart, removed [24s] ago with reason [disconnected]] +---- + +This message says that the `NodeJoinExecutor` on the elected master +(`instance-0000000000`) processed a `node-join` task, identifying the node that +was added to the cluster and the reason for the task. + +Other nodes may log similar messages, but report fewer details: + +[source,text] +---- +[2020-01-29T11:02:36,985][INFO ][o.e.c.s.ClusterApplierService] + [instance-0000000001] removed { + {instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{aNlyORLASam1ammv2DzYXA}{172.27.47.21}{172.27.47.21:19054}{m} + {tiebreaker-0000000003}{UNw_RuazQCSBskWZV8ID_w}{bltyVOQ-RNu20OQfTHSLtA}{172.27.161.154}{172.27.161.154:19251}{mv} + }, term: 14, version: 1653415, reason: Publication{term=14, version=1653415} +---- + +These messages are not especially useful for troubleshooting, so focus on the +ones from the `NodeLeftExecutor` and `NodeJoinExecutor` which are only emitted +on the elected master and which contain more details. If you don't see the +messages from the `NodeLeftExecutor` and `NodeJoinExecutor`, check that: + +* You're looking at the logs for the elected master node. + +* The logs cover the correct time period. + +* Logging is enabled at `INFO` level. + +Nodes will also log a message containing `master node changed` whenever they +start or stop following the elected master. You can use these messages to +determine each node's view of the state of the master over time. + +If a node restarts, it will leave the cluster and then join the cluster again. +When it rejoins, the `NodeJoinExecutor` will log that it processed a +`node-join` task indicating that the node is `joining after restart`. If a node +is unexpectedly restarting, look at the node's logs to see why it is shutting +down. + +The <> API on the affected node will also provide some useful +information about the situation. + +If the node did not restart then you should look at the reason for its +departure more closely. Each reason has different troubleshooting steps, +described below. There are three possible reasons: + +* `disconnected`: The connection from the master node to the removed node was +closed. + +* `lagging`: The master published a cluster state update, but the removed node +did not apply it within the permitted timeout. By default, this timeout is 2 +minutes. Refer to <> for information about the +settings which control this mechanism. + +* `followers check retry count exceeded`: The master sent a number of +consecutive health checks to the removed node. 
These checks were rejected or
+timed out. By default, each health check times out after 10 seconds and {es}
+removes the node after three consecutively failed health checks. Refer
+to <> for information about the settings which
+control this mechanism.
+
+[discrete]
+[[troubleshooting-unstable-cluster-disconnected]]
+=== Diagnosing `disconnected` nodes
+
+Nodes typically leave the cluster with reason `disconnected` when they shut
+down, but if they rejoin the cluster without restarting then there is some
+other problem.
+
+{es} is designed to run on a fairly reliable network. It opens a number of TCP
+connections between nodes and expects these connections to remain open
+<>. If a connection is closed then {es} will
+try and reconnect, so the occasional blip may fail some in-flight operations
+but should otherwise have limited impact on the cluster. In contrast,
+repeatedly-dropped connections will severely affect its operation.
+
+The connections from the elected master node to every other node in the cluster
+are particularly important. The elected master never spontaneously closes its
+outbound connections to other nodes. Similarly, once an inbound connection is
+fully established, a node never spontaneously closes it unless the node is
+shutting down.
+
+If you see a node unexpectedly leave the cluster with the `disconnected`
+reason, something other than {es} likely caused the connection to close. A
+common cause is a misconfigured firewall with an improper timeout or another
+policy that's <>. It could also
+be caused by general connectivity issues, such as packet loss due to faulty
+hardware or network congestion. If you're an advanced user, configure the
+following loggers to get more detailed information about network exceptions:
+
+[source,yaml]
+----
+logger.org.elasticsearch.transport.TcpTransport: DEBUG
+logger.org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport: DEBUG
+----
+
+If these logs do not show enough information to diagnose the problem, obtain a
+packet capture simultaneously from the nodes at both ends of an unstable
+connection and analyse it alongside the {es} logs from those nodes to determine
+if traffic between the nodes is being disrupted by another device on the
+network.
+
+[discrete]
+[[troubleshooting-unstable-cluster-lagging]]
+=== Diagnosing `lagging` nodes
+
+{es} needs every node to process cluster state updates reasonably quickly. If a
+node takes too long to process a cluster state update, it can be harmful to the
+cluster. The master will remove these nodes with the `lagging` reason. Refer to
+<> for information about the settings which control
+this mechanism.
+
+Lagging is typically caused by performance issues on the removed node. However,
+a node may also lag due to severe network delays. To rule out network delays,
+ensure that `net.ipv4.tcp_retries2` is <>. Log messages that contain `warn threshold` may provide more
+information about the root cause.
+
+If you're an advanced user, you can get more detailed information about what
+the node was doing when it was removed by configuring the following logger:
+
+[source,yaml]
+----
+logger.org.elasticsearch.cluster.coordination.LagDetector: DEBUG
+----
+
+When this logger is enabled, {es} will attempt to run the
+<> API on the faulty node and report the results in
+the logs on the elected master.
The results are compressed, encoded, and split +into chunks to avoid truncation: + +[source,text] +---- +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 1]: H4sIAAAAAAAA/x... +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 2]: p7x3w1hmOQVtuV... +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 3]: v7uTboMGDbyOy+... +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 4]: 4tse0RnPnLeDNN... +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] (gzip compressed, base64-encoded, and split into 4 parts on preceding log lines) +---- + +To reconstruct the output, base64-decode the data and decompress it using +`gzip`. For instance, on Unix-like systems: + +[source,sh] +---- +cat lagdetector.log | sed -e 's/.*://' | base64 --decode | gzip --decompress +---- + +[discrete] +[[troubleshooting-unstable-cluster-follower-check]] +=== Diagnosing `follower check retry count exceeded` nodes + +Nodes sometimes leave the cluster with reason `follower check retry count +exceeded` when they shut down, but if they rejoin the cluster without +restarting then there is some other problem. + +{es} needs every node to respond to network messages successfully and +reasonably quickly. If a node rejects requests or does not respond at all then +it can be harmful to the cluster. If enough consecutive checks fail then the +master will remove the node with reason `follower check retry count exceeded` +and will indicate in the `node-left` message how many of the consecutive +unsuccessful checks failed and how many of them timed out. Refer to +<> for information about the settings which control +this mechanism. + +Timeouts and failures may be due to network delays or performance problems on +the affected nodes. Ensure that `net.ipv4.tcp_retries2` is +<> to eliminate network delays as +a possible cause for this kind of instability. Log messages containing +`warn threshold` may give further clues about the cause of the instability. + +If the last check failed with an exception then the exception is reported, and +typically indicates the problem that needs to be addressed. If any of the +checks timed out then narrow down the problem as follows. + +include::network-timeouts.asciidoc[tag=troubleshooting-network-timeouts-gc-vm] + +include::network-timeouts.asciidoc[tag=troubleshooting-network-timeouts-packet-capture-fault-detection] + +include::network-timeouts.asciidoc[tag=troubleshooting-network-timeouts-threads] + +By default the follower checks will time out after 30s, so if node departures +are unpredictable then capture stack dumps every 15s to be sure that at least +one stack dump was taken at the right time. 
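For illustration only, a minimal capture loop along these lines could be used; this sketch assumes the {es} process ID is exported as `ES_PID` and that `jstack` from the JDK running {es} is on the `PATH`:

[source,sh]
----
# Take four thread dumps, 15 seconds apart, so that at least one overlaps a 30-second follower-check window.
for i in 1 2 3 4; do
  jstack "$ES_PID" > "stackdump-$(date +%s)-$i.txt"
  sleep 15
done
----

The nodes hot threads API can serve a similar purpose when shell access to the affected node is not practical.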
+ +[discrete] +[[troubleshooting-unstable-cluster-shardlockobtainfailedexception]] +=== Diagnosing `ShardLockObtainFailedException` failures + +If a node leaves and rejoins the cluster then {es} will usually shut down and +re-initialize its shards. If the shards do not shut down quickly enough then +{es} may fail to re-initialize them due to a `ShardLockObtainFailedException`. + +To gather more information about the reason for shards shutting down slowly, +configure the following logger: + +[source,yaml] +---- +logger.org.elasticsearch.env.NodeEnvironment: DEBUG +---- + +When this logger is enabled, {es} will attempt to run the +<> API whenever it encounters a +`ShardLockObtainFailedException`. The results are compressed, encoded, and +split into chunks to avoid truncation: + +[source,text] +---- +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 1]: H4sIAAAAAAAA/x... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 2]: p7x3w1hmOQVtuV... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 3]: v7uTboMGDbyOy+... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 4]: 4tse0RnPnLeDNN... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] (gzip compressed, base64-encoded, and split into 4 parts on preceding log lines) +---- + +To reconstruct the output, base64-decode the data and decompress it using +`gzip`. For instance, on Unix-like systems: + +[source,sh] +---- +cat shardlock.log | sed -e 's/.*://' | base64 --decode | gzip --decompress +---- + +[discrete] +[[troubleshooting-unstable-cluster-network]] +=== Diagnosing other network disconnections + +{es} is designed to run on a fairly reliable network. It opens a number of TCP +connections between nodes and expects these connections to remain open +<>. If a connection is closed then {es} will +try and reconnect, so the occasional blip may fail some in-flight operations +but should otherwise have limited impact on the cluster. In contrast, +repeatedly-dropped connections will severely affect its operation. + +{es} nodes will only actively close an outbound connection to another node if +the other node leaves the cluster. See +<> for further information about +identifying and troubleshooting this situation. If an outbound connection +closes for some other reason, nodes will log a message such as the following: + +[source,text] +---- +[INFO ][o.e.t.ClusterConnectionManager] [node-1] transport connection to [{node-2}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] closed by remote +---- + +Similarly, once an inbound connection is fully established, a node never +spontaneously closes it unless the node is shutting down. + +Therefore if you see a node report that a connection to another node closed +unexpectedly, something other than {es} likely caused the connection to close. +A common cause is a misconfigured firewall with an improper timeout or another +policy that's <>. It could also +be caused by general connectivity issues, such as packet loss due to faulty +hardware or network congestion. 
If you're an advanced user, configure the
+following loggers to get more detailed information about network exceptions:
+
+[source,yaml]
+----
+logger.org.elasticsearch.transport.TcpTransport: DEBUG
+logger.org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport: DEBUG
+----
+
+If these logs do not show enough information to diagnose the problem, obtain a
+packet capture simultaneously from the nodes at both ends of an unstable
+connection and analyse it alongside the {es} logs from those nodes to determine
+if traffic between the nodes is being disrupted by another device on the
+network.
diff --git a/docs/reference/upgrade/disable-shard-alloc.asciidoc b/docs/reference/upgrade/disable-shard-alloc.asciidoc index a93b6dfc6c60b..f69a673095257 100644
--- a/docs/reference/upgrade/disable-shard-alloc.asciidoc
+++ b/docs/reference/upgrade/disable-shard-alloc.asciidoc
@@ -17,3 +17,7 @@ PUT _cluster/settings }
--------------------------------------------------
// TEST[skip:indexes don't assign]
+
+You can also consider <> when restarting
+large clusters to reduce initial strain while nodes are processing
+<>.
\ No newline at end of file
diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index 792330fd3613b..12f7776add17b 100644
--- a/gradle/build.versions.toml
+++ b/gradle/build.versions.toml
@@ -44,6 +44,6 @@ snakeyaml = { group = "org.yaml", name = "snakeyaml", version = { strictly = "2.
 spock-core = { group = "org.spockframework", name="spock-core", version.ref="spock" }
 spock-junit4 = { group = "org.spockframework", name="spock-junit4", version.ref="spock" }
 spock-platform = { group = "org.spockframework", name="spock-bom", version.ref="spock" }
-spotless-plugin = "com.diffplug.spotless:spotless-plugin-gradle:6.22.0"
+spotless-plugin = "com.diffplug.spotless:spotless-plugin-gradle:6.25.0"
 wiremock = "com.github.tomakehurst:wiremock-jre8-standalone:2.23.2"
 xmlunit-core = "org.xmlunit:xmlunit-core:2.8.2"
diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 5e26d96c4ca17..3a4f5ef9d240c 100644
--- a/gradle/verification-metadata.xml
+++ b/gradle/verification-metadata.xml
(Many dependency verification entries were added, updated, or removed in this file; the XML contents of these hunks are not recoverable in this copy of the diff and are omitted.)
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 2c3521197d7c4..a4b76b9530d66 100644
Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index efe2ff3449216..9036682bf0f0c 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
+distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
diff --git a/libs/core/src/main/java/org/elasticsearch/core/UpdateForV10.java b/libs/core/src/main/java/org/elasticsearch/core/UpdateForV10.java new file mode 100644 index 0000000000000..0fe816bd3721d --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/core/UpdateForV10.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +package org.elasticsearch.core; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to identify a block of code (a whole class, a method, or a field) that needs to be reviewed (for cleanup, remove or change) + * before releasing 10.0 + */ +@Retention(RetentionPolicy.SOURCE) +@Target({ ElementType.LOCAL_VARIABLE, ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +public @interface UpdateForV10 { +} diff --git a/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java index f3f53f1b3c5ea..3c01e490369de 100644 --- a/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java @@ -203,7 +203,7 @@ public Map parse(String inputString) { DissectKey key = dissectPair.key(); byte[] delimiter = dissectPair.delimiter().getBytes(StandardCharsets.UTF_8); // start dissection after the first delimiter - int i = leadingDelimiter.length(); + int i = leadingDelimiter.getBytes(StandardCharsets.UTF_8).length; int valueStart = i; int lookAheadMatches; // start walking the input string byte by byte, look ahead for matches where needed diff --git a/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java b/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java index 431b26fc1155d..2893e419a84a3 100644 --- a/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java +++ b/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java @@ -211,6 +211,18 @@ public void testMatchUnicode() { assertMatch("%{a->}࿏%{b}", "⟳༒࿏࿏࿏࿏࿏༒⟲", Arrays.asList("a", "b"), Arrays.asList("⟳༒", "༒⟲")); assertMatch("%{*a}࿏%{&a}", "⟳༒࿏༒⟲", Arrays.asList("⟳༒"), Arrays.asList("༒⟲")); assertMatch("%{}࿏%{a}", "⟳༒࿏༒⟲", Arrays.asList("a"), Arrays.asList("༒⟲")); + assertMatch( + "Zürich, the %{adjective} city in Switzerland", + "Zürich, the largest city in Switzerland", + Arrays.asList("adjective"), + Arrays.asList("largest") + ); + assertMatch( + "Zürich, the %{one} city in Switzerland; Zürich, the %{two} city in Switzerland", + "Zürich, the largest city in Switzerland; Zürich, the LARGEST city in Switzerland", + Arrays.asList("one", "two"), + Arrays.asList("largest", "LARGEST") + ); } public void testMatchRemainder() { diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java index d233dcc81a3fc..1e7ac3f8097e9 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownText.java @@ -43,6 +43,7 @@ public class WellKnownText { public static final String RPAREN = ")"; public static final String COMMA = ","; public static final String NAN = "NaN"; + public static final int MAX_NESTED_DEPTH = 1000; private static final String NUMBER = ""; private static final String EOF = "END-OF-STREAM"; @@ -425,7 +426,7 @@ public static Geometry fromWKT(GeometryValidator validator, boolean coerce, Stri tokenizer.whitespaceChars('\r', '\r'); tokenizer.whitespaceChars('\n', '\n'); tokenizer.commentChar('#'); - Geometry geometry = parseGeometry(tokenizer, coerce); + Geometry geometry = parseGeometry(tokenizer, coerce, 0); 
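// The nesting depth starts at 0 here; parseGeometryCollection increments it for each nested
// GEOMETRYCOLLECTION and throws a ParseException once MAX_NESTED_DEPTH is exceeded.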
validator.validate(geometry); return geometry; } finally { @@ -436,40 +437,35 @@ public static Geometry fromWKT(GeometryValidator validator, boolean coerce, Stri /** * parse geometry from the stream tokenizer */ - private static Geometry parseGeometry(StreamTokenizer stream, boolean coerce) throws IOException, ParseException { + private static Geometry parseGeometry(StreamTokenizer stream, boolean coerce, int depth) throws IOException, ParseException { final String type = nextWord(stream).toLowerCase(Locale.ROOT); - switch (type) { - case "point": - return parsePoint(stream); - case "multipoint": - return parseMultiPoint(stream); - case "linestring": - return parseLine(stream); - case "multilinestring": - return parseMultiLine(stream); - case "polygon": - return parsePolygon(stream, coerce); - case "multipolygon": - return parseMultiPolygon(stream, coerce); - case "bbox": - return parseBBox(stream); - case "geometrycollection": - return parseGeometryCollection(stream, coerce); - case "circle": // Not part of the standard, but we need it for internal serialization - return parseCircle(stream); - } - throw new IllegalArgumentException("Unknown geometry type: " + type); - } - - private static GeometryCollection parseGeometryCollection(StreamTokenizer stream, boolean coerce) throws IOException, - ParseException { + return switch (type) { + case "point" -> parsePoint(stream); + case "multipoint" -> parseMultiPoint(stream); + case "linestring" -> parseLine(stream); + case "multilinestring" -> parseMultiLine(stream); + case "polygon" -> parsePolygon(stream, coerce); + case "multipolygon" -> parseMultiPolygon(stream, coerce); + case "bbox" -> parseBBox(stream); + case "geometrycollection" -> parseGeometryCollection(stream, coerce, depth + 1); + case "circle" -> // Not part of the standard, but we need it for internal serialization + parseCircle(stream); + default -> throw new IllegalArgumentException("Unknown geometry type: " + type); + }; + } + + private static GeometryCollection parseGeometryCollection(StreamTokenizer stream, boolean coerce, int depth) + throws IOException, ParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return GeometryCollection.EMPTY; } + if (depth > MAX_NESTED_DEPTH) { + throw new ParseException("maximum nested depth of " + MAX_NESTED_DEPTH + " exceeded", stream.lineno()); + } List shapes = new ArrayList<>(); - shapes.add(parseGeometry(stream, coerce)); + shapes.add(parseGeometry(stream, coerce, depth)); while (nextCloserOrComma(stream).equals(COMMA)) { - shapes.add(parseGeometry(stream, coerce)); + shapes.add(parseGeometry(stream, coerce, depth)); } return new GeometryCollection<>(shapes); } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java index 6a7bda7f9e0bb..b3f7aa610153b 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java @@ -19,6 +19,8 @@ import java.util.Arrays; import java.util.Collections; +import static org.hamcrest.Matchers.containsString; + public class GeometryCollectionTests extends BaseGeometryTestCase> { @Override protected GeometryCollection createTestInstance(boolean hasAlt) { @@ -65,6 +67,31 @@ public void testInitValidation() { StandardValidator.instance(true).validate(new GeometryCollection(Collections.singletonList(new Point(20, 10, 30)))); } + public void testDeeplyNestedCollection() throws 
IOException, ParseException { + String wkt = makeDeeplyNestedGeometryCollectionWKT(WellKnownText.MAX_NESTED_DEPTH); + Geometry parsed = WellKnownText.fromWKT(GeographyValidator.instance(true), true, wkt); + assertEquals(WellKnownText.MAX_NESTED_DEPTH, countNestedGeometryCollections((GeometryCollection) parsed)); + } + + public void testTooDeeplyNestedCollection() { + String wkt = makeDeeplyNestedGeometryCollectionWKT(WellKnownText.MAX_NESTED_DEPTH + 1); + ParseException ex = expectThrows(ParseException.class, () -> WellKnownText.fromWKT(GeographyValidator.instance(true), true, wkt)); + assertThat(ex.getMessage(), containsString("maximum nested depth of " + WellKnownText.MAX_NESTED_DEPTH)); + } + + private String makeDeeplyNestedGeometryCollectionWKT(int depth) { + return "GEOMETRYCOLLECTION (".repeat(depth) + "POINT (20.0 10.0)" + ")".repeat(depth); + } + + private int countNestedGeometryCollections(GeometryCollection geometry) { + int count = 1; + while (geometry.get(0) instanceof GeometryCollection g) { + count += 1; + geometry = g; + } + return count; + } + @Override protected GeometryCollection mutateInstance(GeometryCollection instance) { return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java b/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java index bcf9253866931..3b10d58815169 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java @@ -8,12 +8,17 @@ package org.elasticsearch.grok; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; +import java.util.Deque; +import java.util.HashSet; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; public class PatternBank { @@ -57,52 +62,102 @@ public PatternBank extendWith(Map extraPatterns) { } /** - * Checks whether patterns reference each other in a circular manner and if so fail with an exception. + * Checks whether patterns reference each other in a circular manner and if so fail with an IllegalArgumentException. It will also + * fail if any pattern value contains a pattern name that does not exist in the bank. *

* In a pattern, anything between %{ and } or : is considered * a reference to another named pattern. This method will navigate to all these named patterns and * check for a circular reference. */ static void forbidCircularReferences(Map bank) { - // first ensure that the pattern bank contains no simple circular references (i.e., any pattern - // containing an immediate reference to itself) as those can cause the remainder of this algorithm - // to recurse infinitely - for (Map.Entry entry : bank.entrySet()) { - if (patternReferencesItself(entry.getValue(), entry.getKey())) { - throw new IllegalArgumentException("circular reference in pattern [" + entry.getKey() + "][" + entry.getValue() + "]"); + Set allVisitedNodes = new HashSet<>(); + Set nodesVisitedMoreThanOnceInAPath = new HashSet<>(); + // Walk the full path starting at each node in the graph: + for (String traversalStartNode : bank.keySet()) { + if (nodesVisitedMoreThanOnceInAPath.contains(traversalStartNode) == false && allVisitedNodes.contains(traversalStartNode)) { + // If we have seen this node before in a path, and it only appeared once in that path, there is no need to check it again + continue; } - } - - // next, recursively check any other pattern names referenced in each pattern - for (Map.Entry entry : bank.entrySet()) { - String name = entry.getKey(); - String pattern = entry.getValue(); - innerForbidCircularReferences(bank, name, new ArrayList<>(), pattern); + Set visitedFromThisStartNode = new LinkedHashSet<>(); + /* + * This stack records where we are in the graph. Each String[] in the stack represents a collection of neighbors to the first + * non-null node in the layer below it. Null means that the path from that location has been fully traversed. Once all nodes + * at a layer have been set to null, the layer is popped. So for example say we have the graph + * ( 1 -> (2 -> (4, 5, 8), 3 -> (6, 7))) then when we are at 6 via 1 -> 3 -> 6, the stack looks like this: + * [6, 7] + * [null, 3] + * [1] + */ + Deque stack = new ArrayDeque<>(); + stack.push(new String[] { traversalStartNode }); + // This is used so that we know that we're unwinding the stack and know not to get the current node's neighbors again. + boolean unwinding = false; + while (stack.isEmpty() == false) { + String[] currentLevel = stack.peek(); + int firstNonNullIndex = findFirstNonNull(currentLevel); + String node = currentLevel[firstNonNullIndex]; + boolean endOfThisPath = false; + if (unwinding) { + // We have completed all of this node's neighbors and have popped back to the node + endOfThisPath = true; + } else if (traversalStartNode.equals(node) && stack.size() > 1) { + Deque reversedPath = new ArrayDeque<>(); + for (String[] level : stack) { + reversedPath.push(level[findFirstNonNull(level)]); + } + throw new IllegalArgumentException("circular reference detected: " + String.join("->", reversedPath)); + } else if (visitedFromThisStartNode.contains(node)) { + /* + * We are only looking for a cycle starting and ending at traversalStartNode right now. But this node has been + * visited more than once in the path rooted at traversalStartNode. This could be because it is a cycle, or could be + * because two nodes in the path both point to it. We add it to nodesVisitedMoreThanOnceInAPath so that we make sure + * to check the path rooted at this node later. 
+ */ + nodesVisitedMoreThanOnceInAPath.add(node); + endOfThisPath = true; + } else { + visitedFromThisStartNode.add(node); + String[] neighbors = getPatternNamesForPattern(bank, node); + if (neighbors.length == 0) { + endOfThisPath = true; + } else { + stack.push(neighbors); + } + } + if (endOfThisPath) { + if (firstNonNullIndex == currentLevel.length - 1) { + // We have handled all the neighbors at this level -- there are no more non-null ones + stack.pop(); + unwinding = true; + } else { + currentLevel[firstNonNullIndex] = null; + unwinding = false; + } + } else { + unwinding = false; + } + } + allVisitedNodes.addAll(visitedFromThisStartNode); } } - private static void innerForbidCircularReferences(Map bank, String patternName, List path, String pattern) { - if (patternReferencesItself(pattern, patternName)) { - String message; - if (path.isEmpty()) { - message = "circular reference in pattern [" + patternName + "][" + pattern + "]"; - } else { - message = "circular reference in pattern [" - + path.remove(path.size() - 1) - + "][" - + pattern - + "] back to pattern [" - + patternName - + "]"; - // add rest of the path: - if (path.isEmpty() == false) { - message += " via patterns [" + String.join("=>", path) + "]"; - } + private static int findFirstNonNull(String[] level) { + for (int i = 0; i < level.length; i++) { + if (level[i] != null) { + return i; } - throw new IllegalArgumentException(message); } + return -1; + } - // next check any other pattern names found in the pattern + /** + * This method returns the array of pattern names (if any) found in the bank for the pattern named patternName. If no pattern names + * are found, an empty array is returned. If any of the list of pattern names to be returned does not exist in the bank, an exception + * is thrown. 
+ */ + private static String[] getPatternNamesForPattern(Map bank, String patternName) { + String pattern = bank.get(patternName); + List patternReferences = new ArrayList<>(); for (int i = pattern.indexOf("%{"); i != -1; i = pattern.indexOf("%{", i + 1)) { int begin = i + 2; int bracketIndex = pattern.indexOf('}', begin); @@ -112,25 +167,22 @@ private static void innerForbidCircularReferences(Map bank, Stri end = bracketIndex; } else if (columnIndex != -1 && bracketIndex == -1) { end = columnIndex; - } else if (bracketIndex != -1 && columnIndex != -1) { + } else if (bracketIndex != -1) { end = Math.min(bracketIndex, columnIndex); } else { throw new IllegalArgumentException("pattern [" + pattern + "] has an invalid syntax"); } String otherPatternName = pattern.substring(begin, end); - path.add(otherPatternName); - String otherPattern = bank.get(otherPatternName); - if (otherPattern == null) { - throw new IllegalArgumentException( - "pattern [" + patternName + "] is referencing a non-existent pattern [" + otherPatternName + "]" - ); + if (patternReferences.contains(otherPatternName) == false) { + patternReferences.add(otherPatternName); + String otherPattern = bank.get(otherPatternName); + if (otherPattern == null) { + throw new IllegalArgumentException( + "pattern [" + patternName + "] is referencing a non-existent pattern [" + otherPatternName + "]" + ); + } } - - innerForbidCircularReferences(bank, patternName, path, otherPattern); } - } - - private static boolean patternReferencesItself(String pattern, String patternName) { - return pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":"); + return patternReferences.toArray(new String[0]); } } diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java index b66778743aec0..5ed1a7d13b80a 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java @@ -7,12 +7,12 @@ */ package org.elasticsearch.grok; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.test.ESTestCase; import org.joni.Matcher; import org.mockito.Mockito; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -77,16 +77,17 @@ public void testIdleIfNothingRegistered() throws Exception { ); // Periodic action is not scheduled because no thread is registered verifyNoMoreInteractions(threadPool); - CompletableFuture commandFuture = new CompletableFuture<>(); + + PlainActionFuture commandFuture = new PlainActionFuture<>(); // Periodic action is scheduled because a thread is registered doAnswer(invocationOnMock -> { - commandFuture.complete((Runnable) invocationOnMock.getArguments()[0]); + commandFuture.onResponse(invocationOnMock.getArgument(0)); return null; }).when(threadPool).schedule(any(Runnable.class), eq(interval), eq(TimeUnit.MILLISECONDS)); Matcher matcher = mock(Matcher.class); watchdog.register(matcher); // Registering the first thread should have caused the command to get scheduled again - Runnable command = commandFuture.get(1L, TimeUnit.MILLISECONDS); + Runnable command = safeGet(commandFuture); Mockito.reset(threadPool); watchdog.unregister(matcher); command.run(); diff --git 
a/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java index dcc7ab431611a..08a4965cdb371 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java @@ -11,8 +11,13 @@ import org.elasticsearch.test.ESTestCase; import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.Map; -import java.util.TreeMap; +import java.util.Set; + +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.hamcrest.Matchers.containsString; public class PatternBankTests extends ESTestCase { @@ -32,7 +37,7 @@ public void testBankCannotBeNull() { public void testConstructorValidatesCircularReferences() { var e = expectThrows(IllegalArgumentException.class, () -> new PatternBank(Map.of("NAME", "!!!%{NAME}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); } public void testExtendWith() { @@ -48,36 +53,36 @@ public void testExtendWith() { public void testCircularReference() { var e = expectThrows(IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME:name}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME:name}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = expectThrows( IllegalArgumentException.class, () -> { PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME:name:int}!!!")); } ); - assertEquals("circular reference in pattern [NAME][!!!%{NAME:name:int}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals("circular reference in pattern [NAME2][!!!%{NAME1}!!!] back to pattern [NAME1]", e.getMessage()); + assertEquals("circular reference detected: NAME1->NAME2->NAME1", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME3}!!!"); bank.put("NAME3", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals("circular reference in pattern [NAME3][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2]", e.getMessage()); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME1", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME3}!!!"); bank.put("NAME3", "!!!%{NAME4}!!!"); @@ -85,10 +90,78 @@ public void testCircularReference() { bank.put("NAME5", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals( - "circular reference in pattern [NAME5][!!!%{NAME1}!!!] 
back to pattern [NAME1] via patterns [NAME2=>NAME3=>NAME4]", - e.getMessage() - ); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME4->NAME5->NAME1", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!%{NAME2}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME2->NAME3->NAME2", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME2}!!%{NAME3}!"); + bank.put("NAME3", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME1", e.getMessage()); + + { + Map bank = new HashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!%{NAME3}%{NAME4}"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", "!!!!!!"); + PatternBank.forbidCircularReferences(bank); + } + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!%{NAME3}%{NAME4}"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME4->NAME5->NAME1", e.getMessage()); + + { + Map bank = new HashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + } + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2} %{NAME3}!!!"); + bank.put("NAME2", "!!!%{NAME4} %{NAME5}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!!!!"); + bank.put("NAME5", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME2->NAME5->NAME1", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2} %{NAME3}!!!"); + bank.put("NAME2", "!!!%{NAME4} %{NAME5}!!!"); + bank.put("NAME3", "!!!%{NAME1}!!!"); + bank.put("NAME4", "!!!!!!"); + bank.put("NAME5", "!!!!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME3->NAME1", e.getMessage()); } public void testCircularSelfReference() { @@ -96,7 +169,7 @@ public void testCircularSelfReference() { IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("ANOTHER", "%{INT}", "INT", "%{INT}")) ); - assertEquals("circular reference in pattern [INT][%{INT}]", e.getMessage()); + assertEquals("circular reference detected: INT->INT", e.getMessage()); } public void testInvalidPatternReferences() { @@ -112,4 +185,80 @@ public void testInvalidPatternReferences() { ); assertEquals("pattern [%{VALID] has an invalid syntax", e.getMessage()); } + + public void testDeepGraphOfPatterns() { + Map patternBankMap = randomBoolean() ? 
new HashMap<>() : new LinkedHashMap<>(); + final int nodeCount = 20_000; + for (int i = 0; i < nodeCount - 1; i++) { + patternBankMap.put("FOO" + i, "%{FOO" + (i + 1) + "}"); + } + patternBankMap.put("FOO" + (nodeCount - 1), "foo"); + new PatternBank(patternBankMap); + } + + public void testRandomBanksWithoutCycles() { + /* + * This creates a large number of pattens, each of which refers to a large number of patterns. But there are no cycles in any of + * these since each pattern only references patterns with a higher ID. We don't expect any exceptions here. + */ + Map patternBankMap = randomBoolean() ? new HashMap<>() : new LinkedHashMap<>(); + final int nodeCount = 500; + for (int i = 0; i < nodeCount - 1; i++) { + StringBuilder patternBuilder = new StringBuilder(); + for (int j = 0; j < randomIntBetween(0, 20); j++) { + patternBuilder.append("%{FOO-" + randomIntBetween(i + 1, nodeCount - 1) + "}"); + } + patternBankMap.put("FOO-" + i, patternBuilder.toString()); + } + patternBankMap.put("FOO-" + (nodeCount - 1), "foo"); + new PatternBank(patternBankMap); + } + + public void testRandomBanksWithCycles() { + /* + * This creates a large number of pattens, each of which refers to a large number of patterns. We have at least one cycle because + * we pick a node at random, and make sure that a node that it links (or one of its descendants) to links back. If no descendant + * links back to it, we create an artificial cycle at the end. + */ + Map patternBankMap = new LinkedHashMap<>(); + final int nodeCount = 500; + int nodeToHaveCycle = randomIntBetween(0, nodeCount); + int nodeToPotentiallyCreateCycle = -1; + boolean haveCreatedCycle = false; + for (int i = 0; i < nodeCount - 1; i++) { + StringBuilder patternBuilder = new StringBuilder(); + int numberOfLinkedPatterns = randomIntBetween(1, 20); + int nodeToLinkBackIndex = randomIntBetween(0, numberOfLinkedPatterns); + Set childNodes = new HashSet<>(); + for (int j = 0; j < numberOfLinkedPatterns; j++) { + int childNode = randomIntBetween(i + 1, nodeCount - 1); + childNodes.add(childNode); + patternBuilder.append("%{FOO-" + childNode + "}"); + if (i == nodeToHaveCycle) { + if (nodeToLinkBackIndex == j) { + nodeToPotentiallyCreateCycle = childNode; + } + } + } + if (i == nodeToPotentiallyCreateCycle) { + // We either create the cycle here, or randomly pick a child node to maybe create the cycle + if (randomBoolean()) { + patternBuilder.append("%{FOO-" + nodeToHaveCycle + "}"); + haveCreatedCycle = true; + } else { + nodeToPotentiallyCreateCycle = randomFrom(childNodes); + } + } + patternBankMap.put("FOO-" + i, patternBuilder.toString()); + } + if (haveCreatedCycle) { + patternBankMap.put("FOO-" + (nodeCount - 1), "foo"); + } else { + // We didn't randomly create a cycle, so just force one in this last pattern + nodeToHaveCycle = nodeCount - 1; + patternBankMap.put("FOO-" + nodeToHaveCycle, "%{FOO-" + nodeToHaveCycle + "}"); + } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new PatternBank(patternBankMap)); + assertThat(e.getMessage(), containsString("FOO-" + nodeToHaveCycle)); + } } diff --git a/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/threadpool/ThreadPoolBridge.java b/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/threadpool/ThreadPoolBridge.java index 13218a9b206a5..30801b4f0b078 100644 --- a/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/threadpool/ThreadPoolBridge.java +++ 
b/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/threadpool/ThreadPoolBridge.java @@ -10,6 +10,7 @@ import org.elasticsearch.logstashbridge.StableBridgeAPI; import org.elasticsearch.logstashbridge.common.SettingsBridge; import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.threadpool.DefaultBuiltInExecutorBuilders; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.TimeUnit; @@ -17,7 +18,7 @@ public class ThreadPoolBridge extends StableBridgeAPI.Proxy { public ThreadPoolBridge(final SettingsBridge settingsBridge) { - this(new ThreadPool(settingsBridge.unwrap(), MeterRegistry.NOOP)); + this(new ThreadPool(settingsBridge.unwrap(), MeterRegistry.NOOP, new DefaultBuiltInExecutorBuilders())); } public ThreadPoolBridge(final ThreadPool delegate) { diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 454581ae70b51..e0233187425ea 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -8,14 +8,15 @@ package org.elasticsearch.nativeaccess.jna; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.Kernel32Library; import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; @@ -24,6 +25,10 @@ public class JnaNativeLibraryProvider extends NativeLibraryProvider { + static { + setJnaLibraryPath(); + } + public JnaNativeLibraryProvider() { super( "jna", @@ -38,8 +43,6 @@ public JnaNativeLibraryProvider() { JnaMacCLibrary::new, Kernel32Library.class, JnaKernel32Library::new, - SystemdLibrary.class, - JnaSystemdLibrary::new, ZstdLibrary.class, JnaZstdLibrary::new, VectorLibrary.class, @@ -48,6 +51,11 @@ public JnaNativeLibraryProvider() { ); } + @SuppressForbidden(reason = "jna library path must be set for load library to work with our own libs") + private static void setJnaLibraryPath() { + System.setProperty("jna.library.path", LoaderHelper.platformLibDir.toString()); + } + private static Supplier notImplemented() { return () -> { throw new AssertionError(); }; } diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java index d984d239e0b39..82a69e4864d94 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java @@ -16,6 +16,7 @@ import com.sun.jna.Pointer; import com.sun.jna.Structure; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.util.Arrays; @@ -109,6 +110,16 @@ public long bytesalloc() { } } + public static class JnaSockAddr implements 
SockAddr { + final Memory memory; + + JnaSockAddr(String path) { + this.memory = new Memory(110); + memory.setShort(0, AF_UNIX); + memory.setString(2, path, "UTF-8"); + } + } + private interface NativeFunctions extends Library { int geteuid(); @@ -126,6 +137,12 @@ private interface NativeFunctions extends Library { int close(int fd); + int socket(int domain, int type, int protocol); + + int connect(int sockfd, Pointer addr, int addrlen); + + long send(int sockfd, Pointer buf, long buflen, int flags); + String strerror(int errno); } @@ -235,6 +252,30 @@ public int fstat64(int fd, Stat64 stats) { return fstat64.fstat64(fd, jnaStats.memory); } + @Override + public int socket(int domain, int type, int protocol) { + return functions.socket(domain, type, protocol); + } + + @Override + public SockAddr newUnixSockAddr(String path) { + return new JnaSockAddr(path); + } + + @Override + public int connect(int sockfd, SockAddr addr) { + assert addr instanceof JnaSockAddr; + var jnaAddr = (JnaSockAddr) addr; + return functions.connect(sockfd, jnaAddr.memory, (int) jnaAddr.memory.size()); + } + + @Override + public long send(int sockfd, CloseableByteBuffer buffer, int flags) { + assert buffer instanceof JnaCloseableByteBuffer; + var nativeBuffer = (JnaCloseableByteBuffer) buffer; + return functions.send(sockfd, nativeBuffer.memory, nativeBuffer.buffer().remaining(), flags); + } + @Override public String strerror(int errno) { return functions.strerror(errno); diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java deleted file mode 100644 index f06361e8807c5..0000000000000 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.jna; - -import com.sun.jna.Library; -import com.sun.jna.Native; - -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; - -class JnaSystemdLibrary implements SystemdLibrary { - private interface NativeFunctions extends Library { - int sd_notify(int unset_environment, String state); - } - - private final NativeFunctions functions; - - JnaSystemdLibrary() { - this.functions = Native.load("libsystemd.so.0", NativeFunctions.class); - } - - @Override - public int sd_notify(int unset_environment, String state) { - return functions.sd_notify(unset_environment, state); - } -} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java index f6e6035a8aba6..e1ea28e8786f5 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -12,7 +12,7 @@ import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFProg; import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFilter; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.util.Map; @@ -92,7 +92,14 @@ record Arch( LinuxNativeAccess(NativeLibraryProvider libraryProvider) { super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1, 8, 64, 144, 48, 64)); this.linuxLibc = libraryProvider.getLibrary(LinuxCLibrary.class); - this.systemd = new Systemd(libraryProvider.getLibrary(SystemdLibrary.class)); + String socketPath = System.getenv("NOTIFY_SOCKET"); + if (socketPath == null) { + this.systemd = null; // not running under systemd + } else { + logger.debug("Systemd socket path: {}", socketPath); + var buffer = newBuffer(64); + this.systemd = new Systemd(libraryProvider.getLibrary(PosixCLibrary.class), socketPath, buffer); + } } @Override diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java index 2ce09e567c284..1cf08fce0c2af 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java @@ -8,6 +8,7 @@ package org.elasticsearch.nativeaccess; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; @@ -147,10 +148,22 @@ public OptionalLong allocatedSizeInBytes(Path path) { return OptionalLong.of(stats.st_blocks() * 512); } + @SuppressForbidden(reason = "Using mkdirs") @Override public void tryPreallocate(Path file, long newSize) { + var absolutePath = file.toAbsolutePath(); + var directory = absolutePath.getParent(); + directory.toFile().mkdirs(); // get fd and current size, then pass to OS variant - int fd = libc.open(file.toAbsolutePath().toString(), O_WRONLY, constants.O_CREAT()); + // We pass down O_CREAT, so open will create the file if it does not exist. 
+ // From the open man page (https://www.man7.org/linux/man-pages/man2/open.2.html): + // - The mode parameter is needed when specifying O_CREAT + // - The effective mode is modified by the process's umask: in the absence of a default ACL, the mode of the created file is + // (mode & ~umask). + // We choose to pass down 0666 (r/w permission for user/group/others) to mimic what the JDK does for its open operations; + // see for example the fileOpen implementation in libjava: + // https://github.com/openjdk/jdk/blob/98562166e4a4c8921709014423c6cbc993aa0d97/src/java.base/unix/native/libjava/io_util_md.c#L105 + int fd = libc.open(absolutePath.toString(), O_WRONLY | constants.O_CREAT(), 0666); if (fd == -1) { logger.warn("Could not open file [" + file + "] to preallocate size: " + libc.strerror(libc.errno())); return; diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java index 4deade118b788..058cfe77b1ff3 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java @@ -10,17 +10,28 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import java.util.Locale; +import java.nio.charset.StandardCharsets; +/** + * Wraps access to notifications to systemd. + *

+ * Systemd notifications are done through a Unix socket. Although Java does support + * opening unix sockets, it unfortunately does not support datagram sockets. This class + * instead opens and communicates with the socket using native methods. + */ public class Systemd { private static final Logger logger = LogManager.getLogger(Systemd.class); - private final SystemdLibrary lib; + private final PosixCLibrary libc; + private final String socketPath; + private final CloseableByteBuffer buffer; - Systemd(SystemdLibrary lib) { - this.lib = lib; + Systemd(PosixCLibrary libc, String socketPath, CloseableByteBuffer buffer) { + this.libc = libc; + this.socketPath = socketPath; + this.buffer = buffer; } /** @@ -41,15 +52,61 @@ public void notify_stopping() { } private void notify(String state, boolean warnOnError) { - int rc = lib.sd_notify(0, state); - logger.trace("sd_notify({}, {}) returned [{}]", 0, state, rc); - if (rc < 0) { - String message = String.format(Locale.ROOT, "sd_notify(%d, %s) returned error [%d]", 0, state, rc); - if (warnOnError) { - logger.warn(message); + int sockfd = libc.socket(PosixCLibrary.AF_UNIX, PosixCLibrary.SOCK_DGRAM, 0); + if (sockfd < 0) { + throwOrLog("Could not open systemd socket: " + libc.strerror(libc.errno()), warnOnError); + return; + } + RuntimeException error = null; + try { + var sockAddr = libc.newUnixSockAddr(socketPath); + if (libc.connect(sockfd, sockAddr) != 0) { + throwOrLog("Could not connect to systemd socket: " + libc.strerror(libc.errno()), warnOnError); + return; + } + + byte[] bytes = state.getBytes(StandardCharsets.US_ASCII); + final long bytesSent; + synchronized (buffer) { + buffer.buffer().clear(); + buffer.buffer().put(0, bytes); + buffer.buffer().limit(bytes.length); + bytesSent = libc.send(sockfd, buffer, 0); + } + + if (bytesSent == -1) { + throwOrLog("Failed to send message (" + state + ") to systemd socket: " + libc.strerror(libc.errno()), warnOnError); + } else if (bytesSent != bytes.length) { + throwOrLog("Not all bytes of message (" + state + ") sent to systemd socket (sent " + bytesSent + ")", warnOnError); } else { - throw new RuntimeException(message); + logger.trace("Message (" + state + ") sent to systemd"); + } + } catch (RuntimeException e) { + error = e; + } finally { + if (libc.close(sockfd) != 0) { + try { + throwOrLog("Could not close systemd socket: " + libc.strerror(libc.errno()), warnOnError); + } catch (RuntimeException e) { + if (error != null) { + error.addSuppressed(e); + throw error; + } else { + throw e; + } + } + } else if (error != null) { + throw error; } } } + + private void throwOrLog(String message, boolean warnOnError) { + if (warnOnError) { + logger.warn(message); + } else { + logger.error(message); + throw new RuntimeException(message); + } + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java new file mode 100644 index 0000000000000..42ca60b81a027 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.nativeaccess.lib; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +/** + * A utility for loading libraries from Elasticsearch's platform specific lib dir. + */ +public class LoaderHelper { + public static final Path platformLibDir = findPlatformLibDir(); + + private static Path findPlatformLibDir() { + // tests don't have an ES install, so the platform dir must be passed in explicitly + String path = System.getProperty("es.nativelibs.path"); + if (path != null) { + return Paths.get(path); + } + + Path platformDir = Paths.get("lib", "platform"); + + String osname = System.getProperty("os.name"); + String os; + if (osname.startsWith("Windows")) { + os = "windows"; + } else if (osname.startsWith("Linux")) { + os = "linux"; + } else if (osname.startsWith("Mac OS")) { + os = "darwin"; + } else { + os = "unsupported_os[" + osname + "]"; + } + String archname = System.getProperty("os.arch"); + String arch; + if (archname.equals("amd64") || archname.equals("x86_64")) { + arch = "x64"; + } else if (archname.equals("aarch64")) { + arch = archname; + } else { + arch = "unsupported_arch[" + archname + "]"; + } + return platformDir.resolve(os + "-" + arch); + } + + public static void loadLibrary(String libname) { + Path libpath = platformLibDir.resolve(System.mapLibraryName(libname)); + if (Files.exists(libpath) == false) { + throw new UnsatisfiedLinkError("Native library [" + libpath + "] does not exist"); + } + System.load(libpath.toAbsolutePath().toString()); + } + + private LoaderHelper() {} // no construction +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java index faa0e861dc63f..cdd0a56c52a90 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -9,5 +9,5 @@ package org.elasticsearch.nativeaccess.lib; /** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ -public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, SystemdLibrary, - VectorLibrary, ZstdLibrary {} +public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, VectorLibrary, + ZstdLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java index 0e7d07d0ad623..ac34fcb23b3eb 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java @@ -8,11 +8,19 @@ package org.elasticsearch.nativeaccess.lib; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; + /** * Provides access to methods in libc.so available on POSIX systems. */ public non-sealed interface PosixCLibrary extends NativeLibrary { + /** socket domain indicating unix file socket */ + short AF_UNIX = 1; + + /** socket type indicating a datagram-oriented socket */ + int SOCK_DGRAM = 2; + /** * Gets the effective userid of the current process. 
* @@ -68,8 +76,6 @@ interface Stat64 { int open(String pathname, int flags); - int close(int fd); - int fstat64(int fd, Stat64 stats); int ftruncate(int fd, long length); @@ -90,6 +96,55 @@ interface FStore { int fcntl(int fd, int cmd, FStore fst); + /** + * Open a file descriptor to connect to a socket. + * + * @param domain The socket protocol family, eg AF_UNIX + * @param type The socket type, eg SOCK_DGRAM + * @param protocol The protocol for the given protocl family, normally 0 + * @return an open file descriptor, or -1 on failure with errno set + * @see socket manpage + */ + int socket(int domain, int type, int protocol); + + /** + * Marker interface for sockaddr struct implementations. + */ + interface SockAddr {} + + /** + * Create a sockaddr for the AF_UNIX family. + */ + SockAddr newUnixSockAddr(String path); + + /** + * Connect a socket to an address. + * + * @param sockfd An open socket file descriptor + * @param addr The address to connect to + * @return 0 on success, -1 on failure with errno set + */ + int connect(int sockfd, SockAddr addr); + + /** + * Send a message to a socket. + * + * @param sockfd The open socket file descriptor + * @param buffer The message bytes to send + * @param flags Flags that may adjust how the message is sent + * @return The number of bytes sent, or -1 on failure with errno set + * @see send manpage + */ + long send(int sockfd, CloseableByteBuffer buffer, int flags); + + /** + * Close a file descriptor + * @param fd The file descriptor to close + * @return 0 on success, -1 on failure with errno set + * @see close manpage + */ + int close(int fd); + /** * Return a string description for an error. * diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java deleted file mode 100644 index 3c4ffefb6e41f..0000000000000 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.lib; - -public non-sealed interface SystemdLibrary extends NativeLibrary { - int sd_notify(int unset_environment, String state); -} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java index a3ddc0d59890d..0294b721aa6a8 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkKernel32Library.java @@ -56,7 +56,7 @@ class JdkKernel32Library implements Kernel32Library { ); private static final MethodHandle SetProcessWorkingSetSize$mh = downcallHandleWithError( "SetProcessWorkingSetSize", - FunctionDescriptor.of(ADDRESS, JAVA_LONG, JAVA_LONG) + FunctionDescriptor.of(JAVA_BOOLEAN, ADDRESS, JAVA_LONG, JAVA_LONG) ); private static final MethodHandle GetCompressedFileSizeW$mh = downcallHandleWithError( "GetCompressedFileSizeW", @@ -115,7 +115,7 @@ static class JdkAddress implements Address { @Override public Address add(long offset) { - return new JdkAddress(MemorySegment.ofAddress(address.address())); + return new JdkAddress(MemorySegment.ofAddress(address.address() + offset)); } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java index cbd43a394379b..1ac7d6c6f897d 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -14,7 +14,6 @@ import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; @@ -36,8 +35,6 @@ public JdkNativeLibraryProvider() { JdkMacCLibrary::new, Kernel32Library.class, JdkKernel32Library::new, - SystemdLibrary.class, - JdkSystemdLibrary::new, ZstdLibrary.class, JdkZstdLibrary::new, VectorLibrary.class, diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java index 7affd0614461d..f5e3132b76b56 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java @@ -10,6 +10,7 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.lang.foreign.Arena; @@ -24,8 +25,10 @@ import static java.lang.foreign.MemoryLayout.PathElement.groupElement; import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_BYTE; import static java.lang.foreign.ValueLayout.JAVA_INT; import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static java.lang.foreign.ValueLayout.JAVA_SHORT; import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; import static org.elasticsearch.nativeaccess.jdk.MemorySegmentUtil.varHandleWithoutOffset; @@ -89,6 +92,18 @@ class 
JdkPosixCLibrary implements PosixCLibrary { } fstat$mh = fstat; } + private static final MethodHandle socket$mh = downcallHandleWithErrno( + "socket", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, JAVA_INT) + ); + private static final MethodHandle connect$mh = downcallHandleWithErrno( + "connect", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS, JAVA_INT) + ); + private static final MethodHandle send$mh = downcallHandleWithErrno( + "send", + FunctionDescriptor.of(JAVA_LONG, JAVA_INT, ADDRESS, JAVA_LONG, JAVA_INT) + ); static final MemorySegment errnoState = Arena.ofAuto().allocate(CAPTURE_ERRNO_LAYOUT); @@ -226,6 +241,44 @@ public int fstat64(int fd, Stat64 stat64) { } } + @Override + public int socket(int domain, int type, int protocol) { + try { + return (int) socket$mh.invokeExact(errnoState, domain, type, protocol); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public SockAddr newUnixSockAddr(String path) { + return new JdkSockAddr(path); + } + + @Override + public int connect(int sockfd, SockAddr addr) { + assert addr instanceof JdkSockAddr; + var jdkAddr = (JdkSockAddr) addr; + try { + return (int) connect$mh.invokeExact(errnoState, sockfd, jdkAddr.segment, (int) jdkAddr.segment.byteSize()); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public long send(int sockfd, CloseableByteBuffer buffer, int flags) { + assert buffer instanceof JdkCloseableByteBuffer; + var nativeBuffer = (JdkCloseableByteBuffer) buffer; + var segment = nativeBuffer.segment; + try { + logger.info("Sending {} bytes to socket", buffer.buffer().remaining()); + return (long) send$mh.invokeExact(errnoState, sockfd, segment, (long) buffer.buffer().remaining(), flags); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + static class JdkRLimit implements RLimit { private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_LONG, JAVA_LONG); private static final VarHandle rlim_cur$vh = varHandleWithoutOffset(layout, groupElement(0)); @@ -326,4 +379,15 @@ public long bytesalloc() { return (long) st_bytesalloc$vh.get(segment); } } + + private static class JdkSockAddr implements SockAddr { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_SHORT, MemoryLayout.sequenceLayout(108, JAVA_BYTE)); + final MemorySegment segment; + + JdkSockAddr(String path) { + segment = Arena.ofAuto().allocate(layout); + segment.set(JAVA_SHORT, 0, AF_UNIX); + MemorySegmentUtil.setString(segment, 2, path); + } + } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java deleted file mode 100644 index c34c8c070edc5..0000000000000 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.jdk; - -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.lang.foreign.Arena; -import java.lang.foreign.FunctionDescriptor; -import java.lang.foreign.MemorySegment; -import java.lang.invoke.MethodHandle; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static java.lang.foreign.ValueLayout.ADDRESS; -import static java.lang.foreign.ValueLayout.JAVA_INT; -import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; - -class JdkSystemdLibrary implements SystemdLibrary { - - static { - // Find and load libsystemd. We attempt all instances of - // libsystemd in case of multiarch systems, and stop when - // one is successfully loaded. If none can be loaded, - // UnsatisfiedLinkError will be thrown. - List paths = findLibSystemd(); - if (paths.isEmpty()) { - String libpath = System.getProperty("java.library.path"); - throw new UnsatisfiedLinkError("Could not find libsystemd in java.library.path: " + libpath); - } - UnsatisfiedLinkError last = null; - for (String path : paths) { - try { - System.load(path); - last = null; - break; - } catch (UnsatisfiedLinkError e) { - last = e; - } - } - if (last != null) { - throw last; - } - } - - // findLibSystemd returns a list of paths to instances of libsystemd - // found within java.library.path. - static List findLibSystemd() { - // Note: on some systems libsystemd does not have a non-versioned symlink. - // System.loadLibrary only knows how to find non-versioned library files, - // so we must manually check the library path to find what we need. 
- final Path libsystemd = Paths.get("libsystemd.so.0"); - final String libpath = System.getProperty("java.library.path"); - final List foundPaths = new ArrayList<>(); - Arrays.stream(libpath.split(":")).map(Paths::get).filter(Files::exists).forEach(rootPath -> { - try { - Files.walkFileTree(rootPath, new SimpleFileVisitor<>() { - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) { - if (Files.isReadable(dir)) { - return FileVisitResult.CONTINUE; - } - return FileVisitResult.SKIP_SUBTREE; - } - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { - if (file.getFileName().equals(libsystemd)) { - foundPaths.add(file.toAbsolutePath().toString()); - } - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult visitFileFailed(Path file, IOException exc) { - return FileVisitResult.CONTINUE; - } - }); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - return foundPaths; - } - - private static final MethodHandle sd_notify$mh = downcallHandle("sd_notify", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS)); - - @Override - public int sd_notify(int unset_environment, String state) { - try (Arena arena = Arena.ofConfined()) { - MemorySegment nativeState = MemorySegmentUtil.allocateString(arena, state); - return (int) sd_notify$mh.invokeExact(unset_environment, nativeState); - } catch (Throwable t) { - throw new AssertionError(t); - } - } -} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java index c92ad654c9b9a..a1032f1381d94 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java @@ -9,6 +9,7 @@ package org.elasticsearch.nativeaccess.jdk; import org.elasticsearch.nativeaccess.VectorSimilarityFunctions; +import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import java.lang.foreign.FunctionDescriptor; @@ -29,7 +30,7 @@ public final class JdkVectorLibrary implements VectorLibrary { static final VectorSimilarityFunctions INSTANCE; static { - System.loadLibrary("vec"); + LoaderHelper.loadLibrary("vec"); final MethodHandle vecCaps$mh = downcallHandle("vec_caps", FunctionDescriptor.of(JAVA_INT)); try { diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java index e3e972bc19d72..284ac134d2036 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java @@ -9,6 +9,7 @@ package org.elasticsearch.nativeaccess.jdk; import org.elasticsearch.nativeaccess.CloseableByteBuffer; +import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; import java.lang.foreign.FunctionDescriptor; @@ -24,7 +25,7 @@ class JdkZstdLibrary implements ZstdLibrary { static { - System.loadLibrary("zstd"); + LoaderHelper.loadLibrary("zstd"); } private static final MethodHandle compressBound$mh = downcallHandle("ZSTD_compressBound", FunctionDescriptor.of(JAVA_LONG, JAVA_INT)); diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java 
b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index c65711af0f63f..6c4c9bd0111c0 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -22,6 +22,10 @@ static String getString(MemorySegment segment, long offset) { return segment.getUtf8String(offset); } + static void setString(MemorySegment segment, long offset, String value) { + segment.setUtf8String(offset, value); + } + static MemorySegment allocateString(Arena arena, String s) { return arena.allocateUtf8String(s); } diff --git a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index 25c449337e294..23d9919603ab4 100644 --- a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -20,6 +20,10 @@ static String getString(MemorySegment segment, long offset) { return segment.getString(offset); } + static void setString(MemorySegment segment, long offset, String value) { + segment.setString(offset, value); + } + static MemorySegment allocateString(Arena arena, String s) { return arena.allocateFrom(s); } diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/PreallocateTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/PreallocateTests.java index c5d427c3aa47b..a95f4df6aef6f 100644 --- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/PreallocateTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/PreallocateTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.nativeaccess; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.file.Path; @@ -17,8 +18,14 @@ import static org.hamcrest.Matchers.equalTo; public class PreallocateTests extends ESTestCase { - public void testPreallocate() throws IOException { + + @Before + public void setup() { assumeFalse("no preallocate on windows", System.getProperty("os.name").startsWith("Windows")); + assumeFalse("preallocate not supported on encrypted block devices", "encryption-at-rest".equals(System.getenv("BUILDKITE_LABEL"))); + } + + public void testPreallocate() throws IOException { Path file = createTempFile(); long size = 1024 * 1024; // 1 MB var nativeAccess = NativeAccess.instance(); @@ -27,4 +34,24 @@ public void testPreallocate() throws IOException { assertTrue(foundSize.isPresent()); assertThat(foundSize.getAsLong(), equalTo(size)); } + + public void testPreallocateNonExistingFile() { + Path file = createTempDir().resolve("intermediate-dir").resolve("test-preallocate"); + long size = 1024 * 1024; // 1 MB + var nativeAccess = NativeAccess.instance(); + nativeAccess.tryPreallocate(file, size); + OptionalLong foundSize = nativeAccess.allocatedSizeInBytes(file); + assertTrue(foundSize.isPresent()); + assertThat(foundSize.getAsLong(), equalTo(size)); + } + + public void testPreallocateNonExistingDirectory() { + Path file = createTempDir().resolve("intermediate-dir").resolve("test-preallocate"); + long size = 1024 * 1024; // 1 MB + var nativeAccess = NativeAccess.instance(); + nativeAccess.tryPreallocate(file, size); + OptionalLong foundSize = nativeAccess.allocatedSizeInBytes(file); + assertTrue(foundSize.isPresent()); + assertThat(foundSize.getAsLong(), 
equalTo(size)); + } } diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java index 9875878d8658a..cda4fc8c55444 100644 --- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java @@ -49,7 +49,7 @@ public void testSystemPropertyDisabled() throws Exception { "-Xms4m", "-cp", jarPath + File.pathSeparator + System.getProperty("java.class.path"), - "-Djava.library.path=" + System.getProperty("java.library.path"), + "-Des.nativelibs.path=" + System.getProperty("es.nativelibs.path"), "p.Test" ).start(); String output = new String(process.getInputStream().readAllBytes(), UTF_8); diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java index aebee89297a88..7f5b005e28470 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java @@ -106,8 +106,12 @@ public static KeyStore filter(KeyStore store, Predicate filter) { * @param certificates The root certificates to trust */ public static KeyStore buildTrustStore(Iterable certificates) throws GeneralSecurityException { + return buildTrustStore(certificates, KeyStore.getDefaultType()); + } + + public static KeyStore buildTrustStore(Iterable certificates, String type) throws GeneralSecurityException { assert certificates != null : "Cannot create keystore with null certificates"; - KeyStore store = buildNewKeyStore(); + KeyStore store = buildNewKeyStore(type); int counter = 0; for (Certificate certificate : certificates) { store.setCertificateEntry("cert-" + counter, certificate); @@ -117,7 +121,11 @@ public static KeyStore buildTrustStore(Iterable certificates) throw } private static KeyStore buildNewKeyStore() throws GeneralSecurityException { - KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + return buildNewKeyStore(KeyStore.getDefaultType()); + } + + private static KeyStore buildNewKeyStore(String type) throws GeneralSecurityException { + KeyStore keyStore = KeyStore.getInstance(type); try { keyStore.load(null, null); } catch (IOException e) { diff --git a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java index 0be2b68d76a21..fc22bda52e104 100644 --- a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java +++ b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java @@ -302,9 +302,13 @@ private void merge( addThis = projectedW <= wLimit; } if (i == 1 || i == incomingCount - 1) { - // force last centroid to never merge + // force first and last centroid to never merge addThis = false; } + if (lastUsedCell == mean.length - 1) { + // use the last centroid, there's no more + addThis = true; + } if (addThis) { // next point will fit diff --git a/libs/tdigest/src/test/java/org/elasticsearch/tdigest/MergingDigestTests.java b/libs/tdigest/src/test/java/org/elasticsearch/tdigest/MergingDigestTests.java index 16a81bad50756..9fadf2218f203 100644 --- a/libs/tdigest/src/test/java/org/elasticsearch/tdigest/MergingDigestTests.java +++ b/libs/tdigest/src/test/java/org/elasticsearch/tdigest/MergingDigestTests.java @@ -151,4 +151,14 @@ public 
void testFill() { i++; } } + + public void testLargeInputSmallCompression() { + MergingDigest td = new MergingDigest(10); + for (int i = 0; i < 10_000_000; i++) { + td.add(between(0, 3_600_000)); + } + assertTrue(td.centroidCount() < 100); + assertTrue(td.quantile(0.00001) < 100_000); + assertTrue(td.quantile(0.99999) > 3_000_000); + } } diff --git a/libs/tdigest/src/test/java/org/elasticsearch/tdigest/TDigestTests.java b/libs/tdigest/src/test/java/org/elasticsearch/tdigest/TDigestTests.java index 72b460da19da2..815346100532c 100644 --- a/libs/tdigest/src/test/java/org/elasticsearch/tdigest/TDigestTests.java +++ b/libs/tdigest/src/test/java/org/elasticsearch/tdigest/TDigestTests.java @@ -152,7 +152,7 @@ public void testQuantile() { hist2.compress(); double x1 = hist1.quantile(0.5); double x2 = hist2.quantile(0.5); - assertEquals(Dist.quantile(0.5, data), x1, 0.2); + assertEquals(Dist.quantile(0.5, data), x1, 0.25); assertEquals(x1, x2, 0.01); } diff --git a/libs/x-content/impl/build.gradle b/libs/x-content/impl/build.gradle index 41b65044735ca..6cf278e826d4c 100644 --- a/libs/x-content/impl/build.gradle +++ b/libs/x-content/impl/build.gradle @@ -12,7 +12,7 @@ base { archivesName = "x-content-impl" } -String jacksonVersion = "2.15.0" +String jacksonVersion = "2.17.2" dependencies { compileOnly project(':libs:elasticsearch-core') diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/CborXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/CborXContentImpl.java index 2a8e7a4dfa12c..3aa8323eb5495 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/CborXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/CborXContentImpl.java @@ -63,8 +63,8 @@ public XContentType type() { } @Override - public byte streamSeparator() { - throw new XContentParseException("cbor does not support stream parsing..."); + public byte bulkSeparator() { + throw new XContentParseException("cbor does not support bulk parsing..."); } @Override diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java index 2e4925b4a853e..4e04230a7486e 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentImpl.java @@ -54,6 +54,8 @@ public static final XContent jsonXContent() { jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); jsonFactory.configure(JsonParser.Feature.USE_FAST_DOUBLE_PARSER, true); + // keeping existing behavior of including source, for now + jsonFactory.configure(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION, true); jsonXContent = new JsonXContentImpl(); } @@ -65,7 +67,7 @@ public XContentType type() { } @Override - public byte streamSeparator() { + public byte bulkSeparator() { return '\n'; } diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/smile/SmileXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/smile/SmileXContentImpl.java index 3c774c582c638..83528980c2b52 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/smile/SmileXContentImpl.java +++ 
b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/smile/SmileXContentImpl.java @@ -65,7 +65,7 @@ public XContentType type() { } @Override - public byte streamSeparator() { + public byte bulkSeparator() { return (byte) 0xFF; } diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/yaml/YamlXContentImpl.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/yaml/YamlXContentImpl.java index 6a22508ba51c6..6e1496bfffd7b 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/yaml/YamlXContentImpl.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/yaml/YamlXContentImpl.java @@ -61,8 +61,8 @@ public XContentType type() { } @Override - public byte streamSeparator() { - throw new UnsupportedOperationException("yaml does not support stream parsing..."); + public byte bulkSeparator() { + throw new UnsupportedOperationException("yaml does not support bulk parsing..."); } @Override diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java index 146f90b8e2510..56eb308eaebae 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java @@ -24,7 +24,7 @@ public interface XContent { */ XContentType type(); - byte streamSeparator(); + byte bulkSeparator(); @Deprecated boolean detectContent(byte[] bytes, int offset, int length); diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java index 5037ed0b40664..add5a913faf8a 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java @@ -148,6 +148,12 @@ default void copyCurrentEvent(XContentParser parser) throws IOException { case LONG -> writeNumber(parser.longValue()); case FLOAT -> writeNumber(parser.floatValue()); case DOUBLE -> writeNumber(parser.doubleValue()); + case BIG_INTEGER -> writeNumber((BigInteger) parser.numberValue()); + // note: BIG_DECIMAL is not supported, ES only supports up to double. 
+ // BIG_INTEGER above is only for representing unsigned long + default -> { + assert false : "missing xcontent number handling for type [" + parser.numberType() + "]"; + } } break; case VALUE_BOOLEAN: @@ -158,6 +164,9 @@ default void copyCurrentEvent(XContentParser parser) throws IOException { break; case VALUE_EMBEDDED_OBJECT: writeBinary(parser.binaryValue()); + break; + default: + assert false : "missing xcontent token handling for token [" + parser.text() + "]"; } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java index be100e1a6d120..9672c73ef56df 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java @@ -151,11 +151,8 @@ public int intValue(boolean coerce) throws IOException { protected abstract int doIntValue() throws IOException; - private static BigInteger LONG_MAX_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MAX_VALUE); - private static BigInteger LONG_MIN_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MIN_VALUE); - // weak bounds on the BigDecimal representation to allow for coercion - private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); - private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); + private static final BigInteger LONG_MAX_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MAX_VALUE); + private static final BigInteger LONG_MIN_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MIN_VALUE); /** Return the long that {@code stringValue} stores or throws an exception if the * stored value cannot be converted to a long that stores the exact same @@ -170,11 +167,21 @@ private static long toLong(String stringValue, boolean coerce) { final BigInteger bigIntegerValue; try { final BigDecimal bigDecimalValue = new BigDecimal(stringValue); - if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0 - || bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) { + // long can have a maximum of 19 digits - any more than that cannot be a long + // the scale is stored as the negation, so negative scale -> big number + if (bigDecimalValue.scale() < -19) { throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); } - bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); + // large scale -> very small number + if (bigDecimalValue.scale() > 19) { + if (coerce) { + bigIntegerValue = BigInteger.ZERO; + } else { + throw new ArithmeticException("Number has a decimal part"); + } + } else { + bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); + } } catch (ArithmeticException e) { throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); } catch (NumberFormatException e) { diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java new file mode 100644 index 0000000000000..ab141f9af484c --- /dev/null +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentGeneratorTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.xcontent; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.ByteArrayOutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; + +public class XContentGeneratorTests extends ESTestCase { + + public void testCopyCurrentEventRoundtrip() throws Exception { + assertTypeCopy("null", "null"); + assertTypeCopy("string", "\"hi\""); + assertTypeCopy("integer", "1"); + assertTypeCopy("float", "1.0"); + assertTypeCopy("long", "5000000000"); + assertTypeCopy("double", "1.123456789"); + assertTypeCopy("biginteger", "18446744073709551615"); + } + + private void assertTypeCopy(String typename, String value) throws Exception { + var input = String.format(Locale.ROOT, "{\"%s\":%s,\"%s_in_array\":[%s]}", typename, value, typename, value); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + try ( + var generator = JsonXContent.jsonXContent.createGenerator(outputStream); + var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, input) + ) { + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + generator.copyCurrentEvent(parser); + } + generator.copyCurrentEvent(parser); // copy end object too + } + assertThat(outputStream.toString(StandardCharsets.UTF_8), equalTo(input)); + } +} diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java index c8df9929d007b..58cb0af79e103 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/XContentParserTests.java @@ -31,6 +31,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.junit.internal.matchers.ThrowableMessageMatcher.hasMessage; @@ -74,6 +75,73 @@ public void testFloat() throws IOException { } } + public void testLongCoercion() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + builder.startObject(); + + builder.field("five", "5.5"); + builder.field("minusFive", "-5.5"); + + builder.field("minNegative", "-9.2233720368547758089999e18"); + builder.field("tooNegative", "-9.223372036854775809e18"); + builder.field("maxPositive", "9.2233720368547758079999e18"); + builder.field("tooPositive", "9.223372036854775808e18"); + + builder.field("expTooBig", "2e100"); + builder.field("minusExpTooBig", "-2e100"); + builder.field("maxPositiveExp", "1e2147483647"); + builder.field("tooPositiveExp", "1e2147483648"); + + builder.field("expTooSmall", "2e-100"); + builder.field("minusExpTooSmall", "-2e-100"); + builder.field("maxNegativeExp", "1e-2147483647"); + + builder.field("tooNegativeExp", "1e-2147483648"); + + builder.endObject(); + + try (XContentParser parser = 
createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + assertThat(parser.nextToken(), is(XContentParser.Token.START_OBJECT)); + + assertFieldWithValue("five", 5L, parser); + assertFieldWithValue("minusFive", -5L, parser); // Rounds toward zero + + assertFieldWithValue("minNegative", Long.MIN_VALUE, parser); + assertFieldWithInvalidLongValue("tooNegative", parser); + assertFieldWithValue("maxPositive", Long.MAX_VALUE, parser); + assertFieldWithInvalidLongValue("tooPositive", parser); + + assertFieldWithInvalidLongValue("expTooBig", parser); + assertFieldWithInvalidLongValue("minusExpTooBig", parser); + assertFieldWithInvalidLongValue("maxPositiveExp", parser); + assertFieldWithInvalidLongValue("tooPositiveExp", parser); + + // too small goes to zero + assertFieldWithValue("expTooSmall", 0L, parser); + assertFieldWithValue("minusExpTooSmall", 0L, parser); + assertFieldWithValue("maxNegativeExp", 0L, parser); + + assertFieldWithInvalidLongValue("tooNegativeExp", parser); + } + } + } + + private static void assertFieldWithValue(String fieldName, long fieldValue, XContentParser parser) throws IOException { + assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME)); + assertThat(parser.currentName(), is(fieldName)); + assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING)); + assertThat(parser.longValue(), equalTo(fieldValue)); + } + + private static void assertFieldWithInvalidLongValue(String fieldName, XContentParser parser) throws IOException { + assertThat(parser.nextToken(), is(XContentParser.Token.FIELD_NAME)); + assertThat(parser.currentName(), is(fieldName)); + assertThat(parser.nextToken(), is(XContentParser.Token.VALUE_STRING)); + expectThrows(IllegalArgumentException.class, parser::longValue); + } + public void testReadList() throws IOException { assertThat(readList("{\"foo\": [\"bar\"]}"), contains("bar")); assertThat(readList("{\"foo\": [\"bar\",\"baz\"]}"), contains("bar", "baz")); diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsUnlimitedDimensionsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsUnlimitedDimensionsIT.java index 18b24123e6cf0..63f58bbb75713 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsUnlimitedDimensionsIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesAggregationsUnlimitedDimensionsIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.aggregations.AggregationIntegTestCase; import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -102,15 +101,11 @@ private CreateIndexResponse prepareTimeSeriesIndex( final String[] routingDimensions ) { return prepareCreate("index").setSettings( - Settings.builder() - .put("mode", "time_series") + indexSettings(randomIntBetween(1, 3), randomIntBetween(1, 3)).put("mode", "time_series") .put("routing_path", String.join(",", routingDimensions)) - .put("index.number_of_shards", randomIntBetween(1, 3)) - .put("index.number_of_replicas", randomIntBetween(1, 3)) .put("time_series.start_time", 
startMillis) .put("time_series.end_time", endMillis) .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192) - .build() ).setMapping(mapping).get(); } diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java index 3287f50ab1739..2967e6f5e322f 100644 --- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.aggregations.AggregationIntegTestCase; import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -103,15 +102,11 @@ private CreateIndexResponse prepareTimeSeriesIndex( final String[] routingDimensions ) { return prepareCreate("index").setSettings( - Settings.builder() - .put("mode", "time_series") + indexSettings(randomIntBetween(1, 3), randomIntBetween(1, 3)).put("mode", "time_series") .put("routing_path", String.join(",", routingDimensions)) - .put("index.number_of_shards", randomIntBetween(1, 3)) - .put("index.number_of_replicas", randomIntBetween(1, 3)) .put("time_series.start_time", startMillis) .put("time_series.end_time", endMillis) .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192) - .build() ).setMapping(mapping).get(); } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 1e3042f8cf1e4..b813c9ec50c83 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -213,7 +213,7 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { bucketInnerInterval = 1; // Calculated on merge. 
} // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.V_8_14_0)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingLong(b -> b.key)); } diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 77fd095806d10..1fc42a1b294fe 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -36,3 +36,6 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search.query/50_queries_with_synonyms/Test common terms query with stacked tokens", "#42654 - `common` query throws an exception") } +artifacts { + restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java index e8164bfbb8f36..4be28483e0d30 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java @@ -13,6 +13,9 @@ import org.elasticsearch.action.admin.indices.analyze.ReloadAnalyzersRequest; import org.elasticsearch.action.admin.indices.analyze.ReloadAnalyzersResponse; import org.elasticsearch.action.admin.indices.analyze.TransportReloadAnalyzersAction; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -24,13 +27,14 @@ import java.io.PrintWriter; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.nio.file.OpenOption; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -55,9 +59,8 @@ private void testSynonymsUpdate(boolean preview) throws FileNotFoundException, I Path config = internalCluster().getInstance(Environment.class).configFile(); String synonymsFileName = "synonyms.txt"; Path synonymsFile = config.resolve(synonymsFileName); - try (PrintWriter out = new PrintWriter(new OutputStreamWriter(Files.newOutputStream(synonymsFile), StandardCharsets.UTF_8))) { - out.println("foo, baz"); - } + writeFile(synonymsFile, "foo, baz"); + assertAcked( indicesAdmin().prepareCreate("test") .setSettings( @@ -75,48 +78,114 @@ private void testSynonymsUpdate(boolean preview) throws FileNotFoundException, I assertHitCount(prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")), 1L); assertHitCount(prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "buzz")), 0L); - Response analyzeResponse = 
indicesAdmin().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get(); - assertEquals(2, analyzeResponse.getTokens().size()); - assertEquals("foo", analyzeResponse.getTokens().get(0).getTerm()); - assertEquals("baz", analyzeResponse.getTokens().get(1).getTerm()); + assertAnalysis("test", "my_synonym_analyzer", "foo", Set.of("foo", "baz")); // now update synonyms file several times and trigger reloading for (int i = 0; i < 10; i++) { String testTerm = randomAlphaOfLength(10); - try ( - PrintWriter out = new PrintWriter( - new OutputStreamWriter(Files.newOutputStream(synonymsFile, StandardOpenOption.WRITE), StandardCharsets.UTF_8) - ) - ) { - out.println("foo, baz, " + testTerm); - } + writeFile(synonymsFile, "foo, baz, " + testTerm, StandardOpenOption.WRITE); + ReloadAnalyzersResponse reloadResponse = client().execute( TransportReloadAnalyzersAction.TYPE, new ReloadAnalyzersRequest(null, preview, "test") ).actionGet(); - assertNoFailures(reloadResponse); - assertEquals(cluster().numDataNodes(), reloadResponse.getSuccessfulShards()); - assertTrue(reloadResponse.getReloadDetails().containsKey("test")); - assertEquals("test", reloadResponse.getReloadDetails().get("test").getIndexName()); - assertEquals( - Collections.singleton("my_synonym_analyzer"), - reloadResponse.getReloadDetails().get("test").getReloadedAnalyzers() - ); - - analyzeResponse = indicesAdmin().prepareAnalyze("test", "foo").setAnalyzer("my_synonym_analyzer").get(); - int expectedTokens = preview ? 2 : 3; - assertEquals(expectedTokens, analyzeResponse.getTokens().size()); - Set tokens = new HashSet<>(); - analyzeResponse.getTokens().stream().map(AnalyzeToken::getTerm).forEach(t -> tokens.add(t)); - assertTrue(tokens.contains("foo")); - assertTrue(tokens.contains("baz")); - if (preview == false) { - assertTrue(tokens.contains(testTerm)); - } + assertReloadAnalyzers(reloadResponse, cluster().numDataNodes(), Map.of("test", Set.of("my_synonym_analyzer"))); + + Set expectedTokens = preview ? Set.of("foo", "baz") : Set.of("foo", "baz", testTerm); + assertAnalysis("test", "my_synonym_analyzer", "foo", expectedTokens); assertHitCount(prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", "baz")), 1L); long expectedHitCount = preview ? 
0L : 1L; assertHitCount(prepareSearch("test").setQuery(QueryBuilders.matchQuery("field", testTerm)), expectedHitCount); } } + + public void testSynonymsUpdateInvalid() throws IOException { + final String indexName = "test-invalid"; + final String synonymsFileName = "synonyms.txt"; + final String fieldName = "field"; + + Path config = internalCluster().getInstance(Environment.class).configFile(); + Path synonymsFile = config.resolve(synonymsFileName); + writeFile(synonymsFile, "foo, baz"); + + assertAcked( + indicesAdmin().prepareCreate(indexName) + .setSettings( + indexSettings(cluster().numDataNodes(), 0).put("analysis.analyzer.my_synonym_analyzer.tokenizer", "standard") + .putList("analysis.analyzer.my_synonym_analyzer.filter", "my_stop_filter", "my_synonym_filter") + .put("analysis.filter.my_stop_filter.type", "stop") + .put("analysis.filter.my_stop_filter.stopwords", "bar") + .put("analysis.filter.my_synonym_filter.type", "synonym") + .put("analysis.filter.my_synonym_filter.updateable", "true") + .put("analysis.filter.my_synonym_filter.synonyms_path", synonymsFileName) + ) + .setMapping(fieldName, "type=text,analyzer=standard,search_analyzer=my_synonym_analyzer") + ); + ensureGreen(indexName); + + prepareIndex(indexName).setId("1").setSource(fieldName, "foo").get(); + assertNoFailures(indicesAdmin().prepareRefresh(indexName).get()); + + assertHitCount(prepareSearch(indexName).setQuery(QueryBuilders.matchQuery(fieldName, "baz")), 1); + assertHitCount(prepareSearch(indexName).setQuery(QueryBuilders.matchQuery(fieldName, "buzz")), 0); + assertAnalysis(indexName, "my_synonym_analyzer", "foo", Set.of("foo", "baz")); + + // Add an invalid synonym to the file and reload analyzers + writeFile(synonymsFile, "foo, baz, bar, buzz"); + ReloadAnalyzersResponse reloadResponse = client().execute( + TransportReloadAnalyzersAction.TYPE, + new ReloadAnalyzersRequest(null, false, indexName) + ).actionGet(); + assertReloadAnalyzers(reloadResponse, cluster().numDataNodes(), Map.of(indexName, Set.of("my_synonym_analyzer"))); + + ensureGreen(indexName); + assertHitCount(prepareSearch(indexName).setQuery(QueryBuilders.matchQuery(fieldName, "baz")), 1); + assertHitCount(prepareSearch(indexName).setQuery(QueryBuilders.matchQuery(fieldName, "buzz")), 1); + assertAnalysis(indexName, "my_synonym_analyzer", "foo", Set.of("foo", "baz", "buzz")); + + // Reload the index + reloadIndex(indexName); + ensureGreen(indexName); + assertHitCount(prepareSearch(indexName).setQuery(QueryBuilders.matchQuery(fieldName, "baz")), 1); + assertHitCount(prepareSearch(indexName).setQuery(QueryBuilders.matchQuery(fieldName, "buzz")), 1); + assertAnalysis(indexName, "my_synonym_analyzer", "foo", Set.of("foo", "baz", "buzz")); + } + + private static void writeFile(Path path, String contents, OpenOption... 
options) throws IOException { + try (PrintWriter out = new PrintWriter(new OutputStreamWriter(Files.newOutputStream(path, options), StandardCharsets.UTF_8))) { + out.println(contents); + } + } + + private static void assertAnalysis(String indexName, String analyzerName, String input, Set expectedTokens) { + Response response = indicesAdmin().prepareAnalyze(indexName, input).setAnalyzer(analyzerName).get(); + Set actualTokens = response.getTokens().stream().map(AnalyzeToken::getTerm).collect(Collectors.toSet()); + assertEquals(expectedTokens, actualTokens); + } + + private static void assertReloadAnalyzers( + ReloadAnalyzersResponse response, + int expectedSuccessfulShards, + Map> expectedIndicesAndAnalyzers + ) { + assertNoFailures(response); + assertEquals(expectedSuccessfulShards, response.getSuccessfulShards()); + + assertEquals(expectedIndicesAndAnalyzers.size(), response.getReloadDetails().size()); + for (var expectedIndexAndAnalyzers : expectedIndicesAndAnalyzers.entrySet()) { + String expectedIndexName = expectedIndexAndAnalyzers.getKey(); + Set expectedAnalyzers = expectedIndexAndAnalyzers.getValue(); + + ReloadAnalyzersResponse.ReloadDetails reloadDetails = response.getReloadDetails().get(expectedIndexName); + assertNotNull(reloadDetails); + assertEquals(expectedAnalyzers, reloadDetails.getReloadedAnalyzers()); + } + } + + private static void reloadIndex(String indexName) { + final TimeValue timeout = TimeValue.timeValueSeconds(30); + assertAcked(indicesAdmin().close(new CloseIndexRequest(indexName)).actionGet(timeout)); + assertAcked(indicesAdmin().open(new OpenIndexRequest(indexName)).actionGet(timeout)); + } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymGraphTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymGraphTokenFilterFactory.java index 9626e44a74bd3..7691253313d50 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymGraphTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymGraphTokenFilterFactory.java @@ -51,8 +51,8 @@ public TokenFilterFactory getChainAwareTokenFilterFactory( Function allFilters ) { final Analyzer analyzer = buildSynonymAnalyzer(tokenizer, charFilters, previousTokenFilters); - ReaderWithOrigin rulesFromSettings = getRulesFromSettings(environment, context); - final SynonymMap synonyms = buildSynonyms(analyzer, rulesFromSettings); + ReaderWithOrigin rulesReader = synonymsSource.getRulesReader(this, context); + final SynonymMap synonyms = buildSynonyms(analyzer, rulesReader); final String name = name(); return new TokenFilterFactory() { @Override @@ -72,7 +72,7 @@ public AnalysisMode getAnalysisMode() { @Override public String getResourceName() { - return rulesFromSettings.resource(); + return rulesReader.resource(); } }; } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java index 0462511f2ef87..762de01b93d51 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java @@ -18,6 +18,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexService.IndexCreationContext; import 
org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.Analysis; import org.elasticsearch.index.analysis.AnalysisMode; @@ -34,6 +35,101 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { + protected enum SynonymsSource { + INLINE("synonyms") { + @Override + public ReaderWithOrigin getRulesReader(SynonymTokenFilterFactory factory, IndexCreationContext context) { + List rulesList = Analysis.getWordList( + factory.environment, + factory.settings, + SynonymsSource.INLINE.getSettingName() + ); + StringBuilder sb = new StringBuilder(); + for (String line : rulesList) { + sb.append(line).append(System.lineSeparator()); + } + return new ReaderWithOrigin(new StringReader(sb.toString()), "'" + factory.name() + "' analyzer settings"); + } + }, + INDEX("synonyms_set") { + @Override + public ReaderWithOrigin getRulesReader(SynonymTokenFilterFactory factory, IndexCreationContext context) { + if (factory.analysisMode != AnalysisMode.SEARCH_TIME) { + throw new IllegalArgumentException( + "Can't apply [" + + SynonymsSource.INDEX.getSettingName() + + "]! Loading synonyms from index is supported only for search time synonyms!" + ); + } + String synonymsSet = factory.settings.get(SynonymsSource.INDEX.getSettingName(), null); + // provide empty synonyms on index creation and index metadata checks to ensure that we + // don't block a master thread + ReaderWithOrigin reader; + if (context != IndexCreationContext.RELOAD_ANALYZERS) { + reader = new ReaderWithOrigin( + new StringReader(""), + "fake empty [" + synonymsSet + "] synonyms_set in .synonyms index", + synonymsSet + ); + } else { + reader = new ReaderWithOrigin( + Analysis.getReaderFromIndex(synonymsSet, factory.synonymsManagementAPIService), + "[" + synonymsSet + "] synonyms_set in .synonyms index", + synonymsSet + ); + } + + return reader; + } + }, + LOCAL_FILE("synonyms_path") { + @Override + public ReaderWithOrigin getRulesReader(SynonymTokenFilterFactory factory, IndexCreationContext context) { + String synonymsPath = factory.settings.get(SynonymsSource.LOCAL_FILE.getSettingName(), null); + return new ReaderWithOrigin( + // Pass the inline setting name because "_path" is appended by getReaderFromFile + Analysis.getReaderFromFile(factory.environment, synonymsPath, SynonymsSource.INLINE.getSettingName()), + synonymsPath + ); + } + }; + + private final String settingName; + + SynonymsSource(String settingName) { + this.settingName = settingName; + } + + public abstract ReaderWithOrigin getRulesReader(SynonymTokenFilterFactory factory, IndexCreationContext context); + + public String getSettingName() { + return settingName; + } + + public static SynonymsSource fromSettings(Settings settings) { + SynonymsSource synonymsSource; + if (settings.hasValue(SynonymsSource.INLINE.getSettingName())) { + synonymsSource = SynonymsSource.INLINE; + } else if (settings.hasValue(SynonymsSource.INDEX.getSettingName())) { + synonymsSource = SynonymsSource.INDEX; + } else if (settings.hasValue(SynonymsSource.LOCAL_FILE.getSettingName())) { + synonymsSource = SynonymsSource.LOCAL_FILE; + } else { + throw new IllegalArgumentException( + "synonym requires either `" + + SynonymsSource.INLINE.getSettingName() + + "`, `" + + SynonymsSource.INDEX.getSettingName() + + "` or `" + + SynonymsSource.LOCAL_FILE.getSettingName() + + "` to be configured" + ); + } + + return synonymsSource; + } + } + private static final 
DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(SynonymTokenFilterFactory.class); private final String format; @@ -43,6 +139,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { protected final Environment environment; protected final AnalysisMode analysisMode; private final SynonymsManagementAPIService synonymsManagementAPIService; + protected final SynonymsSource synonymsSource; SynonymTokenFilterFactory( IndexSettings indexSettings, @@ -62,10 +159,15 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { + "Instead, insert a lowercase filter in the filter chain before the synonym_graph filter." ); } + + this.synonymsSource = SynonymsSource.fromSettings(settings); this.expand = settings.getAsBoolean("expand", true); - this.lenient = settings.getAsBoolean("lenient", false); this.format = settings.get("format", ""); boolean updateable = settings.getAsBoolean("updateable", false); + this.lenient = settings.getAsBoolean( + "lenient", + indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.LENIENT_UPDATEABLE_SYNONYMS) && updateable + ); this.analysisMode = updateable ? AnalysisMode.SEARCH_TIME : AnalysisMode.ALL; this.environment = env; this.synonymsManagementAPIService = synonymsManagementAPIService; @@ -90,8 +192,8 @@ public TokenFilterFactory getChainAwareTokenFilterFactory( Function allFilters ) { final Analyzer analyzer = buildSynonymAnalyzer(tokenizer, charFilters, previousTokenFilters); - ReaderWithOrigin rulesFromSettings = getRulesFromSettings(environment, context); - final SynonymMap synonyms = buildSynonyms(analyzer, rulesFromSettings); + ReaderWithOrigin rulesReader = synonymsSource.getRulesReader(this, context); + final SynonymMap synonyms = buildSynonyms(analyzer, rulesReader); final String name = name(); return new TokenFilterFactory() { @Override @@ -119,7 +221,7 @@ public AnalysisMode getAnalysisMode() { @Override public String getResourceName() { - return rulesFromSettings.resource(); + return rulesReader.resource(); } }; } @@ -152,43 +254,6 @@ SynonymMap buildSynonyms(Analyzer analyzer, ReaderWithOrigin rules) { } } - protected ReaderWithOrigin getRulesFromSettings(Environment env, IndexCreationContext context) { - if (settings.getAsList("synonyms", null) != null) { - List rulesList = Analysis.getWordList(env, settings, "synonyms"); - StringBuilder sb = new StringBuilder(); - for (String line : rulesList) { - sb.append(line).append(System.lineSeparator()); - } - return new ReaderWithOrigin(new StringReader(sb.toString()), "'" + name() + "' analyzer settings"); - } else if (settings.get("synonyms_set") != null) { - if (analysisMode != AnalysisMode.SEARCH_TIME) { - throw new IllegalArgumentException( - "Can't apply [synonyms_set]! " + "Loading synonyms from index is supported only for search time synonyms!" 
- ); - } - String synonymsSet = settings.get("synonyms_set", null); - // provide fake synonyms on index creation and index metadata checks to ensure that we - // don't block a master thread - if (context != IndexCreationContext.RELOAD_ANALYZERS) { - return new ReaderWithOrigin( - new StringReader("fake rule => fake"), - "fake [" + synonymsSet + "] synonyms_set in .synonyms index", - synonymsSet - ); - } - return new ReaderWithOrigin( - Analysis.getReaderFromIndex(synonymsSet, synonymsManagementAPIService), - "[" + synonymsSet + "] synonyms_set in .synonyms index", - synonymsSet - ); - } else if (settings.get("synonyms_path") != null) { - String synonyms_path = settings.get("synonyms_path", null); - return new ReaderWithOrigin(Analysis.getReaderFromFile(env, synonyms_path, "synonyms_path"), synonyms_path); - } else { - throw new IllegalArgumentException("synonym requires either `synonyms`, `synonyms_set` or `synonyms_path` to be configured"); - } - } - record ReaderWithOrigin(Reader reader, String origin, String resource) { ReaderWithOrigin(Reader reader, String origin) { this(reader, origin, null); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index 8758c2478e873..7a2bd2a822988 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -14,6 +14,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -36,6 +37,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.Set; +import java.util.function.BiConsumer; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -78,7 +80,7 @@ public void testSynonymsAnalysis() throws IOException { } public void testSynonymWordDeleteByAnalyzer() throws IOException { - Settings settings = Settings.builder() + Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_synonym.type", "synonym") @@ -86,17 +88,49 @@ public void testSynonymWordDeleteByAnalyzer() throws IOException { .put("index.analysis.filter.stop_within_synonym.type", "stop") .putList("index.analysis.filter.stop_within_synonym.stopwords", "kimchy", "elasticsearch") .put("index.analysis.analyzer.synonymAnalyzerWithStopSynonymBeforeSynonym.tokenizer", "whitespace") - .putList("index.analysis.analyzer.synonymAnalyzerWithStopSynonymBeforeSynonym.filter", "stop_within_synonym", "my_synonym") - .build(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - try { + .putList("index.analysis.analyzer.synonymAnalyzerWithStopSynonymBeforeSynonym.filter", "stop_within_synonym", "my_synonym"); + + CheckedBiConsumer assertIsLenient = (iv, updateable) -> { + Settings settings = settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, iv) + .put("index.analysis.filter.my_synonym.updateable", updateable) + .build(); + 
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; - fail("fail! due to synonym word deleted by analyzer"); - } catch (Exception e) { - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), startsWith("failed to build synonyms")); - assertThat(e.getMessage(), containsString("['my_synonym' analyzer settings]")); - } + match("synonymAnalyzerWithStopSynonymBeforeSynonym", "kimchy is the dude abides", "is the dude man!"); + }; + + BiConsumer assertIsNotLenient = (iv, updateable) -> { + Settings settings = settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, iv) + .put("index.analysis.filter.my_synonym.updateable", updateable) + .build(); + try { + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; + fail("fail! due to synonym word deleted by analyzer"); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), startsWith("failed to build synonyms")); + assertThat(e.getMessage(), containsString("['my_synonym' analyzer settings]")); + } + }; + + // Test with an index version where lenient should always be false by default + IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.INDEX_SORTING_ON_NESTED + ); + assertIsNotLenient.accept(randomNonLenientIndexVersion, false); + assertIsNotLenient.accept(randomNonLenientIndexVersion, true); + + // Test with an index version where the default lenient value is based on updateable + IndexVersion randomLenientIndexVersion = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.LENIENT_UPDATEABLE_SYNONYMS, + IndexVersion.current() + ); + assertIsNotLenient.accept(randomLenientIndexVersion, false); + assertIsLenient.accept(randomLenientIndexVersion, true); } public void testSynonymWordDeleteByAnalyzerFromFile() throws IOException { @@ -106,7 +140,7 @@ public void testSynonymWordDeleteByAnalyzerFromFile() throws IOException { Files.createDirectory(config); Files.copy(synonyms, config.resolve("synonyms.txt")); - Settings settings = Settings.builder() + Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put("path.home", home) .put("index.analysis.filter.my_synonym.type", "synonym") @@ -114,20 +148,52 @@ public void testSynonymWordDeleteByAnalyzerFromFile() throws IOException { .put("index.analysis.filter.stop_within_synonym.type", "stop") .putList("index.analysis.filter.stop_within_synonym.stopwords", "kimchy", "elasticsearch") .put("index.analysis.analyzer.synonymAnalyzerWithStopSynonymBeforeSynonym.tokenizer", "whitespace") - .putList("index.analysis.analyzer.synonymAnalyzerWithStopSynonymBeforeSynonym.filter", "stop_within_synonym", "my_synonym") - .build(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - try { + .putList("index.analysis.analyzer.synonymAnalyzerWithStopSynonymBeforeSynonym.filter", "stop_within_synonym", "my_synonym"); + + CheckedBiConsumer assertIsLenient = (iv, updateable) -> { + Settings settings = settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, iv) + .put("index.analysis.filter.my_synonym.updateable", updateable) + .build(); + IndexSettings 
idxSettings = IndexSettingsModule.newIndexSettings("index", settings); indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; - fail("fail! due to synonym word deleted by analyzer"); - } catch (Exception e) { - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), equalTo("failed to build synonyms from [synonyms.txt]")); - } + match("synonymAnalyzerWithStopSynonymBeforeSynonym", "kimchy is the dude abides", "is the dude man!"); + }; + + BiConsumer assertIsNotLenient = (iv, updateable) -> { + Settings settings = settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, iv) + .put("index.analysis.filter.my_synonym.updateable", updateable) + .build(); + try { + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; + fail("fail! due to synonym word deleted by analyzer"); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), equalTo("failed to build synonyms from [synonyms.txt]")); + } + }; + + // Test with an index version where lenient should always be false by default + IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.INDEX_SORTING_ON_NESTED + ); + assertIsNotLenient.accept(randomNonLenientIndexVersion, false); + assertIsNotLenient.accept(randomNonLenientIndexVersion, true); + + // Test with an index version where the default lenient value is based on updateable + IndexVersion randomLenientIndexVersion = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.LENIENT_UPDATEABLE_SYNONYMS, + IndexVersion.current() + ); + assertIsNotLenient.accept(randomLenientIndexVersion, false); + assertIsLenient.accept(randomLenientIndexVersion, true); } public void testExpandSynonymWordDeleteByAnalyzer() throws IOException { - Settings settings = Settings.builder() + Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonym_expand.type", "synonym") @@ -135,17 +201,49 @@ public void testExpandSynonymWordDeleteByAnalyzer() throws IOException { .put("index.analysis.filter.stop_within_synonym.type", "stop") .putList("index.analysis.filter.stop_within_synonym.stopwords", "kimchy", "elasticsearch") .put("index.analysis.analyzer.synonymAnalyzerExpandWithStopBeforeSynonym.tokenizer", "whitespace") - .putList("index.analysis.analyzer.synonymAnalyzerExpandWithStopBeforeSynonym.filter", "stop_within_synonym", "synonym_expand") - .build(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - try { + .putList("index.analysis.analyzer.synonymAnalyzerExpandWithStopBeforeSynonym.filter", "stop_within_synonym", "synonym_expand"); + + CheckedBiConsumer assertIsLenient = (iv, updateable) -> { + Settings settings = settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, iv) + .put("index.analysis.filter.synonym_expand.updateable", updateable) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; - fail("fail! 
due to synonym word deleted by analyzer"); - } catch (Exception e) { - assertThat(e, instanceOf(IllegalArgumentException.class)); - assertThat(e.getMessage(), startsWith("failed to build synonyms")); - assertThat(e.getMessage(), containsString("['synonym_expand' analyzer settings]")); - } + match("synonymAnalyzerExpandWithStopBeforeSynonym", "kimchy is the dude abides", "is the dude abides man!"); + }; + + BiConsumer assertIsNotLenient = (iv, updateable) -> { + Settings settings = settingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, iv) + .put("index.analysis.filter.synonym_expand.updateable", updateable) + .build(); + try { + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; + fail("fail! due to synonym word deleted by analyzer"); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), startsWith("failed to build synonyms")); + assertThat(e.getMessage(), containsString("['synonym_expand' analyzer settings]")); + } + }; + + // Test with an index version where lenient should always be false by default + IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.INDEX_SORTING_ON_NESTED + ); + assertIsNotLenient.accept(randomNonLenientIndexVersion, false); + assertIsNotLenient.accept(randomNonLenientIndexVersion, true); + + // Test with an index version where the default lenient value is based on updateable + IndexVersion randomLenientIndexVersion = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.LENIENT_UPDATEABLE_SYNONYMS, + IndexVersion.current() + ); + assertIsNotLenient.accept(randomLenientIndexVersion, false); + assertIsLenient.accept(randomLenientIndexVersion, true); } public void testSynonymsWrappedByMultiplexer() throws IOException { @@ -272,7 +370,6 @@ public void testTokenFiltersBypassSynonymAnalysis() throws IOException { for (String factory : bypassingFactories) { TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings); TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings); - SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, null, "synonym", settings, null); Analyzer analyzer = SynonymTokenFilterFactory.buildSynonymAnalyzer( tok, Collections.emptyList(), @@ -348,7 +445,6 @@ public void testDisallowedTokenFilters() throws IOException { for (String factory : disallowedFactories) { TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings); TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings); - SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, null, "synonym", settings, null); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -372,7 +468,7 @@ private void match(String analyzerName, String source, String target) throws IOE sb.append(termAtt.toString()).append(" "); } - MatcherAssert.assertThat(target, equalTo(sb.toString().trim())); + MatcherAssert.assertThat(sb.toString().trim(), equalTo(target)); } } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/15_analyze.yml similarity 
index 100% rename from modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/15_analyze.yml diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java index 382fc9417eac0..831e2f19e0126 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java @@ -38,6 +38,7 @@ import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.telemetry.metric.MeterRegistry; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.concurrent.locks.ReentrantLock; @@ -96,11 +97,11 @@ public DoubleCounter getDoubleCounter(String name) { } @Override - public DoubleAsyncCounter registerDoubleAsyncCounter( + public DoubleAsyncCounter registerDoublesAsyncCounter( String name, String description, String unit, - Supplier observer + Supplier> observer ) { try (ReleasableLock lock = registerLock.acquire()) { return register(doubleAsynchronousCounters, new DoubleAsyncCounterAdapter(meter, name, description, unit, observer)); @@ -125,7 +126,12 @@ public DoubleUpDownCounter getDoubleUpDownCounter(String name) { } @Override - public DoubleGauge registerDoubleGauge(String name, String description, String unit, Supplier observer) { + public DoubleGauge registerDoublesGauge( + String name, + String description, + String unit, + Supplier> observer + ) { try (ReleasableLock lock = registerLock.acquire()) { return register(doubleGauges, new DoubleGaugeAdapter(meter, name, description, unit, observer)); } @@ -156,7 +162,12 @@ public LongCounter registerLongCounter(String name, String description, String u } @Override - public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier observer) { + public LongAsyncCounter registerLongsAsyncCounter( + String name, + String description, + String unit, + Supplier> observer + ) { try (ReleasableLock lock = registerLock.acquire()) { return register(longAsynchronousCounters, new LongAsyncCounterAdapter(meter, name, description, unit, observer)); } @@ -185,7 +196,7 @@ public LongUpDownCounter getLongUpDownCounter(String name) { } @Override - public LongGauge registerLongGauge(String name, String description, String unit, Supplier observer) { + public LongGauge registerLongsGauge(String name, String description, String unit, Supplier> observer) { try (ReleasableLock lock = registerLock.acquire()) { return register(longGauges, new LongGaugeAdapter(meter, name, description, unit, observer)); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java index 6b17a83619ef7..ab735c41ca890 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java @@ -15,12 +15,19 @@ import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import java.util.Collection; import java.util.Objects; import java.util.function.Supplier; public class 
DoubleAsyncCounterAdapter extends AbstractInstrument implements DoubleAsyncCounter { - public DoubleAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier observer) { + public DoubleAsyncCounterAdapter( + Meter meter, + String name, + String description, + String unit, + Supplier> observer + ) { super(meter, new Builder(name, description, unit, observer)); } @@ -30,9 +37,9 @@ public void close() throws Exception { } private static class Builder extends AbstractInstrument.Builder { - private final Supplier observer; + private final Supplier> observer; - private Builder(String name, String description, String unit, Supplier observer) { + private Builder(String name, String description, String unit, Supplier> observer) { super(name, description, unit); this.observer = Objects.requireNonNull(observer); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java index ed6ecee66d696..2a9c2d45981ed 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java @@ -14,6 +14,7 @@ import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import java.util.Collection; import java.util.Objects; import java.util.function.Supplier; @@ -24,7 +25,13 @@ public class DoubleGaugeAdapter extends AbstractInstrument observer) { + public DoubleGaugeAdapter( + Meter meter, + String name, + String description, + String unit, + Supplier> observer + ) { super(meter, new Builder(name, description, unit, observer)); } @@ -34,9 +41,9 @@ public void close() throws Exception { } private static class Builder extends AbstractInstrument.Builder { - private final Supplier observer; + private final Supplier> observer; - private Builder(String name, String description, String unit, Supplier observer) { + private Builder(String name, String description, String unit, Supplier> observer) { super(name, description, unit); this.observer = Objects.requireNonNull(observer); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java index 14c58139d03e1..1bc21ef2c831c 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java @@ -15,12 +15,19 @@ import org.elasticsearch.telemetry.metric.LongAsyncCounter; import org.elasticsearch.telemetry.metric.LongWithAttributes; +import java.util.Collection; import java.util.Objects; import java.util.function.Supplier; public class LongAsyncCounterAdapter extends AbstractInstrument implements LongAsyncCounter { - public LongAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier observer) { + public LongAsyncCounterAdapter( + Meter meter, + String name, + String description, + String unit, + Supplier> observer + ) { super(meter, new Builder(name, description, unit, observer)); } @@ -30,9 +37,9 @@ public void close() throws Exception { } private static class Builder extends AbstractInstrument.Builder { - private final Supplier observer; + 
private final Supplier> observer; - private Builder(String name, String description, String unit, Supplier observer) { + private Builder(String name, String description, String unit, Supplier> observer) { super(name, description, unit); this.observer = Objects.requireNonNull(observer); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java index 52c19c80c284f..eab9ed2eb5278 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java @@ -14,6 +14,7 @@ import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.LongWithAttributes; +import java.util.Collection; import java.util.Objects; import java.util.function.Supplier; @@ -21,7 +22,7 @@ * LongGaugeAdapter wraps an otel ObservableLongGauge */ public class LongGaugeAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.LongGauge { - public LongGaugeAdapter(Meter meter, String name, String description, String unit, Supplier observer) { + public LongGaugeAdapter(Meter meter, String name, String description, String unit, Supplier> observer) { super(meter, new Builder(name, description, unit, observer)); } @@ -31,11 +32,11 @@ public void close() throws Exception { } private static class Builder extends AbstractInstrument.Builder { - private final Supplier observer; + private final Supplier> observer; - private Builder(String name, String description, String unit, Supplier observer) { + private Builder(String name, String description, String unit, Supplier> observer) { super(name, description, unit); this.observer = Objects.requireNonNull(observer); } @Override diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java index 3e8ab415bd25e..1d760c8c12791 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java @@ -17,6 +17,7 @@ import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import org.elasticsearch.telemetry.metric.LongWithAttributes; +import java.util.Collection; import java.util.Map; import java.util.function.Consumer; import java.util.function.Supplier; @@ -53,37 +54,45 @@ static Attributes fromMap(Map attributes) { return builder.build(); } - static Consumer doubleMeasurementCallback(Supplier observer) { + static Consumer doubleMeasurementCallback(Supplier> observer) { return measurement -> { - DoubleWithAttributes observation; + Collection observations; try { - observation = observer.get(); + observations = observer.get(); } catch (RuntimeException err) { assert false : "observer must not throw [" + err.getMessage() + "]"; logger.error("doubleMeasurementCallback observer unexpected error", err); return; } - if (observation == null) { + if (observations == null) { return; } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + for (DoubleWithAttributes observation : observations) { + if (observation != null) { + measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + } + } }; } - 
static Consumer longMeasurementCallback(Supplier observer) { + static Consumer longMeasurementCallback(Supplier> observer) { return measurement -> { - LongWithAttributes observation; + Collection observations; try { - observation = observer.get(); + observations = observer.get(); } catch (RuntimeException err) { assert false : "observer must not throw [" + err.getMessage() + "]"; logger.error("longMeasurementCallback observer unexpected error", err); return; } - if (observation == null) { + if (observations == null) { return; } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + for (LongWithAttributes observation : observations) { + if (observation != null) { + measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + } + } }; } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java index a4c9a9d3e1c67..ed09bd2462bb3 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java @@ -112,12 +112,8 @@ public void resetClusterSetting() { public void testRolloverOnAutoShardCondition() throws Exception { final String dataStreamName = "logs-es"; - putComposableIndexTemplate( - "my-template", - List.of("logs-*"), - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() - ); - final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build()); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); indexDocs(dataStreamName, randomIntBetween(100, 200)); @@ -277,12 +273,8 @@ public void testReduceShardsOnRollover() throws IOException { final String dataStreamName = "logs-es"; // start with 3 shards - putComposableIndexTemplate( - "my-template", - List.of("logs-*"), - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() - ); - final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build()); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); indexDocs(dataStreamName, randomIntBetween(100, 200)); @@ -391,12 +383,8 @@ public void testReduceShardsOnRollover() throws IOException { public void testLazyRolloverKeepsPreviousAutoshardingDecision() throws IOException { final String dataStreamName = "logs-es"; - putComposableIndexTemplate( - "my-template", - List.of("logs-*"), - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() - ); - final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 
0).build()); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); indexDocs(dataStreamName, randomIntBetween(100, 200)); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index f79eea8676b3e..e3da69b7b2f0b 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -14,6 +14,8 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.RequestBuilder; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; @@ -151,14 +153,18 @@ protected Collection> nodePlugins() { public void testBasicScenario() throws Exception { List backingIndices = new ArrayList<>(4); putComposableIndexTemplate("id1", List.of("metrics-foo*")); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "metrics-foo" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); putComposableIndexTemplate("id2", List.of("metrics-bar*")); - createDataStreamRequest = new CreateDataStreamAction.Request("metrics-bar"); + createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "metrics-bar"); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); getDataStreamResponse.getDataStreams().sort(Comparator.comparing(dataStreamInfo -> dataStreamInfo.getDataStream().getName())); @@ -228,7 +234,7 @@ public void testBasicScenario() throws Exception { verifyDocs("metrics-bar", numDocsBar + numDocsBar2, 1, 2); verifyDocs("metrics-foo", numDocsFoo + numDocsFoo2, 1, 2); - DeleteDataStreamAction.Request deleteDataStreamRequest = new DeleteDataStreamAction.Request("metrics-*"); + DeleteDataStreamAction.Request deleteDataStreamRequest = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "metrics-*"); client().execute(DeleteDataStreamAction.INSTANCE, deleteDataStreamRequest).actionGet(); getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(0)); @@ -245,7 +251,11 @@ public void testBasicScenario() throws Exception { public void testOtherWriteOps() throws Exception { 
putComposableIndexTemplate("id", List.of("metrics-foobar*")); String dataStreamName = "metrics-foobar"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); { @@ -311,9 +321,16 @@ public void testOtherWriteOps() throws Exception { { // TODO: remove when fixing the bug when an index matching a backing index name is created before the data stream is created - createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName + "-baz"); + createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + "-baz" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName + "-baz" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName + "-baz" } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -451,7 +468,7 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except indexDocs(dataStreamName, numDocs); verifyDocs(dataStreamName, numDocs, 1, 1); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -485,13 +502,13 @@ public void testComposableTemplateOnlyMatchingWithDataStreamName() throws Except indexDocs(dataStreamName, numDocs2); verifyDocs(dataStreamName, numDocs + numDocs2, 1, 2); - getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); List backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices(); - DeleteDataStreamAction.Request deleteDataStreamRequest = new DeleteDataStreamAction.Request(dataStreamName); + DeleteDataStreamAction.Request deleteDataStreamRequest = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, dataStreamName); client().execute(DeleteDataStreamAction.INSTANCE, deleteDataStreamRequest).actionGet(); getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(0)); @@ -538,7 +555,11 @@ public void testTimeStampValidationInvalidFieldMapping() throws Exception { public void testResolvabilityOfDataStreamsInAPIs() throws Exception { 
putComposableIndexTemplate("id", List.of("logs-*")); String dataStreamName = "logs-foobar"; - CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request request = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, request).actionGet(); IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest(); String aliasToDataStream = "logs"; @@ -583,10 +604,12 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(dataStreamName, indicesAdmin().prepareOpen(dataStreamName), false); verifyResolvability(dataStreamName, indicesAdmin().prepareClose(dataStreamName), true); verifyResolvability(aliasToDataStream, indicesAdmin().prepareClose(aliasToDataStream), true); - verifyResolvability(dataStreamName, clusterAdmin().prepareSearchShards(dataStreamName), false); + verifyResolvability( + client().execute(TransportClusterSearchShardsAction.TYPE, new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, dataStreamName)) + ); verifyResolvability(client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(dataStreamName))); - request = new CreateDataStreamAction.Request("logs-barbaz"); + request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "logs-barbaz"); client().execute(CreateDataStreamAction.INSTANCE, request).actionGet(); verifyResolvability( "logs-barbaz", @@ -627,16 +650,29 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex().addIndices(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareClose(wildcardExpression), false); - verifyResolvability(wildcardExpression, clusterAdmin().prepareSearchShards(wildcardExpression), false); + verifyResolvability( + client().execute( + TransportClusterSearchShardsAction.TYPE, + new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, wildcardExpression) + ) + ); verifyResolvability(client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(wildcardExpression))); } public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exception { putComposableIndexTemplate("id", List.of("metrics-foobar*")); String dataStreamName = "metrics-foobar-baz"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName + "-eggplant"); + createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + "-eggplant" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); TransportDeleteComposableIndexTemplateAction.Request req = new TransportDeleteComposableIndexTemplateAction.Request("id"); @@ -689,7 +725,11 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio public void testAliasActionsOnDataStreams() throws Exception { putComposableIndexTemplate("id1", 
List.of("metrics-foo*")); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index(dataStreamName).aliases("foo"); @@ -828,7 +868,11 @@ public void testRandomDataSteamAliasesUpdate() throws Exception { .distinct() .toArray(String[]::new); for (String dataStream : dataStreams) { - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStream + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); } Map indexFilters = Map.of("term", Map.of("type", Map.of("value", "y"))); @@ -867,7 +911,11 @@ public void testDataSteamAliasWithMalformedFilter() throws Exception { String alias = randomAlphaOfLength(4); String dataStream = "log-" + randomAlphaOfLength(4).toLowerCase(Locale.ROOT); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStream + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); AliasActions addAction = new AliasActions(AliasActions.Type.ADD).aliases(alias).indices(dataStream); @@ -890,7 +938,11 @@ public void testDataSteamAliasWithMalformedFilter() throws Exception { public void testAliasActionsFailOnDataStreamBackingIndices() throws Exception { putComposableIndexTemplate("id1", List.of("metrics-foo*")); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); String backingIndex = DataStream.getDefaultBackingIndexName(dataStreamName, 1); @@ -915,7 +967,11 @@ public void testAddDataStreamAliasesMixedExpressionValidation() throws Exception createIndex("metrics-myindex"); putComposableIndexTemplate("id1", List.of("metrics-*")); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index("metrics-*").aliases("my-alias"); @@ -929,7 +985,11 @@ public void testRemoveDataStreamAliasesMixedExpression() throws Exception { createIndex("metrics-myindex"); putComposableIndexTemplate("id1", List.of("metrics-*")); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request 
createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); @@ -960,7 +1020,11 @@ public void testRemoveDataStreamAliasesMixedExpression() throws Exception { public void testUpdateDataStreamsWithWildcards() throws Exception { putComposableIndexTemplate("id1", List.of("metrics-*")); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); { IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); @@ -1003,7 +1067,11 @@ public void testUpdateDataStreamsWithWildcards() throws Exception { public void testDataStreamAliasesUnsupportedParametersValidation() throws Exception { putComposableIndexTemplate("id1", List.of("metrics-*")); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); { AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index("metrics-*").aliases("my-alias").routing("[routing]"); @@ -1056,9 +1124,16 @@ public void testTimestampFieldCustomAttributes() throws Exception { }"""; putComposableIndexTemplate("id1", mapping, List.of("logs-foo*"), null, null); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("logs-foobar"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "logs-foobar" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "logs-foobar" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { "logs-foobar" } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -1073,10 +1148,14 @@ public void testTimestampFieldCustomAttributes() throws Exception { public void testUpdateMappingViaDataStream() throws Exception { putComposableIndexTemplate("id1", List.of("logs-*")); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("logs-foobar"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "logs-foobar" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet(); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); 
GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -1118,10 +1197,17 @@ public void testUpdateMappingViaDataStream() throws Exception { public void testUpdateIndexSettingsViaDataStream() throws Exception { putComposableIndexTemplate("id1", List.of("logs-*")); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("logs-foobar"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "logs-foobar" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet(); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "logs-foobar" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { "logs-foobar" } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -1255,11 +1341,15 @@ public void testIndexDocsWithCustomRoutingTargetingBackingIndex() throws Excepti public void testSearchAllResolvesDataStreams() throws Exception { putComposableIndexTemplate("id1", List.of("metrics-foo*")); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "metrics-foo" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); putComposableIndexTemplate("id2", List.of("metrics-bar*")); - createDataStreamRequest = new CreateDataStreamAction.Request("metrics-bar"); + createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "metrics-bar"); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); int numDocsBar = randomIntBetween(2, 16); @@ -1287,7 +1377,7 @@ public void testGetDataStream() throws Exception { GetDataStreamAction.Response response = client().execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "metrics-foo" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "metrics-foo" }) ).actionGet(); assertThat(response.getDataStreams().size(), is(1)); DataStreamInfo metricsFooDataStream = response.getDataStreams().get(0); @@ -1315,7 +1405,11 @@ private static void assertBackingIndex(String backingIndex, String timestampFiel public void testNoTimestampInDocument() throws Exception { putComposableIndexTemplate("id", List.of("logs-foobar*")); String dataStreamName = "logs-foobar"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); IndexRequest indexRequest = new IndexRequest(dataStreamName).opType("create").source("{}", XContentType.JSON); @@ -1326,7 +1420,11 @@ public void testNoTimestampInDocument() throws Exception { public void testMultipleTimestampValuesInDocument() throws 
Exception { putComposableIndexTemplate("id", List.of("logs-foobar*")); String dataStreamName = "logs-foobar"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); IndexRequest indexRequest = new IndexRequest(dataStreamName).opType("create") @@ -1374,7 +1472,7 @@ public void testMixedAutoCreate() throws Exception { bulkResponse = client().bulk(bulkRequest).actionGet(); assertThat("bulk failures: " + Strings.toString(bulkResponse), bulkResponse.hasFailures(), is(false)); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response getDataStreamsResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamsResponse.getDataStreams(), hasSize(4)); @@ -1391,7 +1489,7 @@ public void testMixedAutoCreate() throws Exception { assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barfoo2")); assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barfoo3")); - DeleteDataStreamAction.Request deleteDSReq = new DeleteDataStreamAction.Request(new String[] { "*" }); + DeleteDataStreamAction.Request deleteDSReq = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); client().execute(DeleteDataStreamAction.INSTANCE, deleteDSReq).actionGet(); TransportDeleteComposableIndexTemplateAction.Request deleteTemplateRequest = new TransportDeleteComposableIndexTemplateAction.Request("*"); @@ -1412,7 +1510,7 @@ public void testAutoCreateV1TemplateNoDataStream() { BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); assertThat("bulk failures: " + Strings.toString(bulkResponse), bulkResponse.hasFailures(), is(false)); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response getDataStreamsResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamsResponse.getDataStreams(), hasSize(0)); @@ -1430,7 +1528,12 @@ public void testCreatingDataStreamAndFirstBackingIndexExistsFails() throws Excep createIndex(backingIndex); putComposableIndexTemplate("id", List.of("logs-*")); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName, now); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName, + now + ); Exception e = expectThrows( ElasticsearchStatusException.class, client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest) @@ -1440,7 +1543,11 @@ public void testCreatingDataStreamAndFirstBackingIndexExistsFails() throws Excep public void testQueryDataStreamNameInIndexField() throws Exception { putComposableIndexTemplate("id1", List.of("metrics-*")); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo"); + 
CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "metrics-foo" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs("metrics-foo", 1); @@ -1454,10 +1561,14 @@ public void testQueryDataStreamNameInIndexField() throws Exception { public void testDataStreamMetadata() throws Exception { Settings settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); putComposableIndexTemplate("id1", null, List.of("logs-*"), settings, Map.of("managed_by", "core-features")); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("logs-foobar"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "logs-foobar" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); getDataStreamResponse.getDataStreams().sort(Comparator.comparing(dataStreamInfo -> dataStreamInfo.getDataStream().getName())); @@ -1475,7 +1586,11 @@ public void testDataStreamMetadata() throws Exception { public void testClusterStateIncludeDataStream() throws Exception { putComposableIndexTemplate("id1", List.of("metrics-foo*")); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "metrics-foo" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); // when querying a backing index then the data stream should be included as well. 
@@ -1528,7 +1643,10 @@ public void testMultiThreadedRollover() throws Exception { assertBusy(() -> { try { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "potato-biscuit" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { "potato-biscuit" } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); String newBackingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getWriteIndex().getName(); @@ -1552,7 +1670,10 @@ public void testMultiThreadedRollover() throws Exception { }); // We should *NOT* have a third index, it should have rolled over *exactly* once - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "potato-biscuit" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { "potato-biscuit" } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); List backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices(); @@ -1565,7 +1686,11 @@ public void testMultiThreadedRollover() throws Exception { public void testSegmentsSortedOnTimestampDesc() throws Exception { Settings settings = indexSettings(1, 0).build(); putComposableIndexTemplate("template_for_foo", null, List.of("metrics-foo*"), settings, null); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "metrics-foo" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); // We index data in the increasing order of @timestamp field @@ -1602,7 +1727,7 @@ public void testCreateDataStreamWithSameNameAsIndexAlias() throws Exception { // Important detail: create template with data stream template after the index has been created DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); - var request = new CreateDataStreamAction.Request("my-alias"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-alias"); var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("[my-alias (alias of [")); assertThat(e.getMessage(), containsString("]) conflicts with data stream")); @@ -1615,7 +1740,7 @@ public void testCreateDataStreamWithSameNameAsIndex() throws Exception { // Important detail: create template with data stream template after the index has been created DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); - var request = new CreateDataStreamAction.Request("my-index"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-index"); var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("data stream [my-index] conflicts with index")); } @@ -1623,18 +1748,20 @@ public void testCreateDataStreamWithSameNameAsIndex() throws Exception { public void testCreateDataStreamWithSameNameAsDataStreamAlias() throws Exception { { 
DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); - var request = new CreateDataStreamAction.Request("my-ds"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-ds"); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); var aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("my-ds").aliases("my-alias")); assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - var request2 = new CreateDataStreamAction.Request("my-alias"); + var request2 = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-alias"); var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request2)); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } { - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet()); + assertAcked( + client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).actionGet() + ); DataStreamIT.putComposableIndexTemplate( "my-template", null, @@ -1645,10 +1772,10 @@ public void testCreateDataStreamWithSameNameAsDataStreamAlias() throws Exception null, false ); - var request = new CreateDataStreamAction.Request("my-ds"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-ds"); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); - var request2 = new CreateDataStreamAction.Request("my-alias"); + var request2 = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-alias"); var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request2)); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } @@ -1660,7 +1787,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndexAlias() throws Exception CreateIndexRequest createIndexRequest = new CreateIndexRequest("es-logs").alias(new Alias("logs")); assertAcked(indicesAdmin().create(createIndexRequest).actionGet()); - var request = new CreateDataStreamAction.Request("logs-es"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "logs-es"); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); @@ -1668,7 +1795,9 @@ public void testCreateDataStreamAliasWithSameNameAsIndexAlias() throws Exception assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } { - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet()); + assertAcked( + client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).actionGet() + ); DataStreamIT.putComposableIndexTemplate( "my-template", null, @@ -1680,7 +1809,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndexAlias() throws Exception false ); - var request = new CreateDataStreamAction.Request("logs-es"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, 
TEST_REQUEST_TIMEOUT, "logs-es"); var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } @@ -1693,7 +1822,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndex() throws Exception { assertAcked(indicesAdmin().create(createIndexRequest).actionGet()); { - var request = new CreateDataStreamAction.Request("logs-es"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "logs-es"); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); @@ -1704,7 +1833,9 @@ public void testCreateDataStreamAliasWithSameNameAsIndex() throws Exception { ); } { - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet()); + assertAcked( + client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).actionGet() + ); var e = expectThrows( IllegalArgumentException.class, () -> DataStreamIT.putComposableIndexTemplate( @@ -1728,7 +1859,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndex() throws Exception { public void testCreateIndexWithSameNameAsDataStreamAlias() throws Exception { DataStreamIT.putComposableIndexTemplate("my-template", List.of("logs-*")); - var request = new CreateDataStreamAction.Request("logs-es"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "logs-es"); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); @@ -1742,7 +1873,7 @@ public void testCreateIndexWithSameNameAsDataStreamAlias() throws Exception { public void testCreateIndexAliasWithSameNameAsDataStreamAlias() throws Exception { DataStreamIT.putComposableIndexTemplate("my-template", List.of("logs-*")); - var request = new CreateDataStreamAction.Request("logs-es"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "logs-es"); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); @@ -1766,7 +1897,7 @@ public void testCreateIndexAliasWithSameNameAsDataStreamAlias() throws Exception public void testRemoveGhostReference() throws Exception { String dataStreamName = "logs-es"; DataStreamIT.putComposableIndexTemplate("my-template", List.of("logs-*")); - var request = new CreateDataStreamAction.Request(dataStreamName); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); assertAcked(indicesAdmin().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet()); var indicesStatsResponse = indicesAdmin().stats(new IndicesStatsRequest()).actionGet(); @@ -1818,7 +1949,11 @@ public void onFailure(Exception e) { assertAcked( client().execute( 
ModifyDataStreamsAction.INSTANCE, - new ModifyDataStreamsAction.Request(List.of(DataStreamAction.removeBackingIndex(dataStreamName, ghostReference.getName()))) + new ModifyDataStreamsAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + List.of(DataStreamAction.removeBackingIndex(dataStreamName, ghostReference.getName())) + ) ) ); ClusterState after = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); @@ -2042,7 +2177,11 @@ public void testSearchWithRouting() throws IOException, ExecutionException, Inte TransportPutComposableIndexTemplateAction.TYPE, new TransportPutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) ).actionGet(); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("my-logs"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "my-logs" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); assertResponse(prepareSearch("my-logs").setRouting("123"), resp -> { assertEquals(resp.getTotalShards(), 4); }); @@ -2054,7 +2193,7 @@ public void testWriteIndexWriteLoadAndAvgShardSizeIsStoredAfterRollover() throws final int numberOfReplicas = randomIntBetween(0, 1); final var indexSettings = indexSettings(numberOfShards, numberOfReplicas).build(); DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null); - final var request = new CreateDataStreamAction.Request(dataStreamName); + final var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); indexDocsAndEnsureThereIsCapturedWriteLoad(dataStreamName); @@ -2102,7 +2241,7 @@ public void testWriteLoadAndAvgShardSizeIsStoredInABestEffort() throws Exception final var indexSettings = indexSettings(2, 1).put("index.routing.allocation.include._name", String.join(",", dataOnlyNodes)) .build(); DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null); - final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); ensureGreen(dataStreamName); @@ -2173,7 +2312,7 @@ public void testNoShardSizeIsForecastedWhenAllShardStatRequestsFail() throws Exc final var indexSettings = indexSettings(1, 0).put("index.routing.allocation.require._name", dataOnlyNode).build(); DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null); - final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); for (int i = 0; i < 10; i++) { @@ -2211,7 +2350,7 @@ public void testShardSizeIsForecastedDuringRollover() throws Exception { final int numberOfReplicas = randomIntBetween(0, 1); final var indexSettings = indexSettings(numberOfShards, numberOfReplicas).build(); DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null); - final var 
request = new CreateDataStreamAction.Request(dataStreamName); + final var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); for (int i = 0; i < 4; i++) { diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamMigrationIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamMigrationIT.java index bebdc085760d4..5b5f3920f019b 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamMigrationIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamMigrationIT.java @@ -71,7 +71,10 @@ public void testBasicMigration() throws Exception { assertThat(resolveResponse.getDataStreams().size(), equalTo(0)); assertThat(resolveResponse.getIndices().size(), equalTo(2)); - client().execute(MigrateToDataStreamAction.INSTANCE, new MigrateToDataStreamAction.Request(alias)).get(); + client().execute( + MigrateToDataStreamAction.INSTANCE, + new MigrateToDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, alias) + ).get(); resolveResponse = indicesAdmin().resolveIndex(resolveRequest).get(); assertThat(resolveResponse.getAliases().size(), equalTo(0)); @@ -112,7 +115,10 @@ public void testMigrationWithoutTemplate() throws Exception { Exception e = expectThrows( Exception.class, - () -> client().execute(MigrateToDataStreamAction.INSTANCE, new MigrateToDataStreamAction.Request(alias)).get() + () -> client().execute( + MigrateToDataStreamAction.INSTANCE, + new MigrateToDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, alias) + ).get() ); assertTrue( @@ -144,7 +150,10 @@ public void testMigrationWithoutIndexMappings() throws Exception { Exception e = expectThrows( Exception.class, - () -> client().execute(MigrateToDataStreamAction.INSTANCE, new MigrateToDataStreamAction.Request(alias)).get() + () -> client().execute( + MigrateToDataStreamAction.INSTANCE, + new MigrateToDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, alias) + ).get() ); assertTrue(throwableOrItsCause(e, IllegalArgumentException.class, "must have mappings for a timestamp field")); @@ -179,7 +188,10 @@ public void testMigrationWithoutTimestampMapping() throws Exception { Exception e = expectThrows( Exception.class, - () -> client().execute(MigrateToDataStreamAction.INSTANCE, new MigrateToDataStreamAction.Request(alias)).get() + () -> client().execute( + MigrateToDataStreamAction.INSTANCE, + new MigrateToDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, alias) + ).get() ); assertTrue(throwableOrItsCause(e, IllegalArgumentException.class, "data stream timestamp field [@timestamp] does not exist")); @@ -214,7 +226,10 @@ public void testMigrationWithoutWriteIndex() throws Exception { Exception e = expectThrows( Exception.class, - () -> client().execute(MigrateToDataStreamAction.INSTANCE, new MigrateToDataStreamAction.Request(alias)).get() + () -> client().execute( + MigrateToDataStreamAction.INSTANCE, + new MigrateToDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, alias) + ).get() ); assertTrue(throwableOrItsCause(e, IllegalArgumentException.class, "alias [" + alias + "] must specify a write index")); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java 
b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 412410c4e4c7c..d0dd2888f535f 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -116,15 +116,15 @@ public void setup() throws Exception { } }""", List.of("with-fs"), null, null, null, null, true); - CreateDataStreamAction.Request request = new CreateDataStreamAction.Request("ds"); + CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "ds"); AcknowledgedResponse response = client.execute(CreateDataStreamAction.INSTANCE, request).get(); assertTrue(response.isAcknowledged()); - request = new CreateDataStreamAction.Request("other-ds"); + request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "other-ds"); response = client.execute(CreateDataStreamAction.INSTANCE, request).get(); assertTrue(response.isAcknowledged()); - request = new CreateDataStreamAction.Request("with-fs"); + request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "with-fs"); response = client.execute(CreateDataStreamAction.INSTANCE, request).get(); assertTrue(response.isAcknowledged()); @@ -140,7 +140,7 @@ public void setup() throws Exception { // Resolve backing index names after data streams have been created: // (these names have a date component, and running around midnight could lead to test failures otherwise) - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); dsBackingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(); otherDsBackingIndexName = getDataStreamResponse.getDataStreams().get(1).getDataStream().getIndices().get(0).getName(); @@ -193,7 +193,9 @@ public void testSnapshotAndRestore() throws Exception { assertEquals(Collections.singletonList(dsBackingIndexName), getSnapshot(REPO, SNAPSHOT).indices()); - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" }))); + assertAcked( + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" })) + ); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() @@ -213,7 +215,7 @@ public void testSnapshotAndRestore() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "ds" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }) ).get(); assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -271,7 +273,7 @@ public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataSteamRequest = new 
GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).get(); assertThat( ds.getDataStreams().stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), @@ -330,7 +332,7 @@ public void testSnapshotAndRestoreInPlace() { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(new String[] { "ds" }); + GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).actionGet(); assertThat( ds.getDataStreams().stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), @@ -363,7 +365,7 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(fsBackingIndexName, fsFailureIndexName)); - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("with-fs"))); + assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "with-fs"))); { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() @@ -377,7 +379,7 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "with-fs" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "with-fs" }) ).get(); assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -399,7 +401,7 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "with-fs2" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "with-fs2" }) ).get(); assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -450,7 +452,7 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio ); } - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get()); + assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN)); RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT); @@ -472,7 +474,7 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { dataStreamToSnapshot }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamToSnapshot }) ).get(); assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -494,7 +496,7 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio ) ); - DeleteDataStreamAction.Request r = new DeleteDataStreamAction.Request(new String[] { 
dataStreamToSnapshot }); + DeleteDataStreamAction.Request r = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamToSnapshot }); assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, r).get()); } @@ -510,7 +512,10 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName, fsBackingIndexName, fsFailureIndexName) ); - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })).get()); + assertAcked( + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" })) + .get() + ); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN)); var restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) @@ -528,7 +533,7 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "*" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) ).get(); assertEquals(3, ds.getDataStreams().size()); assertThat( @@ -555,7 +560,7 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { equalTo("{\"match_all\":{\"boost\":1.0}}") ); - DeleteDataStreamAction.Request r = new DeleteDataStreamAction.Request(new String[] { "*" }); + DeleteDataStreamAction.Request r = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, r).get()); } @@ -571,7 +576,7 @@ public void testSnapshotAndRestoreAll() throws Exception { containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName, fsBackingIndexName, fsFailureIndexName) ); - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get()); + assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN)); var restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) @@ -588,7 +593,7 @@ public void testSnapshotAndRestoreAll() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "*" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) ).get(); assertEquals(3, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -621,7 +626,12 @@ public void testSnapshotAndRestoreAll() throws Exception { equalTo("{\"match_all\":{\"boost\":1.0}}") ); - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" })).get()); + assertAcked( + client().execute( + DeleteDataStreamAction.INSTANCE, + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }) + ).get() + ); } public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { @@ -636,7 +646,7 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { containsInAnyOrder(dsBackingIndexName, otherDsBackingIndexName, fsBackingIndexName, fsFailureIndexName) ); - 
assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).get()); + assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN)); var restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT).waitForCompletion(true) @@ -654,7 +664,7 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "*" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) ).get(); assertEquals(3, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -668,7 +678,12 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("*")).actionGet(); assertThat(getAliasesResponse.getDataStreamAliases(), anEmptyMap()); - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" })).get()); + assertAcked( + client().execute( + DeleteDataStreamAction.INSTANCE, + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }) + ).get() + ); } public void testRename() throws Exception { @@ -703,7 +718,7 @@ public void testRename() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "ds2" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds2" }) ).get(); assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -761,7 +776,7 @@ public void testRenameWriteDataStream() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "other-ds2" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "other-ds2" }) ).get(); assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -814,7 +829,8 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() { ); // delete data stream - client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" })).actionGet(); + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" })) + .actionGet(); // restore data stream attempting to rename the backing index RestoreSnapshotResponse restoreSnapshotResponse = client.admin() @@ -828,7 +844,7 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() { assertThat(restoreSnapshotResponse.status(), is(RestStatus.OK)); - GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(new String[] { "ds" }); + GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); GetDataStreamAction.Response response = client.execute(GetDataStreamAction.INSTANCE, getDSRequest).actionGet(); assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); } @@ -867,7 +883,7 @@ 
public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() { assertThat(restoreSnapshotResponse.status(), is(RestStatus.OK)); // assert "ds" was restored as "test-ds" and the backing index has a valid name - GetDataStreamAction.Request getRenamedDS = new GetDataStreamAction.Request(new String[] { "test-ds" }); + GetDataStreamAction.Request getRenamedDS = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "test-ds" }); GetDataStreamAction.Response response = client.execute(GetDataStreamAction.INSTANCE, getRenamedDS).actionGet(); assertThat( response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), @@ -875,7 +891,7 @@ public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() { ); // data stream "ds" should still exist in the system - GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(new String[] { "ds" }); + GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); response = client.execute(GetDataStreamAction.INSTANCE, getDSRequest).actionGet(); assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); } @@ -905,7 +921,7 @@ public void testWildcards() throws Exception { GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(new String[] { "ds2" }) + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds2" }) ).get(); assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); @@ -946,7 +962,9 @@ public void testDataStreamNotRestoredWhenIndexRequested() throws Exception { RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); assertEquals(RestStatus.OK, status); - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" }))); + assertAcked( + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" })) + ); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() @@ -957,7 +975,7 @@ public void testDataStreamNotRestoredWhenIndexRequested() throws Exception { assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); - GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(new String[] { "ds" }); + GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); expectThrows(ResourceNotFoundException.class, client.execute(GetDataStreamAction.INSTANCE, getRequest)); } @@ -972,7 +990,12 @@ public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionExcepti .get(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), Matchers.is(SnapshotState.SUCCESS)); - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" }))); + assertAcked( + client().execute( + DeleteDataStreamAction.INSTANCE, + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) + ) + ); final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( TEST_REQUEST_TIMEOUT, @@ -1014,7 +1037,10 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception { assertDocCount(dataStream, 100L); // Resolve backing index name after the data stream has been created because it has a date component, // and running around midnight could 
lead to test failures otherwise - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStream }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStream } + ); GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); String backingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(); @@ -1032,7 +1058,10 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception { // non-partial snapshots do not allow delete operations on data streams where snapshot has not been completed try { logger.info("--> delete index while non-partial snapshot is running"); - client1.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { dataStream })).actionGet(); + client1.execute( + DeleteDataStreamAction.INSTANCE, + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStream }) + ).actionGet(); fail("Expected deleting index to fail during snapshot"); } catch (SnapshotInProgressException e) { assertThat(e.getMessage(), containsString("Cannot delete data streams that are being snapshotted: [" + dataStream)); @@ -1139,7 +1168,12 @@ public void testSnapshotDSDuringRollover() throws Exception { final SnapshotInfo snapshotInfo = assertSuccessful(snapshotFuture); assertThat(snapshotInfo.dataStreams(), hasItems("ds")); - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" })).get()); + assertAcked( + client().execute( + DeleteDataStreamAction.INSTANCE, + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }) + ).get() + ); RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) @@ -1179,7 +1213,12 @@ public void testSnapshotDSDuringRolloverAndDeleteOldIndex() throws Exception { snapshotInfo.dataStreams(), not(hasItems("ds")) ); - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "other-ds" }))); + assertAcked( + client().execute( + DeleteDataStreamAction.INSTANCE, + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "other-ds" }) + ) + ); RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) @@ -1240,7 +1279,10 @@ public void testRestoreSnapshotFully() throws Exception { createIndexWithContent(indexName); createFullSnapshot(REPO, snapshotName); - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "*" })).get()); + assertAcked( + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" })) + .get() + ); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.lenientExpandOpenHidden()).get()); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() @@ -1250,7 +1292,7 @@ public void testRestoreSnapshotFully() throws Exception { .get(); assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); - GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getRequest = new 
GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); assertThat(client.execute(GetDataStreamAction.INSTANCE, getRequest).get().getDataStreams(), hasSize(3)); assertNotNull(client.admin().indices().prepareGetIndex().setIndices(indexName).get()); } @@ -1258,10 +1300,10 @@ public void testRestoreSnapshotFully() throws Exception { public void testRestoreDataStreamAliasWithConflictingDataStream() throws Exception { var snapshotName = "test-snapshot"; createFullSnapshot(REPO, snapshotName); - client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet(); + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).actionGet(); DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); try { - var request = new CreateDataStreamAction.Request("my-alias"); + var request = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "my-alias"); assertAcked(client.execute(CreateDataStreamAction.INSTANCE, request).actionGet()); var e = expectThrows( IllegalStateException.class, @@ -1270,7 +1312,7 @@ public void testRestoreDataStreamAliasWithConflictingDataStream() throws Excepti assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } finally { // Need to remove data streams in order to remove template - client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet(); + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).actionGet(); // Need to remove template, because base class doesn't remove composable index templates after each test (only legacy templates) client.execute( TransportDeleteComposableIndexTemplateAction.TYPE, @@ -1282,7 +1324,7 @@ public void testRestoreDataStreamAliasWithConflictingDataStream() throws Excepti public void testRestoreDataStreamAliasWithConflictingIndicesAlias() throws Exception { var snapshotName = "test-snapshot"; createFullSnapshot(REPO, snapshotName); - client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet(); + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).actionGet(); CreateIndexRequest createIndexRequest = new CreateIndexRequest("my-index").alias(new Alias("my-alias")); assertAcked(client.admin().indices().create(createIndexRequest).actionGet()); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java new file mode 100644 index 0000000000000..a52016e8c7f0b --- /dev/null +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -0,0 +1,428 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; +import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.FailureStoreMetrics; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestTestPlugin; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.TestProcessor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +/** + * An integration test that verifies how different paths/scenarios affect the APM metrics for failure stores. 
+ */ +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.SUITE) +public class IngestFailureStoreMetricsIT extends ESIntegTestCase { + + private static final List METRICS = List.of( + FailureStoreMetrics.METRIC_TOTAL, + FailureStoreMetrics.METRIC_FAILURE_STORE, + FailureStoreMetrics.METRIC_REJECTED + ); + + private String template; + private String dataStream; + private String pipeline; + + @Before + public void initializeRandomNames() { + template = "template-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + dataStream = "data-stream-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + pipeline = "pipeline-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + logger.info( + "--> running [{}] with generated names data stream [{}], template [{}] and pipeline [{}]", + getTestName(), + dataStream, + template, + pipeline + ); + } + + @Override + protected Collection> nodePlugins() { + return List.of(DataStreamsPlugin.class, CustomIngestTestPlugin.class, TestTelemetryPlugin.class, MapperExtrasPlugin.class); + } + + public void testNoPipelineNoFailures() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testFailingPipelineNoFailureStore() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + createBasicPipeline("fail"); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.PIPELINE, + false + ); + } + + public void testFailingPipelineWithFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + createBasicPipeline("fail"); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.PIPELINE + ); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testShardFailureNoFailureStore() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = 
collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD, + false + ); + } + + public void testShardFailureWithFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD + ); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + /** + * Make sure the rejected counter gets incremented when there were shard-level failures while trying to redirect a document to the + * failure store. + */ + public void testRejectionFromFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + // Initialize failure store. + var rolloverRequest = new RolloverRequest(dataStream, null); + rolloverRequest.setIndicesOptions( + IndicesOptions.builder(rolloverRequest.indicesOptions()) + .failureStoreOptions(opts -> opts.includeFailureIndices(true).includeRegularIndices(false)) + .build() + ); + var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); + var failureStoreIndex = rolloverResponse.getNewIndex(); + // Add a write block to the failure store index, which causes shard-level "failures". + var addIndexBlockRequest = new AddIndexBlockRequest(IndexMetadata.APIBlock.WRITE, failureStoreIndex); + client().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest).actionGet(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD + ); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD, + true + ); + } + + /** + * Make sure metrics get the correct data_stream attribute after a reroute. 
+ */ + public void testRerouteSuccessfulCorrectName() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + + String destination = dataStream + "-destination"; + final var createDataStreamRequest = new CreateDataStreamAction.Request(destination); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + createReroutePipeline(destination); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, destination); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testDropping() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + createBasicPipeline("drop"); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testDataStreamAlias() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + var indicesAliasesRequest = new IndicesAliasesRequest(); + indicesAliasesRequest.addAliasAction( + IndicesAliasesRequest.AliasActions.add().alias("some-alias").index(dataStream).writeIndex(true) + ); + client().execute(TransportIndicesAliasesAction.TYPE, indicesAliasesRequest).actionGet(); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs("some-alias", nrOfDocs, null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + private void putComposableIndexTemplate(boolean failureStore) throws IOException { + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(template); + request.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, failureStore)) + .template(new Template(null, new CompressedXContent(""" + { + "dynamic": false, + "properties": { + "@timestamp": { + "type": "date" + }, + "count": { + "type": "long" + } + } + }"""), null)) + .build() + ); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + } + + private void createDataStream() { + final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + } + + private void createBasicPipeline(String processorType) { + createPipeline(Strings.format("\"%s\": {}", processorType)); + } + + private void createReroutePipeline(String destination) { + createPipeline(Strings.format("\"reroute\": {\"destination\": \"%s\"}", destination)); + } + + private void createPipeline(String processor) { + String pipelineDefinition = Strings.format("{\"processors\": [{%s}]}", processor); + BytesReference 
bytes = new BytesArray(pipelineDefinition);
+        clusterAdmin().putPipeline(new PutPipelineRequest(pipeline, bytes, XContentType.JSON)).actionGet();
+    }
+
+    private void indexDocs(String dataStream, int numDocs, String pipeline) {
+        indexDocs(dataStream, numDocs, "1", pipeline);
+    }
+
+    private void indexDocs(String dataStream, int numDocs, String value, String pipeline) {
+        BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+        for (int i = 0; i < numDocs; i++) {
+            String time = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis());
+            bulkRequest.add(
+                new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE)
+                    .source(Strings.format("{\"%s\":\"%s\", \"count\": %s}", DEFAULT_TIMESTAMP_FIELD, time, value), XContentType.JSON)
+                    .setPipeline(pipeline)
+            );
+        }
+        client().bulk(bulkRequest).actionGet();
+    }
+
+    private static Map<String, List<Measurement>> collectTelemetry() {
+        Map<String, List<Measurement>> measurements = new HashMap<>();
+        for (PluginsService pluginsService : internalCluster().getInstances(PluginsService.class)) {
+            final TestTelemetryPlugin telemetryPlugin = pluginsService.filterPlugins(TestTelemetryPlugin.class).findFirst().orElseThrow();
+
+            telemetryPlugin.collect();
+
+            for (String metricName : METRICS) {
+                measurements.put(metricName, telemetryPlugin.getLongCounterMeasurement(metricName));
+            }
+        }
+        return measurements;
+    }
+
+    private void assertMeasurements(List<Measurement> measurements, int expectedSize, String expectedDataStream) {
+        assertMeasurements(measurements, expectedSize, expectedDataStream, (Consumer<Measurement>) null);
+    }
+
+    private void assertMeasurements(
+        List<Measurement> measurements,
+        int expectedSize,
+        String expectedDataStream,
+        FailureStoreMetrics.ErrorLocation location
+    ) {
+        assertMeasurements(
+            measurements,
+            expectedSize,
+            expectedDataStream,
+            measurement -> assertEquals(location.name(), measurement.attributes().get("error_location"))
+        );
+    }
+
+    private void assertMeasurements(
+        List<Measurement> measurements,
+        int expectedSize,
+        String expectedDataStream,
+        FailureStoreMetrics.ErrorLocation location,
+        boolean failureStore
+    ) {
+        assertMeasurements(measurements, expectedSize, expectedDataStream, measurement -> {
+            assertEquals(location.name(), measurement.attributes().get("error_location"));
+            assertEquals(failureStore, measurement.attributes().get("failure_store"));
+        });
+    }
+
+    private void assertMeasurements(
+        List<Measurement> measurements,
+        int expectedSize,
+        String expectedDataStream,
+        Consumer<Measurement> customAssertion
+    ) {
+        assertEquals(expectedSize, measurements.size());
+        for (Measurement measurement : measurements) {
+            assertEquals(expectedDataStream, measurement.attributes().get("data_stream"));
+            if (customAssertion != null) {
+                customAssertion.accept(measurement);
+            }
+        }
+    }
+
+    public static class CustomIngestTestPlugin extends IngestTestPlugin {
+        @Override
+        public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
+            Map<String, Processor.Factory> processors = new HashMap<>();
+            processors.put(
+                "drop",
+                (factories, tag, description, config) -> new TestProcessor(tag, "drop", description, ingestDocument -> null)
+            );
+            processors.put("reroute", (factories, tag, description, config) -> {
+                String destination = (String) config.remove("destination");
+                return new TestProcessor(
+                    tag,
+                    "reroute",
+                    description,
+                    (Consumer<IngestDocument>) ingestDocument -> ingestDocument.reroute(destination)
+                );
+            });
+            processors.put(
+                "fail",
+                (processorFactories, tag, description, config) -> new TestProcessor(tag, "fail", description, new RuntimeException())
+            );
+            return processors;
+        }
+    
} +} diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java index 89d576e74be2f..726f2c21cec7f 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java @@ -95,11 +95,10 @@ public void onFailure(Exception e) { } private DataStream getDataStream(String dataStreamName) { - return client().execute(GetDataStreamAction.INSTANCE, new GetDataStreamAction.Request(new String[] { dataStreamName })) - .actionGet() - .getDataStreams() - .get(0) - .getDataStream(); + return client().execute( + GetDataStreamAction.INSTANCE, + new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }) + ).actionGet().getDataStreams().get(0).getDataStream(); } private void createDataStream(String dataStreamName) throws InterruptedException, ExecutionException { @@ -117,7 +116,11 @@ private void createDataStream(String dataStreamName) throws InterruptedException ).actionGet(); assertThat(putComposableTemplateResponse.isAcknowledged(), is(true)); - final CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + final CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); final AcknowledgedResponse createDataStreamResponse = client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest) .get(); assertThat(createDataStreamResponse.isAcknowledged(), is(true)); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java index 52ce2a7a33ea6..8db3ed6ddc9c9 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java @@ -277,7 +277,10 @@ public void testInvalidIndexModeTimeSeriesSwitchWithoutDimensions() throws IOExc } private void assertDataStreamBackingIndicesModes(final String dataStreamName, final List modes) { - final GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + final GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); final GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); final DataStream dataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream(); @@ -362,7 +365,11 @@ private void indexTimeSeriesDocuments( } private void createDataStream(final Client client, final String dataStreamName) throws InterruptedException, ExecutionException { - final CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + final CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); final AcknowledgedResponse createDataStreamResponse 
= client.execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest) .get(); assertThat(createDataStreamResponse.isAcknowledged(), is(true)); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java index eca45b45d1269..ef785086a0ef4 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java @@ -327,10 +327,14 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept aliases.put(dataStreamLocalAlias, AliasMetadata.builder(dataStreamLocalAlias).writeIndex(randomBoolean()).build()); } putComposableIndexTemplate(client, "id1", List.of(dataStreamLocal + "*"), aliases); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "metrics-foo" + ); assertAcked(client.execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get()); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); DataStream fooDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream(); @@ -358,10 +362,14 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept aliases.put(dataStreamRemote1Alias, AliasMetadata.builder(dataStreamRemote1Alias).writeIndex(randomBoolean()).build()); } putComposableIndexTemplate(client, "id2", List.of(dataStreamRemote1 + "*"), aliases); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-bar"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "metrics-bar" + ); assertAcked(client.execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get()); - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { "*" }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java index ec871b201bbdb..a07081e8f0dd9 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamIT.java @@ -344,6 +344,7 @@ public String getFeatureDescription() { public void cleanUpFeature(ClusterService clusterService, Client client, ActionListener listener) { Collection dataStreamDescriptors = getSystemDataStreamDescriptors(); final 
DeleteDataStreamAction.Request request = new DeleteDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, dataStreamDescriptors.stream() .map(SystemDataStreamDescriptor::getDataStreamName) .collect(Collectors.toList()) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java index c147677cf856c..ba9657f71b23e 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java @@ -58,7 +58,11 @@ public void testSystemDataStreamInGlobalState() throws Exception { createRepository(REPO, "fs", location); { - CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(SYSTEM_DATA_STREAM_NAME); + CreateDataStreamAction.Request request = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + SYSTEM_DATA_STREAM_NAME + ); final AcknowledgedResponse response = client().execute(CreateDataStreamAction.INSTANCE, request).get(); assertTrue(response.isAcknowledged()); } @@ -71,7 +75,10 @@ public void testSystemDataStreamInGlobalState() throws Exception { assertThat(indexRepsonse.status().getStatus(), oneOf(200, 201)); { - GetDataStreamAction.Request request = new GetDataStreamAction.Request(new String[] { SYSTEM_DATA_STREAM_NAME }); + GetDataStreamAction.Request request = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { SYSTEM_DATA_STREAM_NAME } + ); GetDataStreamAction.Response response = client().execute(GetDataStreamAction.INSTANCE, request).get(); assertThat(response.getDataStreams(), hasSize(1)); assertTrue(response.getDataStreams().get(0).getDataStream().isSystem()); @@ -87,7 +94,10 @@ public void testSystemDataStreamInGlobalState() throws Exception { // We have to delete the data stream directly, as the feature reset API doesn't clean up system data streams yet // See https://github.com/elastic/elasticsearch/issues/75818 { - DeleteDataStreamAction.Request request = new DeleteDataStreamAction.Request(new String[] { SYSTEM_DATA_STREAM_NAME }); + DeleteDataStreamAction.Request request = new DeleteDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { SYSTEM_DATA_STREAM_NAME } + ); AcknowledgedResponse response = client().execute(DeleteDataStreamAction.INSTANCE, request).get(); assertTrue(response.isAcknowledged()); } @@ -127,7 +137,10 @@ public void testSystemDataStreamInGlobalState() throws Exception { assertEquals(restoreSnapshotResponse.getRestoreInfo().totalShards(), restoreSnapshotResponse.getRestoreInfo().successfulShards()); { - GetDataStreamAction.Request request = new GetDataStreamAction.Request(new String[] { SYSTEM_DATA_STREAM_NAME }); + GetDataStreamAction.Request request = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { SYSTEM_DATA_STREAM_NAME } + ); GetDataStreamAction.Response response = client().execute(GetDataStreamAction.INSTANCE, request).get(); assertThat(response.getDataStreams(), hasSize(1)); assertTrue(response.getDataStreams().get(0).getDataStream().isSystem()); @@ -144,7 +157,10 @@ public void testSystemDataStreamInGlobalState() throws Exception { private void assertSystemDataStreamDoesNotExist() { try { - GetDataStreamAction.Request request = new GetDataStreamAction.Request(new String[] { SYSTEM_DATA_STREAM_NAME }); + 
GetDataStreamAction.Request request = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { SYSTEM_DATA_STREAM_NAME } + ); GetDataStreamAction.Response response = client().execute(GetDataStreamAction.INSTANCE, request).get(); assertThat(response.getDataStreams(), hasSize(0)); } catch (Exception e) { @@ -161,7 +177,11 @@ public void testSystemDataStreamInFeatureState() throws Exception { createRepository(REPO, "fs", location); { - CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(SYSTEM_DATA_STREAM_NAME); + CreateDataStreamAction.Request request = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + SYSTEM_DATA_STREAM_NAME + ); final AcknowledgedResponse response = client().execute(CreateDataStreamAction.INSTANCE, request).get(); assertTrue(response.isAcknowledged()); } @@ -181,7 +201,10 @@ public void testSystemDataStreamInFeatureState() throws Exception { assertThat(indexResponse.status().getStatus(), oneOf(200, 201)); { - GetDataStreamAction.Request request = new GetDataStreamAction.Request(new String[] { SYSTEM_DATA_STREAM_NAME }); + GetDataStreamAction.Request request = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { SYSTEM_DATA_STREAM_NAME } + ); GetDataStreamAction.Response response = client().execute(GetDataStreamAction.INSTANCE, request).get(); assertThat(response.getDataStreams(), hasSize(1)); assertTrue(response.getDataStreams().get(0).getDataStream().isSystem()); @@ -201,7 +224,10 @@ public void testSystemDataStreamInFeatureState() throws Exception { // We have to delete the data stream directly, as the feature reset API doesn't clean up system data streams yet // See https://github.com/elastic/elasticsearch/issues/75818 { - DeleteDataStreamAction.Request request = new DeleteDataStreamAction.Request(new String[] { SYSTEM_DATA_STREAM_NAME }); + DeleteDataStreamAction.Request request = new DeleteDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { SYSTEM_DATA_STREAM_NAME } + ); AcknowledgedResponse response = client().execute(DeleteDataStreamAction.INSTANCE, request).get(); assertTrue(response.isAcknowledged()); } @@ -221,7 +247,10 @@ public void testSystemDataStreamInFeatureState() throws Exception { assertEquals(restoreSnapshotResponse.getRestoreInfo().totalShards(), restoreSnapshotResponse.getRestoreInfo().successfulShards()); { - GetDataStreamAction.Request request = new GetDataStreamAction.Request(new String[] { SYSTEM_DATA_STREAM_NAME }); + GetDataStreamAction.Request request = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { SYSTEM_DATA_STREAM_NAME } + ); GetDataStreamAction.Response response = client().execute(GetDataStreamAction.INSTANCE, request).get(); assertThat(response.getDataStreams(), hasSize(1)); assertTrue(response.getDataStreams().get(0).getDataStream().isSystem()); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 24c373df72144..a0a0681dbd245 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; +import 
org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; @@ -35,6 +36,7 @@ import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -457,6 +459,16 @@ public void testTrimId() throws Exception { indexName = bulkResponse.getItems()[0].getIndex(); } client().admin().indices().refresh(new RefreshRequest(dataStreamName)).actionGet(); + + // In rare cases we can end up with a single segment shard, which means we can't trim away the _id later. + // So update an existing doc to create a new segment without adding a new document after force merging: + var indexRequest = new IndexRequest(indexName).setIfPrimaryTerm(1L) + .setIfSeqNo((numBulkRequests * numDocsPerBulk) - 1) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source(DOC.replace("$time", formatInstant(time.minusMillis(1))), XContentType.JSON); + var res = client().index(indexRequest).actionGet(); + assertThat(res.status(), equalTo(RestStatus.OK)); + assertThat(res.getVersion(), equalTo(2L)); } // Check whether there are multiple segments: @@ -494,7 +506,7 @@ public void testTrimId() throws Exception { assertThat(retentionLeasesStats.retentionLeases().leases(), hasSize(1)); assertThat( retentionLeasesStats.retentionLeases().leases().iterator().next().retainingSequenceNumber(), - equalTo((long) numBulkRequests * numDocsPerBulk) + equalTo((long) numBulkRequests * numDocsPerBulk + 1) ); }); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java index b8d7d18dec475..64e4cc2ba9577 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java @@ -142,10 +142,7 @@ protected Settings nodeSettings() { } public void testIndexingGettingAndSearching() throws Exception { - var templateSettings = Settings.builder() - .put("index.mode", "time_series") - .put("index.number_of_shards", randomIntBetween(2, 10)) - .put("index.number_of_replicas", 0); + var templateSettings = indexSettings(randomIntBetween(2, 10), 0).put("index.mode", "time_series"); var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( @@ -218,10 +215,7 @@ public void testIndexingGettingAndSearching() throws Exception { public void testIndexingGettingAndSearchingShrunkIndex() throws Exception { String dataStreamName = "k8s"; - var templateSettings = Settings.builder() - .put("index.mode", "time_series") - .put("index.number_of_shards", 8) - .put("index.number_of_replicas", 0); + var templateSettings = indexSettings(8, 0).put("index.mode", "time_series"); var request = new TransportPutComposableIndexTemplateAction.Request("id"); request.indexTemplate( diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java 
b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java index 30f6ef313c41c..27d8bdd2ac27c 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java @@ -43,23 +43,38 @@ public void testGetLifecycle() throws Exception { putComposableIndexTemplate("id2", null, List.of("without-lifecycle*"), null, null, null); { String dataStreamName = "with-lifecycle-1"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); } { String dataStreamName = "with-lifecycle-2"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); } { String dataStreamName = "without-lifecycle"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); } // Test retrieving all lifecycles { - GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request(new String[] { "*" }); + GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { "*" } + ); GetDataStreamLifecycleAction.Response response = client().execute( GetDataStreamLifecycleAction.INSTANCE, getDataLifecycleRequest @@ -77,6 +92,7 @@ public void testGetLifecycle() throws Exception { // Test retrieving all lifecycles prefixed wildcard { GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { "with-lifecycle*" } ); GetDataStreamLifecycleAction.Response response = client().execute( @@ -94,6 +110,7 @@ public void testGetLifecycle() throws Exception { // Test retrieving concrete data streams { GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { "with-lifecycle-1", "with-lifecycle-2" } ); GetDataStreamLifecycleAction.Response response = client().execute( @@ -108,6 +125,7 @@ public void testGetLifecycle() throws Exception { // Test include defaults GetDataStreamLifecycleAction.Request getDataLifecycleRequestWithDefaults = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { "*" } ).includeDefaults(true); GetDataStreamLifecycleAction.Response responseWithRollover = client().execute( @@ -128,11 +146,16 @@ public void testPutLifecycle() throws Exception { putComposableIndexTemplate("id1", null, List.of("my-data-stream*"), null, null, null); // Create index without a lifecycle String dataStreamName = 
"my-data-stream"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); { GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { "my-data-stream" } ); GetDataStreamLifecycleAction.Response response = client().execute( @@ -149,6 +172,8 @@ public void testPutLifecycle() throws Exception { { TimeValue dataRetention = randomBoolean() ? null : TimeValue.timeValueMillis(randomMillisUpToYear9999()); PutDataStreamLifecycleAction.Request putDataLifecycleRequest = new PutDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, new String[] { "*" }, dataRetention ); @@ -157,6 +182,7 @@ public void testPutLifecycle() throws Exception { equalTo(true) ); GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { "my-data-stream" } ); GetDataStreamLifecycleAction.Response response = client().execute( @@ -173,6 +199,8 @@ public void testPutLifecycle() throws Exception { { TimeValue dataRetention = randomBoolean() ? null : TimeValue.timeValueMillis(randomMillisUpToYear9999()); PutDataStreamLifecycleAction.Request putDataLifecycleRequest = new PutDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, new String[] { "*" }, dataRetention, false @@ -182,6 +210,7 @@ public void testPutLifecycle() throws Exception { equalTo(true) ); GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { "my-data-stream" } ); GetDataStreamLifecycleAction.Response response = client().execute( @@ -201,23 +230,36 @@ public void testDeleteLifecycle() throws Exception { putComposableIndexTemplate("id2", null, List.of("without-lifecycle*"), null, null, null); { String dataStreamName = "with-lifecycle-1"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); } { String dataStreamName = "with-lifecycle-2"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); } { String dataStreamName = "with-lifecycle-3"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); } // Verify that we have 3 data streams with lifecycles { GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + 
TEST_REQUEST_TIMEOUT, new String[] { "with-lifecycle*" } ); GetDataStreamLifecycleAction.Response response = client().execute( @@ -239,6 +281,7 @@ public void testDeleteLifecycle() throws Exception { equalTo(true) ); GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { "with-lifecycle*" } ); GetDataStreamLifecycleAction.Response response = client().execute( @@ -266,6 +309,7 @@ public void testDeleteLifecycle() throws Exception { equalTo(true) ); GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { "with-lifecycle*" } ); GetDataStreamLifecycleAction.Response response = client().execute( diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java index 9325bb41ab52e..9fafc23a388f4 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudSystemDataStreamLifecycleIT.java @@ -233,6 +233,7 @@ public String getFeatureDescription() { public void cleanUpFeature(ClusterService clusterService, Client client, ActionListener listener) { Collection dataStreamDescriptors = getSystemDataStreamDescriptors(); final DeleteDataStreamAction.Request request = new DeleteDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, dataStreamDescriptors.stream().map(SystemDataStreamDescriptor::getDataStreamName).toList().toArray(Strings.EMPTY_ARRAY) ); request.indicesOptions( diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index e7dfdcdaffa05..ee17521ad757d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -51,8 +51,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.datastreams.DataStreamsPlugin; -import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamGlobalRetentionAction; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamGlobalRetentionAction; import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService; import org.elasticsearch.health.Diagnosis; import org.elasticsearch.health.GetHealthAction; @@ -149,13 +147,20 @@ public void testRolloverLifecycle() throws Exception { putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle, false); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); assertBusy(() -> { - 
GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -175,13 +180,20 @@ public void testRolloverAndRetention() throws Exception { putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle, false); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -206,29 +218,19 @@ public void testSystemDataStreamRetention() throws Exception { AtomicLong now = new AtomicLong(clock.millis()); dataStreamLifecycleServices.forEach(dataStreamLifecycleService -> dataStreamLifecycleService.setNowSupplier(now::get)); try { - // Putting in place a global retention that we expect will be ignored by the system data stream: - final int globalRetentionSeconds = 10; - client().execute( - PutDataStreamGlobalRetentionAction.INSTANCE, - new PutDataStreamGlobalRetentionAction.Request( - TEST_REQUEST_TIMEOUT, - TimeValue.timeValueSeconds(globalRetentionSeconds), - TimeValue.timeValueSeconds(globalRetentionSeconds) - ) - ).actionGet(); try { - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(SYSTEM_DATA_STREAM_NAME); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + SYSTEM_DATA_STREAM_NAME + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet(); indexDocs(SYSTEM_DATA_STREAM_NAME, 1); - /* - * First we advance the time to well beyond the global retention (10s) but well under the configured retention (100d). - * We expect to see that rollover has occurred but that the old index has not been deleted since the global retention is - * ignored. 
- */ - now.addAndGet(TimeValue.timeValueSeconds(3 * globalRetentionSeconds).millis()); + now.addAndGet(TimeValue.timeValueSeconds(30).millis()); assertBusy(() -> { GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { SYSTEM_DATA_STREAM_NAME } ); GetDataStreamAction.Response getDataStreamResponse = client().execute( @@ -248,6 +250,7 @@ public void testSystemDataStreamRetention() throws Exception { now.addAndGet(TimeValue.timeValueDays(3 * TestSystemDataStreamPlugin.SYSTEM_DATA_STREAM_RETENTION_DAYS).millis()); assertBusy(() -> { GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { SYSTEM_DATA_STREAM_NAME } ); GetDataStreamAction.Response getDataStreamResponse = client().execute( @@ -297,12 +300,12 @@ public void testSystemDataStreamRetention() throws Exception { } }); - client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(SYSTEM_DATA_STREAM_NAME)).actionGet(); - } finally { client().execute( - DeleteDataStreamGlobalRetentionAction.INSTANCE, - new DeleteDataStreamGlobalRetentionAction.Request(TEST_REQUEST_TIMEOUT) - ); + DeleteDataStreamAction.INSTANCE, + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, SYSTEM_DATA_STREAM_NAME) + ).actionGet(); + } finally { + // reset properties } } finally { dataStreamLifecycleServices.forEach(dataStreamLifecycleService -> dataStreamLifecycleService.setNowSupplier(clock::millis)); @@ -320,7 +323,11 @@ public void testOriginationDate() throws Exception { putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle, false); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); @@ -347,7 +354,11 @@ public void testOriginationDate() throws Exception { createIndex(indexWithOldOriginationDate, Settings.builder().put(LIFECYCLE_ORIGINATION_DATE, originTimeMillis).build()); client().execute( ModifyDataStreamsAction.INSTANCE, - new ModifyDataStreamsAction.Request(List.of(DataStreamAction.addBackingIndex(dataStreamName, indexWithOldOriginationDate))) + new ModifyDataStreamsAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + List.of(DataStreamAction.addBackingIndex(dataStreamName, indexWithOldOriginationDate)) + ) ).get(); String indexWithNewOriginationDate = "index_new"; @@ -355,11 +366,18 @@ public void testOriginationDate() throws Exception { createIndex(indexWithNewOriginationDate, Settings.builder().put(LIFECYCLE_ORIGINATION_DATE, originTimeMillis).build()); client().execute( ModifyDataStreamsAction.INSTANCE, - new ModifyDataStreamsAction.Request(List.of(DataStreamAction.addBackingIndex(dataStreamName, indexWithNewOriginationDate))) + new ModifyDataStreamsAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + List.of(DataStreamAction.addBackingIndex(dataStreamName, indexWithNewOriginationDate)) + ) ).get(); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { 
dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -377,7 +395,11 @@ public void testUpdatingLifecycleAppliesToAllBackingIndices() throws Exception { putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle, false); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); client().admin().indices().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet(); client().admin().indices().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet(); @@ -387,7 +409,10 @@ public void testUpdatingLifecycleAppliesToAllBackingIndices() throws Exception { updateLifecycle(dataStreamName, TimeValue.timeValueMillis(1)); // Verify that the retention has changed for all backing indices assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -416,10 +441,7 @@ public void testAutomaticForceMerge() throws Exception { "id1", null, List.of(dataStreamName + "*"), - Settings.builder() - .put("index.number_of_replicas", 1) - .put("index.number_of_shards", 1) - .put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), ONE_HUNDRED_MB) + indexSettings(1, 1).put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), ONE_HUNDRED_MB) .put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), TARGET_MERGE_FACTOR_VALUE) .build(), null, @@ -441,7 +463,11 @@ public void testAutomaticForceMerge() throws Exception { }); } - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); int finalGeneration = randomIntBetween(3, 4); for (int currentGeneration = 1; currentGeneration < finalGeneration; currentGeneration++) { @@ -469,7 +495,10 @@ public void testAutomaticForceMerge() throws Exception { // run data stream lifecycle once dataStreamLifecycleService.run(clusterService.state()); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -524,14 +553,21 @@ public void 
testErrorRecordingOnRollover() throws Exception { ); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); // let's allow one rollover to go through assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -565,6 +601,7 @@ public void testErrorRecordingOnRollover() throws Exception { assertThat(writeIndexRolloverError.error(), containsString("maximum normal shards open")); ExplainDataStreamLifecycleAction.Request explainRequest = new ExplainDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { dataStreamName } ); ExplainDataStreamLifecycleAction.Response response = client().execute(ExplainDataStreamLifecycleAction.INSTANCE, explainRequest) @@ -675,13 +712,20 @@ public void testErrorRecordingOnRetention() throws Exception { ); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); // let's allow one rollover to go through assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -704,7 +748,10 @@ public void testErrorRecordingOnRetention() throws Exception { updateLifecycle(dataStreamName, TimeValue.timeValueSeconds(1)); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -774,7 +821,10 @@ public void testErrorRecordingOnRetention() throws Exception { updateIndexSettings(Settings.builder().put(READ_ONLY.settingName(), false), firstGenerationIndex); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new 
GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -838,13 +888,20 @@ public void testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception ); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); // let's allow one rollover to go through assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -921,7 +978,11 @@ public void testReenableDataStreamLifecycle() throws Exception { putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle, false); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 10); @@ -932,7 +993,7 @@ public void testReenableDataStreamLifecycle() throws Exception { ExplainDataStreamLifecycleAction.Response dataStreamLifecycleExplainResponse = client().execute( ExplainDataStreamLifecycleAction.INSTANCE, - new ExplainDataStreamLifecycleAction.Request(new String[] { writeIndex }) + new ExplainDataStreamLifecycleAction.Request(TEST_REQUEST_TIMEOUT, new String[] { writeIndex }) ).actionGet(); assertThat(dataStreamLifecycleExplainResponse.getIndices().size(), is(1)); for (ExplainIndexDataStreamLifecycle index : dataStreamLifecycleExplainResponse.getIndices()) { @@ -951,7 +1012,13 @@ public void testReenableDataStreamLifecycle() throws Exception { client().execute( PutDataStreamLifecycleAction.INSTANCE, - new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, null, true) + new PutDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName }, + null, + true + ) ); assertBusy(() -> { @@ -994,14 +1061,21 @@ public void testLifecycleAppliedToFailureStore() throws Exception { }""", List.of("metrics-fs*"), Settings.builder().put("index.number_of_replicas", 0).build(), null, lifecycle, true); String dataStreamName = "metrics-fs"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, 
createDataStreamRequest).get(); indexInvalidFlagDocs(dataStreamName, 1); // Let's verify the rollover assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -1043,7 +1117,10 @@ public void testLifecycleAppliedToFailureStore() throws Exception { // And finally apply retention assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -1057,7 +1134,10 @@ public void testLifecycleAppliedToFailureStore() throws Exception { } private static List getBackingIndices(String dataStreamName) { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -1066,7 +1146,10 @@ private static List getBackingIndices(String dataStreamName) { } private static List getFailureIndices(String dataStreamName) { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -1147,6 +1230,8 @@ static void putComposableIndexTemplate( static void updateLifecycle(String dataStreamName, TimeValue dataRetention) { PutDataStreamLifecycleAction.Request putDataLifecycleRequest = new PutDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }, dataRetention ); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index 571bfc05b6464..5a5b451a62c3a 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -30,8 +30,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.datastreams.DataStreamsPlugin; -import 
org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamGlobalRetentionAction; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamGlobalRetentionAction; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.plugins.Plugin; @@ -91,13 +89,20 @@ public void testExplainLifecycle() throws Exception { putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -112,6 +117,7 @@ public void testExplainLifecycle() throws Exception { { ExplainDataStreamLifecycleAction.Request explainIndicesRequest = new ExplainDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { DataStream.getDefaultBackingIndexName(dataStreamName, 1), DataStream.getDefaultBackingIndexName(dataStreamName, 2) } @@ -152,6 +158,7 @@ public void testExplainLifecycle() throws Exception { { // let's also explain with include_defaults=true ExplainDataStreamLifecycleAction.Request explainIndicesRequest = new ExplainDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { DataStream.getDefaultBackingIndexName(dataStreamName, 1), DataStream.getDefaultBackingIndexName(dataStreamName, 2) }, @@ -173,6 +180,7 @@ public void testExplainLifecycle() throws Exception { { // Let's also explain using the data stream name ExplainDataStreamLifecycleAction.Request explainIndicesRequest = new ExplainDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { dataStreamName } ); ExplainDataStreamLifecycleAction.Response response = client().execute( @@ -208,25 +216,22 @@ public void testSystemExplainLifecycle() throws Exception { * This test makes sure that for system data streams, we correctly ignore the global retention when calling * ExplainDataStreamLifecycle. It is very similar to testExplainLifecycle, but only focuses on the retention for a system index. 
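The hunks that follow drop the dedicated PutDataStreamGlobalRetentionAction / DeleteDataStreamGlobalRetentionAction calls; the new DataStreamGlobalRetentionIT further down drives global retention through cluster settings instead (`data_streams.lifecycle.retention.default` and `data_streams.lifecycle.retention.max`). As a reading aid only, here is a minimal sketch of that settings-based approach over the low-level REST client — roughly what the test's updateClusterSettings(...) helper does; the class name and test base are assumptions, not part of this change:

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase;

// Hypothetical illustration; not part of this diff.
public class GlobalRetentionSettingsSketchIT extends DisabledSecurityDataStreamTestCase {

    public void testSetAndClearGlobalRetention() throws Exception {
        // Set the default and max global retention via cluster settings.
        Request set = new Request("PUT", "/_cluster/settings");
        set.setJsonEntity("""
            {
              "persistent": {
                "data_streams.lifecycle.retention.default": "7d",
                "data_streams.lifecycle.retention.max": "90d"
              }
            }""");
        assertOK(client().performRequest(set));

        // Clearing works by nulling the same keys, as the new test's cleanUp() does.
        Request reset = new Request("PUT", "/_cluster/settings");
        reset.setJsonEntity("""
            {
              "persistent": {
                "data_streams.lifecycle.retention.default": null,
                "data_streams.lifecycle.retention.max": null
              }
            }""");
        assertOK(client().performRequest(reset));
    }
}
```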
*/ - // Putting in place a global retention that we expect will be ignored by the system data stream: - final int globalRetentionSeconds = 10; - client().execute( - PutDataStreamGlobalRetentionAction.INSTANCE, - new PutDataStreamGlobalRetentionAction.Request( - TEST_REQUEST_TIMEOUT, - TimeValue.timeValueSeconds(globalRetentionSeconds), - TimeValue.timeValueSeconds(globalRetentionSeconds) - ) - ).actionGet(); try { String dataStreamName = SYSTEM_DATA_STREAM_NAME; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -240,6 +245,7 @@ public void testSystemExplainLifecycle() throws Exception { }); ExplainDataStreamLifecycleAction.Request explainIndicesRequest = new ExplainDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { DataStream.getDefaultBackingIndexName(dataStreamName, 1), DataStream.getDefaultBackingIndexName(dataStreamName, 2) } @@ -261,10 +267,7 @@ public void testSystemExplainLifecycle() throws Exception { ); } } finally { - client().execute( - DeleteDataStreamGlobalRetentionAction.INSTANCE, - new DeleteDataStreamGlobalRetentionAction.Request(TEST_REQUEST_TIMEOUT) - ); + // reset properties } } @@ -275,14 +278,21 @@ public void testExplainLifecycleForIndicesWithErrors() throws Exception { putComposableIndexTemplate("id1", null, List.of("metrics-foo*"), null, null, lifecycle); String dataStreamName = "metrics-foo"; - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + dataStreamName + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 1); // let's allow one rollover to go through assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName } + ); GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) .actionGet(); assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); @@ -303,6 +313,7 @@ public void testExplainLifecycleForIndicesWithErrors() throws Exception { String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 2); assertBusy(() -> { ExplainDataStreamLifecycleAction.Request explainIndicesRequest = new ExplainDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { writeIndexName } ); ExplainDataStreamLifecycleAction.Response response = 
client().execute( @@ -334,6 +345,7 @@ public void testExplainLifecycleForIndicesWithErrors() throws Exception { assertBusy(() -> { ExplainDataStreamLifecycleAction.Request explainIndicesRequest = new ExplainDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { writeIndexName } ); ExplainDataStreamLifecycleAction.Response response = client().execute( @@ -363,7 +375,11 @@ public void testExplainDataStreamLifecycleForUnmanagedIndices() throws Exception null, DataStreamLifecycle.newBuilder().enabled(false).build() ); - CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request("metrics-foo"); + CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "metrics-foo" + ); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 4); @@ -371,6 +387,7 @@ public void testExplainDataStreamLifecycleForUnmanagedIndices() throws Exception String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 1); assertBusy(() -> { ExplainDataStreamLifecycleAction.Request explainIndicesRequest = new ExplainDataStreamLifecycleAction.Request( + TEST_REQUEST_TIMEOUT, new String[] { writeIndexName } ); ExplainDataStreamLifecycleAction.Response response = client().execute( diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamRestIT.java index 780864db8b629..1500674e3aee9 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamRestIT.java @@ -28,6 +28,7 @@ import java.util.Locale; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; public class LogsDataStreamRestIT extends ESRestTestCase { @@ -102,19 +103,53 @@ private static void waitForLogs(RestClient client) throws Exception { private static final String STANDARD_TEMPLATE = """ { - "index_patterns": [ "logs-*-*" ], - "data_stream": {}, - "priority": 201, - "template": { - "settings": { - "index": { - "mode": "standard" + "index_patterns": [ "logs-*-*" ], + "data_stream": {}, + "priority": 201, + "template": { + "settings": { + "index": { + "mode": "standard" + } + }, + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "host.name": { + "type": "keyword" + }, + "pid": { + "type": "long" + }, + "method": { + "type": "keyword" + }, + "ip_address": { + "type": "ip" } - }, - "mappings": { - "properties": { - "@timestamp" : { - "type": "date" + } + } + } + }"""; + + private static final String TIME_SERIES_TEMPLATE = """ + { + "index_patterns": [ "logs-*-*" ], + "data_stream": {}, + "priority": 201, + "template": { + "settings": { + "index": { + "mode": "time_series", + "look_ahead_time": "5m" + } + }, + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" }, "host.name": { "type": "keyword", @@ -129,6 +164,10 @@ private static void waitForLogs(RestClient client) throws Exception { }, "ip_address": { "type": "ip" + }, + "memory_usage_bytes": { + "type": "long", + "time_series_metric": "gauge" } } } @@ -142,7 +181,8 @@ private static void waitForLogs(RestClient client) throws Exception { "pid": "%d", "method": "%s", "message": "%s", - "ip_address": "%s" + "ip_address": "%s", + 
"memory_usage_bytes": "%d" } """; @@ -158,7 +198,8 @@ public void testLogsIndexing() throws IOException { randomNonNegativeLong(), randomFrom("PUT", "POST", "GET"), randomAlphaOfLength(32), - randomIp(randomBoolean()) + randomIp(randomBoolean()), + randomLongBetween(1_000_000L, 2_000_000L) ) ); assertDataStreamBackingIndexMode("logsdb", 0); @@ -172,7 +213,8 @@ public void testLogsIndexing() throws IOException { randomNonNegativeLong(), randomFrom("PUT", "POST", "GET"), randomAlphaOfLength(32), - randomIp(randomBoolean()) + randomIp(randomBoolean()), + randomLongBetween(1_000_000L, 2_000_000L) ) ); assertDataStreamBackingIndexMode("logsdb", 1); @@ -190,7 +232,8 @@ public void testLogsStandardIndexModeSwitch() throws IOException { randomNonNegativeLong(), randomFrom("PUT", "POST", "GET"), randomAlphaOfLength(32), - randomIp(randomBoolean()) + randomIp(randomBoolean()), + randomLongBetween(1_000_000L, 2_000_000L) ) ); assertDataStreamBackingIndexMode("logsdb", 0); @@ -206,7 +249,8 @@ public void testLogsStandardIndexModeSwitch() throws IOException { randomNonNegativeLong(), randomFrom("PUT", "POST", "GET"), randomAlphaOfLength(64), - randomIp(randomBoolean()) + randomIp(randomBoolean()), + randomLongBetween(1_000_000L, 2_000_000L) ) ); assertDataStreamBackingIndexMode("standard", 1); @@ -222,7 +266,61 @@ public void testLogsStandardIndexModeSwitch() throws IOException { randomNonNegativeLong(), randomFrom("PUT", "POST", "GET"), randomAlphaOfLength(32), - randomIp(randomBoolean()) + randomIp(randomBoolean()), + randomLongBetween(1_000_000L, 2_000_000L) + ) + ); + assertDataStreamBackingIndexMode("logsdb", 2); + } + + public void testLogsTimeSeriesIndexModeSwitch() throws IOException { + putTemplate(client, "custom-template", LOGS_TEMPLATE); + createDataStream(client, DATA_STREAM_NAME); + indexDocument( + client, + DATA_STREAM_NAME, + document( + Instant.now(), + randomAlphaOfLength(10), + randomNonNegativeLong(), + randomFrom("PUT", "POST", "GET"), + randomAlphaOfLength(32), + randomIp(randomBoolean()), + randomLongBetween(1_000_000L, 2_000_000L) + ) + ); + assertDataStreamBackingIndexMode("logsdb", 0); + + putTemplate(client, "custom-template", TIME_SERIES_TEMPLATE); + rolloverDataStream(client, DATA_STREAM_NAME); + indexDocument( + client, + DATA_STREAM_NAME, + document( + Instant.now().plusSeconds(10), + randomAlphaOfLength(10), + randomNonNegativeLong(), + randomFrom("PUT", "POST", "GET"), + randomAlphaOfLength(64), + randomIp(randomBoolean()), + randomLongBetween(1_000_000L, 2_000_000L) + ) + ); + assertDataStreamBackingIndexMode("time_series", 1); + + putTemplate(client, "custom-template", LOGS_TEMPLATE); + rolloverDataStream(client, DATA_STREAM_NAME); + indexDocument( + client, + DATA_STREAM_NAME, + document( + Instant.now().plusSeconds(320), // 5 mins index.look_ahead_time + randomAlphaOfLength(10), + randomNonNegativeLong(), + randomFrom("PUT", "POST", "GET"), + randomAlphaOfLength(32), + randomIp(randomBoolean()), + randomLongBetween(1_000_000L, 2_000_000L) ) ); assertDataStreamBackingIndexMode("logsdb", 2); @@ -238,17 +336,19 @@ private String document( long pid, final String method, final String message, - final InetAddress ipAddress + final InetAddress ipAddress, + long memoryUsageBytes ) { return String.format( Locale.ROOT, DOC_TEMPLATE, - DateFormatter.forPattern(FormatNames.DATE.getName()).format(timestamp), + DateFormatter.forPattern(FormatNames.DATE_TIME.getName()).format(timestamp), hostname, pid, method, message, - InetAddresses.toAddrString(ipAddress) + 
InetAddresses.toAddrString(ipAddress), + memoryUsageBytes ); } @@ -266,7 +366,9 @@ private static void putTemplate(final RestClient client, final String templateNa private static void indexDocument(final RestClient client, String dataStreamName, String doc) throws IOException { final Request request = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true"); request.setJsonEntity(doc); - assertOK(client.performRequest(request)); + final Response response = client.performRequest(request); + assertOK(response); + assertThat(entityAsMap(response).get("result"), equalTo("created")); } private static void rolloverDataStream(final RestClient client, final String dataStreamName) throws IOException { diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java new file mode 100644 index 0000000000000..514eb6d8742ea --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.datastreams.lifecycle; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.WarningFailureException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class DataStreamGlobalRetentionIT extends DisabledSecurityDataStreamTestCase { + + @Before + public void setup() throws IOException { + updateClusterSettings( + Settings.builder() + .put("data_streams.lifecycle.poll_interval", "1s") + .put("cluster.lifecycle.default.rollover", "min_docs=1,max_docs=1") + .build() + ); + // Create a template with the default lifecycle + Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); + putComposableIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": ["my-data-stream*"], + "data_stream": {}, + "template": { + "lifecycle": {} + } + } + """); + assertOK(client().performRequest(putComposableIndexTemplateRequest)); + + // Create a data streams with one doc + Request createDocRequest = new Request("POST", "/my-data-stream/_doc?refresh=true"); + createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}"); + assertOK(client().performRequest(createDocRequest)); + } + + @After + public void cleanUp() throws IOException { + adminClient().performRequest(new Request("DELETE", "_data_stream/*")); + updateClusterSettings( + Settings.builder().putNull("data_streams.lifecycle.retention.default").putNull("data_streams.lifecycle.retention.max").build() + ); + } + + @SuppressWarnings("unchecked") + public void testDataStreamRetention() throws Exception { + // Set global retention and add 
retention to the data stream + { + updateClusterSettings( + Settings.builder() + .put("data_streams.lifecycle.retention.default", "7d") + .put("data_streams.lifecycle.retention.default", "90d") + .build() + ); + Request request = new Request("PUT", "_data_stream/my-data-stream/_lifecycle"); + request.setJsonEntity(""" + { + "data_retention": "10s" + }"""); + assertAcknowledged(client().performRequest(request)); + } + + // Verify that the effective retention matches the default retention + { + Request request = new Request("GET", "/_data_stream/my-data-stream"); + Response response = client().performRequest(request); + List dataStreams = (List) entityAsMap(response).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + Map lifecycle = (Map) dataStream.get("lifecycle"); + assertThat(lifecycle.get("effective_retention"), is("10s")); + assertThat(lifecycle.get("retention_determined_by"), is("data_stream_configuration")); + assertThat(lifecycle.get("data_retention"), is("10s")); + } + + // Verify that the first generation index was removed + assertBusy(() -> { + Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream")); + Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + List backingIndices = (List) dataStream.get("indices"); + assertThat(backingIndices.size(), is(1)); + // 2 backing indices created + 1 for the deleted index + assertThat(dataStream.get("generation"), is(3)); + }, 20, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + public void testDefaultRetention() throws Exception { + // Set default global retention + updateClusterSettings(Settings.builder().put("data_streams.lifecycle.retention.default", "10s").build()); + + // Verify that the effective retention matches the default retention + { + Request request = new Request("GET", "/_data_stream/my-data-stream"); + Response response = client().performRequest(request); + List dataStreams = (List) entityAsMap(response).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + Map lifecycle = (Map) dataStream.get("lifecycle"); + assertThat(lifecycle.get("effective_retention"), is("10s")); + assertThat(lifecycle.get("retention_determined_by"), is("default_global_retention")); + assertThat(lifecycle.get("data_retention"), nullValue()); + } + + // Verify that the first generation index was removed + assertBusy(() -> { + Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream")); + Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + List backingIndices = (List) dataStream.get("indices"); + assertThat(backingIndices.size(), is(1)); + // 2 backing indices created + 1 for the deleted index + assertThat(dataStream.get("generation"), is(3)); + }, 20, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + public void testMaxRetention() throws Exception { + // Set default global retention + updateClusterSettings(Settings.builder().put("data_streams.lifecycle.retention.max", "10s").build()); + boolean withDataStreamLevelRetention = randomBoolean(); + if (withDataStreamLevelRetention) { + try { + Request request = new Request("PUT", 
"_data_stream/my-data-stream/_lifecycle"); + request.setJsonEntity(""" + { + "data_retention": "30d" + }"""); + assertAcknowledged(client().performRequest(request)); + fail("Should have returned a warning about data retention exceeding the max retention"); + } catch (WarningFailureException warningFailureException) { + assertThat( + warningFailureException.getMessage(), + containsString("The retention provided [30d] is exceeding the max allowed data retention of this project [10s]") + ); + } + } + + // Verify that the effective retention matches the max retention + { + Request request = new Request("GET", "/_data_stream/my-data-stream"); + Response response = client().performRequest(request); + List dataStreams = (List) entityAsMap(response).get("data_streams"); + assertThat(dataStreams.size(), is(1)); + Map dataStream = (Map) dataStreams.get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + Map lifecycle = (Map) dataStream.get("lifecycle"); + assertThat(lifecycle.get("effective_retention"), is("10s")); + assertThat(lifecycle.get("retention_determined_by"), is("max_global_retention")); + if (withDataStreamLevelRetention) { + assertThat(lifecycle.get("data_retention"), is("30d")); + } else { + assertThat(lifecycle.get("data_retention"), nullValue()); + } + } + + // Verify that the first generation index was removed + assertBusy(() -> { + Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream")); + Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0); + assertThat(dataStream.get("name"), is("my-data-stream")); + List backingIndices = (List) dataStream.get("indices"); + assertThat(backingIndices.size(), is(1)); + // 2 backing indices created + 1 for the deleted index + assertThat(dataStream.get("generation"), is(3)); + }, 20, TimeUnit.SECONDS); + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeCustomSettingsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeCustomSettingsIT.java index 90e904b554c8d..d3a2867fe2ecd 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeCustomSettingsIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeCustomSettingsIT.java @@ -111,7 +111,7 @@ public void testConfigureStoredSource() throws IOException { e.getMessage(), containsString("updating component template [logs@custom] results in invalid composable template [logs]") ); - assertThat(e.getMessage(), containsString("Indices with with index mode [logs] only support synthetic source")); + assertThat(e.getMessage(), containsString("Indices with with index mode [logsdb] only support synthetic source")); assertOK(createDataStream(client, "logs-custom-dev")); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/AbstractChallengeRestTest.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/AbstractChallengeRestTest.java index 8ee0e4d715c4c..1d36a04657e9c 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/AbstractChallengeRestTest.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/AbstractChallengeRestTest.java @@ -186,25 +186,27 @@ private void deleteTemplates() throws IOException { assert deleteContenderTemplate.getStatusLine().getStatusCode() == 
RestStatus.OK.getStatus(); } - private Settings.Builder createSettings(final CheckedConsumer settingsConsumer) throws IOException { + private Settings.Builder createSettings( + final CheckedConsumer settingsConsumer, + final CheckedConsumer commonSettingsConsumer + ) throws IOException { final Settings.Builder settings = Settings.builder(); settingsConsumer.accept(settings); + commonSettingsConsumer.accept(settings); return settings; } private Settings.Builder createBaselineSettings() throws IOException { - return createSettings(this::baselineSettings); + return createSettings(this::baselineSettings, this::commonSettings); } private Settings.Builder createContenderSettings() throws IOException { - return createSettings(this::contenderSettings); + return createSettings(this::contenderSettings, this::commonSettings); } private XContentBuilder createMappings(final CheckedConsumer builderConsumer) throws IOException { final XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); builderConsumer.accept(builder); - builder.endObject(); return builder; } @@ -224,14 +226,18 @@ public void baselineSettings(Settings.Builder builder) {} public void contenderSettings(Settings.Builder builder) {} + public void commonSettings(Settings.Builder builder) {} + private Response indexDocuments( final String dataStreamName, final CheckedSupplier, IOException> documentsSupplier ) throws IOException { final StringBuilder sb = new StringBuilder(); + int id = 0; for (var document : documentsSupplier.get()) { - sb.append("{ \"create\": {} }").append("\n"); + sb.append(Strings.format("{ \"create\": { \"_id\" : \"%d\" } }", id)).append("\n"); sb.append(Strings.toString(document)).append("\n"); + id++; } var request = new Request("POST", "/" + dataStreamName + "/_bulk"); request.setJsonEntity(sb.toString()); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeChallengeRestIT.java index 79e96b7446042..9bf1c394f9105 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeChallengeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeChallengeRestIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.time.FormatNames; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Tuple; -import org.elasticsearch.datastreams.logsdb.qa.exceptions.MatcherException; +import org.elasticsearch.datastreams.logsdb.qa.matchers.MatchResult; import org.elasticsearch.datastreams.logsdb.qa.matchers.Matcher; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; @@ -34,30 +34,85 @@ import java.io.IOException; import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; import java.time.temporal.ChronoUnit; import java.util.ArrayList; +import java.util.Comparator; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Basic challenge test - we index same documents into an index with standard index mode and an index with logsdb index mode. + * Then we verify that results of common operations are the same modulo knows differences like synthetic source modifications. 
+ * This test uses simple mapping and document structure in order to allow easier debugging of the test itself. + */ public class StandardVersusLogsIndexModeChallengeRestIT extends AbstractChallengeRestTest { + private final int numShards = randomBoolean() ? randomIntBetween(2, 5) : 0; + private final int numReplicas = randomBoolean() ? randomIntBetween(1, 3) : 0; + private final boolean fullyDynamicMapping = randomBoolean(); public StandardVersusLogsIndexModeChallengeRestIT() { - super("logs-apache-baseline", "logs-apache-contender", "baseline-template", "contender-template", 99, 99); + super("standard-apache-baseline", "logs-apache-contender", "baseline-template", "contender-template", 101, 101); } @Override public void baselineMappings(XContentBuilder builder) throws IOException { - mappings(builder); + if (fullyDynamicMapping == false) { + builder.startObject() + .startObject("properties") + + .startObject("@timestamp") + .field("type", "date") + .endObject() + + .startObject("host.name") + .field("type", "keyword") + .field("ignore_above", randomIntBetween(1000, 1200)) + .endObject() + + .startObject("message") + .field("type", "keyword") + .field("ignore_above", randomIntBetween(1000, 1200)) + .endObject() + + .startObject("method") + .field("type", "keyword") + .field("ignore_above", randomIntBetween(1000, 1200)) + .endObject() + + .startObject("memory_usage_bytes") + .field("type", "long") + .field("ignore_malformed", randomBoolean()) + .endObject() + + .endObject() + + .endObject(); + } else { + // We want dynamic mapping, but we need host.name to be a keyword instead of text to support aggregations. + builder.startObject() + .startObject("properties") + + .startObject("host.name") + .field("type", "keyword") + .field("ignore_above", randomIntBetween(1000, 1200)) + .endObject() + + .endObject() + .endObject(); + } } @Override public void contenderMappings(XContentBuilder builder) throws IOException { - mappings(builder); - } - - private static void mappings(final XContentBuilder builder) throws IOException { + builder.startObject(); builder.field("subobjects", false); - if (randomBoolean()) { + + if (fullyDynamicMapping == false) { builder.startObject("properties") .startObject("@timestamp") @@ -86,27 +141,28 @@ private static void mappings(final XContentBuilder builder) throws IOException { .endObject(); } + + builder.endObject(); } - private static void settings(final Settings.Builder settings) { - if (randomBoolean()) { - settings.put("index.number_of_shards", randomIntBetween(2, 5)); + @Override + public void commonSettings(Settings.Builder builder) { + if (numShards > 0) { + builder.put("index.number_of_shards", numShards); } - if (randomBoolean()) { - settings.put("index.number_of_replicas", randomIntBetween(1, 3)); + if (numReplicas > 0) { + builder.put("index.number_of_replicas", numReplicas); } + builder.put("index.mapping.total_fields.limit", 5000); } @Override public void contenderSettings(Settings.Builder builder) { builder.put("index.mode", "logsdb"); - settings(builder); } @Override - public void baselineSettings(Settings.Builder builder) { - settings(builder); - } + public void baselineSettings(Settings.Builder builder) {} @Override public void beforeStart() throws Exception { @@ -124,51 +180,45 @@ protected static void waitForLogs(RestClient client) throws Exception { }); } - @SuppressWarnings("unchecked") - public void testMatchAllQuery() throws IOException, MatcherException { - final List documents = new ArrayList<>(); + public void testMatchAllQuery() throws 
IOException { int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); - for (int i = 0; i < numberOfDocuments; i++) { - documents.add(generateDocument(Instant.now().plus(i, ChronoUnit.SECONDS))); - } + final List documents = generateDocuments(numberOfDocuments); assertDocumentIndexing(documents); final SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) .size(numberOfDocuments); - Matcher.mappings(getContenderMappings(), getBaselineMappings()) + final MatchResult matchResult = Matcher.matchSource() + .mappings(getContenderMappings(), getBaselineMappings()) .settings(getContenderSettings(), getBaselineSettings()) .expected(getQueryHits(queryBaseline(searchSourceBuilder))) .ignoringSort(true) .isEqualTo(getQueryHits(queryContender(searchSourceBuilder))); + assertTrue(matchResult.getMessage(), matchResult.isMatch()); } - public void testTermsQuery() throws IOException, MatcherException { - final List documents = new ArrayList<>(); - int numberOfDocuments = randomIntBetween(100, 200); - for (int i = 0; i < numberOfDocuments; i++) { - documents.add(generateDocument(Instant.now().plus(i, ChronoUnit.SECONDS))); - } + public void testTermsQuery() throws IOException { + int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + final List documents = generateDocuments(numberOfDocuments); assertDocumentIndexing(documents); final SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(QueryBuilders.termQuery("method", "put")) .size(numberOfDocuments); - Matcher.mappings(getContenderMappings(), getBaselineMappings()) + final MatchResult matchResult = Matcher.matchSource() + .mappings(getContenderMappings(), getBaselineMappings()) .settings(getContenderSettings(), getBaselineSettings()) .expected(getQueryHits(queryBaseline(searchSourceBuilder))) .ignoringSort(true) .isEqualTo(getQueryHits(queryContender(searchSourceBuilder))); + assertTrue(matchResult.getMessage(), matchResult.isMatch()); } - public void testHistogramAggregation() throws IOException, MatcherException { - final List documents = new ArrayList<>(); - int numberOfDocuments = randomIntBetween(100, 200); - for (int i = 0; i < numberOfDocuments; i++) { - documents.add(generateDocument(Instant.now().plus(i, ChronoUnit.SECONDS))); - } + public void testHistogramAggregation() throws IOException { + int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + final List documents = generateDocuments(numberOfDocuments); assertDocumentIndexing(documents); @@ -176,39 +226,35 @@ public void testHistogramAggregation() throws IOException, MatcherException { .size(numberOfDocuments) .aggregation(new HistogramAggregationBuilder("agg").field("memory_usage_bytes").interval(100.0D)); - Matcher.mappings(getContenderMappings(), getBaselineMappings()) + final MatchResult matchResult = Matcher.mappings(getContenderMappings(), getBaselineMappings()) .settings(getContenderSettings(), getBaselineSettings()) .expected(getAggregationBuckets(queryBaseline(searchSourceBuilder), "agg")) .ignoringSort(true) .isEqualTo(getAggregationBuckets(queryContender(searchSourceBuilder), "agg")); + assertTrue(matchResult.getMessage(), matchResult.isMatch()); } - public void testTermsAggregation() throws IOException, MatcherException { - final List documents = new ArrayList<>(); - int numberOfDocuments = randomIntBetween(100, 200); - for (int i = 0; i < numberOfDocuments; i++) { - documents.add(generateDocument(Instant.now().plus(i, ChronoUnit.SECONDS))); - } + public void testTermsAggregation() 
throws IOException { + int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + final List documents = generateDocuments(numberOfDocuments); assertDocumentIndexing(documents); final SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) .size(0) - .aggregation(new TermsAggregationBuilder("agg").field("host.name")); + .aggregation(new TermsAggregationBuilder("agg").field("host.name").size(numberOfDocuments)); - Matcher.mappings(getContenderMappings(), getBaselineMappings()) + final MatchResult matchResult = Matcher.mappings(getContenderMappings(), getBaselineMappings()) .settings(getContenderSettings(), getBaselineSettings()) .expected(getAggregationBuckets(queryBaseline(searchSourceBuilder), "agg")) .ignoringSort(true) .isEqualTo(getAggregationBuckets(queryContender(searchSourceBuilder), "agg")); + assertTrue(matchResult.getMessage(), matchResult.isMatch()); } - public void testDateHistogramAggregation() throws IOException, MatcherException { - final List documents = new ArrayList<>(); - int numberOfDocuments = randomIntBetween(100, 200); - for (int i = 0; i < numberOfDocuments; i++) { - documents.add(generateDocument(Instant.now().plus(i, ChronoUnit.SECONDS))); - } + public void testDateHistogramAggregation() throws IOException { + int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + final List documents = generateDocuments(numberOfDocuments); assertDocumentIndexing(documents); @@ -216,14 +262,26 @@ public void testDateHistogramAggregation() throws IOException, MatcherException .aggregation(AggregationBuilders.dateHistogram("agg").field("@timestamp").calendarInterval(DateHistogramInterval.SECOND)) .size(0); - Matcher.mappings(getContenderMappings(), getBaselineMappings()) + final MatchResult matchResult = Matcher.mappings(getContenderMappings(), getBaselineMappings()) .settings(getContenderSettings(), getBaselineSettings()) .expected(getAggregationBuckets(queryBaseline(searchSourceBuilder), "agg")) .ignoringSort(true) .isEqualTo(getAggregationBuckets(queryContender(searchSourceBuilder), "agg")); + assertTrue(matchResult.getMessage(), matchResult.isMatch()); } - private static XContentBuilder generateDocument(final Instant timestamp) throws IOException { + private List generateDocuments(int numberOfDocuments) throws IOException { + final List documents = new ArrayList<>(); + // This is static in order to be able to identify documents between test runs. 
+ var startingPoint = ZonedDateTime.of(2024, 1, 1, 10, 0, 0, 0, ZoneId.of("UTC")).toInstant(); + for (int i = 0; i < numberOfDocuments; i++) { + documents.add(generateDocument(startingPoint.plus(i, ChronoUnit.SECONDS))); + } + + return documents; + } + + protected XContentBuilder generateDocument(final Instant timestamp) throws IOException { return XContentFactory.jsonBuilder() .startObject() .field("@timestamp", DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(timestamp)) @@ -238,8 +296,14 @@ private static XContentBuilder generateDocument(final Instant timestamp) throws private static List> getQueryHits(final Response response) throws IOException { final Map map = XContentHelper.convertToMap(XContentType.JSON.xContent(), response.getEntity().getContent(), true); final Map hitsMap = (Map) map.get("hits"); + final List> hitsList = (List>) hitsMap.get("hits"); - return hitsList.stream().map(hit -> (Map) hit.get("_source")).toList(); + assertThat(hitsList.size(), greaterThan(0)); + + return hitsList.stream() + .sorted(Comparator.comparingInt((Map hit) -> Integer.parseInt((String) hit.get("_id")))) + .map(hit -> (Map) hit.get("_source")) + .toList(); } @SuppressWarnings("unchecked") @@ -247,13 +311,22 @@ private static List> getAggregationBuckets(final Response re final Map map = XContentHelper.convertToMap(XContentType.JSON.xContent(), response.getEntity().getContent(), true); final Map aggs = (Map) map.get("aggregations"); final Map agg = (Map) aggs.get(aggName); - return (List>) agg.get("buckets"); + + var buckets = (List>) agg.get("buckets"); + assertThat(buckets.size(), greaterThan(0)); + + return buckets; } private void assertDocumentIndexing(List documents) throws IOException { final Tuple tuple = indexDocuments(() -> documents, () -> documents); + assertThat(tuple.v1().getStatusLine().getStatusCode(), Matchers.equalTo(RestStatus.OK.getStatus())); + var baselineResponseBody = entityAsMap(tuple.v1()); + assertThat("errors in baseline bulk response:\n " + baselineResponseBody, baselineResponseBody.get("errors"), equalTo(false)); + assertThat(tuple.v2().getStatusLine().getStatusCode(), Matchers.equalTo(RestStatus.OK.getStatus())); + var contenderResponseBody = entityAsMap(tuple.v2()); + assertThat("errors in contender bulk response:\n " + contenderResponseBody, contenderResponseBody.get("errors"), equalTo(false)); } - } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java new file mode 100644 index 0000000000000..8bd62480f333d --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams.logsdb.qa; + +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.logsdb.datageneration.DataGenerator; +import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification; +import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; +import org.elasticsearch.logsdb.datageneration.FieldType; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceHandler; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; +import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse; +import org.elasticsearch.logsdb.datageneration.fields.PredefinedField; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; + +import java.io.IOException; +import java.time.Instant; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Challenge test (see {@link StandardVersusLogsIndexModeChallengeRestIT}) that uses randomly generated + * mapping and documents in order to cover more code paths and permutations. + */ +public class StandardVersusLogsIndexModeRandomDataChallengeRestIT extends StandardVersusLogsIndexModeChallengeRestIT { + private final ObjectMapper.Subobjects subobjects; + + private final DataGenerator dataGenerator; + + public StandardVersusLogsIndexModeRandomDataChallengeRestIT() { + super(); + this.subobjects = randomFrom(ObjectMapper.Subobjects.values()); + + var specificationBuilder = DataGeneratorSpecification.builder().withFullyDynamicMapping(randomBoolean()); + if (subobjects != ObjectMapper.Subobjects.ENABLED) { + specificationBuilder = specificationBuilder.withNestedFieldsLimit(0); + } + this.dataGenerator = new DataGenerator(specificationBuilder.withDataSourceHandlers(List.of(new DataSourceHandler() { + @Override + public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequest.ObjectMappingParametersGenerator request) { + if (subobjects == ObjectMapper.Subobjects.ENABLED) { + // Use default behavior + return null; + } + + assert request.isNested() == false; + + // "enabled: false" is not compatible with subobjects: false + // "dynamic: false/strict/runtime" is not compatible with subobjects: false + return new DataSourceResponse.ObjectMappingParametersGenerator(() -> { + var parameters = new HashMap(); + parameters.put("subobjects", subobjects.toString()); + if (ESTestCase.randomBoolean()) { + parameters.put("dynamic", "true"); + } + if (ESTestCase.randomBoolean()) { + parameters.put("enabled", "true"); + } + return parameters; + }); + } + })) + .withPredefinedFields( + List.of( + new PredefinedField.WithType("host.name", FieldType.KEYWORD), + // Needed for terms query + new PredefinedField.WithGenerator("method", new FieldDataGenerator() { + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "keyword").endObject(); + } + + @Override + public CheckedConsumer fieldValueGenerator() { + return b -> b.value(randomFrom("put", "post", "get")); + } + }), + + // Needed for histogram aggregation + new PredefinedField.WithGenerator("memory_usage_bytes", new FieldDataGenerator() { + @Override + public CheckedConsumer mappingWriter() { + return b -> b.startObject().field("type", "long").endObject(); + } + + @Override + public 
CheckedConsumer fieldValueGenerator() { + // We can generate this using standard long field but we would get "too many buckets" + return b -> b.value(randomLongBetween(1000, 2000)); + } + }) + ) + ) + .build()); + } + + @Override + public void baselineMappings(XContentBuilder builder) throws IOException { + dataGenerator.writeMapping(builder); + } + + @Override + public void contenderMappings(XContentBuilder builder) throws IOException { + if (subobjects != ObjectMapper.Subobjects.ENABLED) { + dataGenerator.writeMapping(builder, Map.of("subobjects", subobjects.toString())); + } else { + dataGenerator.writeMapping(builder); + } + } + + @Override + protected XContentBuilder generateDocument(final Instant timestamp) throws IOException { + var document = XContentFactory.jsonBuilder(); + dataGenerator.generateDocument(document, doc -> { + doc.field("@timestamp", DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(timestamp)); + }); + + return document; + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/MatcherException.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/MatcherException.java deleted file mode 100644 index 442e09f67ac1f..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/MatcherException.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.datastreams.logsdb.qa.exceptions; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xcontent.XContentBuilder; - -/** - * Generic base class for all types of mismatch errors. 
- */ -public class MatcherException extends Exception { - - public MatcherException( - final XContentBuilder actualMappings, - final Settings.Builder actualSettings, - final XContentBuilder expectedMappings, - final Settings.Builder expectedSettings, - final String errorMessage - ) { - super(errorMessage(actualMappings, actualSettings, expectedMappings, expectedSettings, errorMessage)); - } - - private static String errorMessage( - final XContentBuilder actualMappings, - final Settings.Builder actualSettings, - final XContentBuilder expectedMappings, - final Settings.Builder expectedSettings, - final String errorMessage - ) { - return "Error [" - + errorMessage - + "] " - + "actual mappings [" - + Strings.toString(actualMappings) - + "] " - + "actual settings [" - + Strings.toString(actualSettings.build()) - + "] " - + "expected mappings [" - + Strings.toString(expectedMappings) - + "] " - + "expected settings [" - + Strings.toString(expectedSettings.build()) - + "] "; - } -} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/MismatchTypeMatcherException.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/MismatchTypeMatcherException.java deleted file mode 100644 index d0c9b5f544500..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/MismatchTypeMatcherException.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.datastreams.logsdb.qa.exceptions; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xcontent.XContentBuilder; - -public class MismatchTypeMatcherException extends MatcherException { - public MismatchTypeMatcherException( - XContentBuilder actualMappings, - Settings.Builder actualSettings, - XContentBuilder expectedMappings, - Settings.Builder expectedSettings, - String errorMessage - ) { - super(actualMappings, actualSettings, expectedMappings, expectedSettings, errorMessage); - } -} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/NotEqualMatcherException.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/NotEqualMatcherException.java deleted file mode 100644 index 8cd9946e7f065..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/NotEqualMatcherException.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.datastreams.logsdb.qa.exceptions; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xcontent.XContentBuilder; - -public class NotEqualMatcherException extends MatcherException { - public NotEqualMatcherException( - XContentBuilder actualMappings, - Settings.Builder actualSettings, - XContentBuilder expectedMappings, - Settings.Builder expectedSettings, - String errorMessage - ) { - super(actualMappings, actualSettings, expectedMappings, expectedSettings, errorMessage); - } -} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/UncomparableMatcherException.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/UncomparableMatcherException.java deleted file mode 100644 index 44455ec2783f6..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/exceptions/UncomparableMatcherException.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.datastreams.logsdb.qa.exceptions; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xcontent.XContentBuilder; - -public class UncomparableMatcherException extends MatcherException { - public UncomparableMatcherException( - XContentBuilder actualMappings, - Settings.Builder actualSettings, - XContentBuilder expectedMappings, - Settings.Builder expectedSettings, - String errorMessage - ) { - super(actualMappings, actualSettings, expectedMappings, expectedSettings, errorMessage); - } -} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ArrayEqualMatcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ArrayEqualMatcher.java new file mode 100644 index 0000000000000..a25c525a657d0 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ArrayEqualMatcher.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.formatErrorMessage; +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.prettyPrintArrays; + +class ArrayEqualMatcher extends GenericEqualsMatcher { + ArrayEqualMatcher( + final XContentBuilder actualMappings, + final Settings.Builder actualSettings, + final XContentBuilder expectedMappings, + final Settings.Builder expectedSettings, + final Object[] actual, + final Object[] expected, + boolean ignoringSort + ) { + super(actualMappings, actualSettings, expectedMappings, expectedSettings, actual, expected, ignoringSort); + } + + @Override + public MatchResult match() { + return matchArraysEqual(actual, expected, ignoringSort); + } + + private MatchResult matchArraysEqual(final Object[] actualArray, final Object[] expectedArray, boolean ignoreSorting) { + if (actualArray.length != expectedArray.length) { + return MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Array lengths do no match, " + prettyPrintArrays(actualArray, expectedArray) + ) + ); + } + if (ignoreSorting) { + return matchArraysEqualIgnoringSorting(actualArray, expectedArray) + ? MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Arrays do not match when ignoring sort order, " + prettyPrintArrays(actualArray, expectedArray) + ) + ); + } else { + return matchArraysEqualExact(actualArray, expectedArray) + ? MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Arrays do not match exactly, " + prettyPrintArrays(actualArray, expectedArray) + ) + ); + } + } + + private static boolean matchArraysEqualIgnoringSorting(final Object[] actualArray, final Object[] expectedArray) { + final List actualList = Arrays.asList(actualArray); + final List expectedList = Arrays.asList(expectedArray); + return actualList.containsAll(expectedList) && expectedList.containsAll(actualList); + } + + private static boolean matchArraysEqualExact(T[] actualArray, T[] expectedArray) { + for (int i = 0; i < actualArray.length; i++) { + boolean isEqual = actualArray[i].equals(expectedArray[i]); + if (isEqual == false) { + return false; + } + } + return true; + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/EqualMatcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/EqualMatcher.java deleted file mode 100644 index b3621f2596ae4..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/EqualMatcher.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
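One note on the ArrayEqualMatcher shown just above (and the ListEqualMatcher later in this diff): the order-insensitive branch uses mutual containsAll, which compares the inputs as sets, so two arrays of equal length that differ only in how often an element repeats would still be reported as matching. If duplicate counts ever need to be respected, a count-aware check is straightforward; the sketch below is purely illustrative and not part of this change:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch (not part of this diff): order-insensitive comparison that also respects
// duplicate counts, unlike the mutual containsAll check used by the matchers above.
final class MultisetEquality {
    static boolean equalIgnoringOrder(List<?> actual, List<?> expected) {
        if (actual.size() != expected.size()) {
            return false;
        }
        // Count occurrences in the actual list.
        Map<Object, Integer> counts = new HashMap<>();
        for (Object value : actual) {
            counts.merge(value, 1, Integer::sum);
        }
        // Consume counts with the expected list; any shortfall means no match.
        for (Object value : expected) {
            Integer remaining = counts.get(value);
            if (remaining == null) {
                return false;
            }
            if (remaining == 1) {
                counts.remove(value);
            } else {
                counts.put(value, remaining - 1);
            }
        }
        return counts.isEmpty();
    }
}
```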
- */ - -package org.elasticsearch.datastreams.logsdb.qa.matchers; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.datastreams.logsdb.qa.exceptions.MatcherException; -import org.elasticsearch.datastreams.logsdb.qa.exceptions.MismatchTypeMatcherException; -import org.elasticsearch.datastreams.logsdb.qa.exceptions.UncomparableMatcherException; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.util.Arrays; -import java.util.List; - -class EqualMatcher extends Matcher { - private final XContentBuilder actualMappings; - private final Settings.Builder actualSettings; - private final XContentBuilder expectedMappings; - private final Settings.Builder expectedSettings; - private final T actual; - private final T expected; - private final boolean ignoringSort; - - EqualMatcher( - XContentBuilder actualMappings, - Settings.Builder actualSettings, - XContentBuilder expectedMappings, - Settings.Builder expectedSettings, - T actual, - T expected, - boolean ignoringSort - ) { - this.actualMappings = actualMappings; - this.actualSettings = actualSettings; - this.expectedMappings = expectedMappings; - this.expectedSettings = expectedSettings; - this.actual = actual; - this.expected = expected; - this.ignoringSort = ignoringSort; - } - - @SuppressWarnings("unchecked") - public boolean match() throws MatcherException { - if (actual == null) { - if (expected == null) { - throw new UncomparableMatcherException( - actualMappings, - actualSettings, - expectedMappings, - expectedSettings, - "Both 'actual' and 'expected' are null" - ); - } - return false; - } - if (expected == null) { - return false; - } - if (actual.getClass().equals(expected.getClass()) == false) { - throw new MismatchTypeMatcherException( - actualMappings, - actualSettings, - expectedMappings, - expectedSettings, - "Unable to match " + actual.getClass().getSimpleName() + " to " + expected.getClass().getSimpleName() - ); - } - if (actual.getClass().isArray()) { - return matchArraysEqual((T[]) actual, (T[]) expected, ignoringSort); - } - if (actual instanceof List act && expected instanceof List exp) { - return matchArraysEqual((T[]) (act).toArray(), (T[]) (exp).toArray(), ignoringSort); - } - return actual.equals(expected); - } - - private boolean matchArraysEqual(final T[] actualArray, final T[] expectedArray, boolean ignoreSorting) { - if (actualArray.length != expectedArray.length) { - return false; - } - if (ignoreSorting) { - return matchArraysEqualIgnoringSorting(actualArray, expectedArray) != false; - } else { - return matchArraysEqualExact(actualArray, expectedArray) != false; - } - } - - private static boolean matchArraysEqualIgnoringSorting(T[] actualArray, T[] expectedArray) { - final List actualList = Arrays.asList(actualArray); - final List expectedList = Arrays.asList(expectedArray); - return actualList.containsAll(expectedList) && expectedList.containsAll(actualList); - } - - private static boolean matchArraysEqualExact(T[] actualArray, T[] expectedArray) { - for (int i = 0; i < actualArray.length; i++) { - boolean isEqual = actualArray[i].equals(expectedArray[i]); - if (isEqual == false) { - return true; - } - } - return false; - } -} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/GenericEqualsMatcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/GenericEqualsMatcher.java new file mode 100644 index 0000000000000..6feae5c1ccabc --- /dev/null +++ 
b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/GenericEqualsMatcher.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.util.List; + +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.formatErrorMessage; + +public class GenericEqualsMatcher extends Matcher { + protected final XContentBuilder actualMappings; + protected final Settings.Builder actualSettings; + protected final XContentBuilder expectedMappings; + protected final Settings.Builder expectedSettings; + protected final T actual; + protected final T expected; + protected final boolean ignoringSort; + + protected GenericEqualsMatcher( + XContentBuilder actualMappings, + Settings.Builder actualSettings, + XContentBuilder expectedMappings, + Settings.Builder expectedSettings, + T actual, + T expected, + boolean ignoringSort + ) { + this.actualMappings = actualMappings; + this.actualSettings = actualSettings; + this.expectedMappings = expectedMappings; + this.expectedSettings = expectedSettings; + this.actual = actual; + this.expected = expected; + this.ignoringSort = ignoringSort; + } + + public MatchResult match() { + if (actual == null) { + if (expected == null) { + + return MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Both 'actual' and 'expected' are null" + ) + ); + } + return MatchResult.noMatch( + formatErrorMessage(actualMappings, actualSettings, expectedMappings, expectedSettings, "Expected is null but actual is not") + ); + } + if (expected == null) { + return MatchResult.noMatch( + formatErrorMessage(actualMappings, actualSettings, expectedMappings, expectedSettings, "Actual is null but expected is not") + ); + } + if (actual.getClass().equals(expected.getClass()) == false) { + return MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Unable to match " + actual.getClass().getSimpleName() + " to " + expected.getClass().getSimpleName() + ) + ); + } + if (actual.getClass().isArray()) { + return new ArrayEqualMatcher( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + (Object[]) actual, + (Object[]) expected, + ignoringSort + ).match(); + } + if (actual instanceof List act && expected instanceof List exp) { + return new ListEqualMatcher(actualMappings, actualSettings, expectedMappings, expectedSettings, act, exp, ignoringSort).match(); + } + return new ObjectMatcher(actualMappings, actualSettings, expectedMappings, expectedSettings, actual, expected).match(); + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ListEqualMatcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ListEqualMatcher.java new file mode 100644 index 0000000000000..ae18129a77111 --- /dev/null +++ 
b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ListEqualMatcher.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.util.List; + +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.formatErrorMessage; +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.prettyPrintCollections; + +public class ListEqualMatcher extends GenericEqualsMatcher> { + public ListEqualMatcher( + final XContentBuilder actualMappings, + final Settings.Builder actualSettings, + final XContentBuilder expectedMappings, + final Settings.Builder expectedSettings, + final List actual, + final List expected, + final boolean ignoringSort + ) { + super(actualMappings, actualSettings, expectedMappings, expectedSettings, actual, expected, ignoringSort); + } + + @Override + @SuppressWarnings("unchecked") + public MatchResult match() { + return matchListEquals((List) actual, (List) expected, ignoringSort); + } + + private MatchResult matchListEquals(final List actualList, final List expectedList, boolean ignoreSorting) { + if (actualList.size() != expectedList.size()) { + return MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "List lengths do not match, " + prettyPrintCollections(actualList, expectedList) + ) + ); + } + if (ignoreSorting) { + return matchListsEqualIgnoringSorting(actualList, expectedList) + ? MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Lists do not match when ignoring sort order, " + prettyPrintCollections(actualList, expectedList) + ) + ); + } else { + return matchListsEqualExact(actualList, expectedList) + ? MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Lists do not match exactly, " + prettyPrintCollections(actualList, expectedList) + ) + ); + } + } + + private static boolean matchListsEqualIgnoringSorting(final List actualList, final List expectedList) { + return actualList.containsAll(expectedList) && expectedList.containsAll(actualList); + } + + private static boolean matchListsEqualExact(List actualList, List expectedList) { + for (int i = 0; i < actualList.size(); i++) { + boolean isEqual = actualList.get(i).equals(expectedList.get(i)); + if (isEqual == false) { + return false; + } + } + return true; + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/MatchResult.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/MatchResult.java new file mode 100644 index 0000000000000..07a57dcca3b71 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/MatchResult.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers; + +import java.util.Objects; + +public class MatchResult { + private final boolean isMatch; + private final String message; + + private MatchResult(boolean isMatch, String message) { + this.isMatch = isMatch; + this.message = message; + } + + public static MatchResult match() { + return new MatchResult(true, "Match successful"); + } + + public static MatchResult noMatch(final String reason) { + return new MatchResult(false, reason); + } + + public boolean isMatch() { + return isMatch; + } + + public String getMessage() { + return message; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MatchResult that = (MatchResult) o; + return isMatch == that.isMatch && Objects.equals(message, that.message); + } + + @Override + public int hashCode() { + return Objects.hash(isMatch, message); + } + + @Override + public String toString() { + return "MatchResult{" + "isMatch=" + isMatch + ", message='" + message + '\'' + '}'; + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/Matcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/Matcher.java index 50afda072c4d5..fc2aec1c4fa49 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/Matcher.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/Matcher.java @@ -9,10 +9,12 @@ package org.elasticsearch.datastreams.logsdb.qa.matchers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.datastreams.logsdb.qa.exceptions.MatcherException; -import org.elasticsearch.datastreams.logsdb.qa.exceptions.NotEqualMatcherException; +import org.elasticsearch.datastreams.logsdb.qa.matchers.source.SourceMatcher; import org.elasticsearch.xcontent.XContentBuilder; +import java.util.List; +import java.util.Map; + /** * A base class to be used for the matching logic when comparing query results. 
*/ @@ -22,6 +24,14 @@ public static SettingsStep mappings(final XContentBuilder actualMappings, return new Builder<>(expectedMappings, actualMappings); } + public static MappingsStep>> matchSource() { + return new SourceMatcherBuilder(); + } + + public interface MappingsStep { + SettingsStep mappings(XContentBuilder actualMappings, XContentBuilder expectedMappings); + } + public interface SettingsStep { ExpectedStep settings(Settings.Builder actualSettings, Settings.Builder expectedSettings); } @@ -31,13 +41,12 @@ public interface ExpectedStep { } public interface CompareStep { - void isEqualTo(T actual) throws MatcherException; + MatchResult isEqualTo(T actual); CompareStep ignoringSort(boolean ignoringSort); } private static class Builder implements SettingsStep, CompareStep, ExpectedStep { - private final XContentBuilder expectedMappings; private final XContentBuilder actualMappings; private Settings.Builder expectedSettings; @@ -63,8 +72,8 @@ private Builder( } @Override - public void isEqualTo(T actual) throws MatcherException { - boolean match = new EqualMatcher<>( + public MatchResult isEqualTo(T actual) { + return new GenericEqualsMatcher<>( actualMappings, actualSettings, expectedMappings, @@ -73,15 +82,6 @@ public void isEqualTo(T actual) throws MatcherException { expected, ignoringSort ).match(); - if (match == false) { - throw new NotEqualMatcherException( - actualMappings, - actualSettings, - expectedMappings, - expectedSettings, - "actual [" + actual + "] not equal to [" + expected + "]" - ); - } } @Override @@ -97,4 +97,54 @@ public CompareStep expected(T expected) { } } + private static class SourceMatcherBuilder + implements + MappingsStep>>, + SettingsStep>>, + CompareStep>>, + ExpectedStep>> { + private XContentBuilder expectedMappings; + private XContentBuilder actualMappings; + private Settings.Builder expectedSettings; + private Settings.Builder actualSettings; + private List> expected; + private boolean ignoringSort; + + @Override + public ExpectedStep>> settings(Settings.Builder actualSettings, Settings.Builder expectedSettings) { + this.actualSettings = actualSettings; + this.expectedSettings = expectedSettings; + return this; + } + + private SourceMatcherBuilder() {} + + public SettingsStep>> mappings( + final XContentBuilder actualMappings, + final XContentBuilder expectedMappings + ) { + this.actualMappings = actualMappings; + this.expectedMappings = expectedMappings; + + return this; + } + + @Override + public MatchResult isEqualTo(List> actual) { + return new SourceMatcher(actualMappings, actualSettings, expectedMappings, expectedSettings, actual, expected, ignoringSort) + .match(); + } + + @Override + public CompareStep>> ignoringSort(boolean ignoringSort) { + this.ignoringSort = ignoringSort; + return this; + } + + @Override + public CompareStep>> expected(List> expected) { + this.expected = expected; + return this; + } + } } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/Messages.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/Messages.java new file mode 100644 index 0000000000000..db67a8c3d40d0 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/Messages.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
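// Editor's note, an illustrative sketch rather than part of this change: how a test might use
// the new MatchResult-returning fluent API introduced above instead of catching MatcherException.
// The fixture names (actualMappings, expectedMappings, actualSettings, expectedSettings,
// expectedDocs, actualDocs) are hypothetical.
MatchResult result = Matcher.matchSource()
    .mappings(actualMappings, expectedMappings)
    .settings(actualSettings, expectedSettings)
    .expected(expectedDocs)
    .ignoringSort(true)
    .isEqualTo(actualDocs);
if (result.isMatch() == false) {
    throw new AssertionError(result.getMessage());
}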
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Collectors; + +public class Messages { + public static String formatErrorMessage( + final XContentBuilder actualMappings, + final Settings.Builder actualSettings, + final XContentBuilder expectedMappings, + final Settings.Builder expectedSettings, + final String errorMessage + ) { + return "Error [" + + errorMessage + + "] " + + "actual mappings [" + + Strings.toString(actualMappings) + + "] " + + "actual settings [" + + Strings.toString(actualSettings.build()) + + "] " + + "expected mappings [" + + Strings.toString(expectedMappings) + + "] " + + "expected settings [" + + Strings.toString(expectedSettings.build()) + + "] "; + } + + public static String prettyPrintArrays(final Object[] actualArray, final Object[] expectedArray) { + return "actual: " + + prettyPrintCollection(Arrays.asList(actualArray)) + + ", expected: " + + prettyPrintCollection(Arrays.asList(expectedArray)); + } + + public static String prettyPrintCollections(final Collection actualList, final Collection expectedList) { + return "actual: " + prettyPrintCollection(actualList) + ", expected: " + prettyPrintCollection(expectedList); + } + + private static String prettyPrintCollection(final Collection list) { + return "[" + list.stream().map(Object::toString).collect(Collectors.joining(", ")) + "]"; + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ObjectMatcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ObjectMatcher.java new file mode 100644 index 0000000000000..81844131c58a2 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/ObjectMatcher.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.formatErrorMessage; + +public class ObjectMatcher extends GenericEqualsMatcher { + ObjectMatcher( + final XContentBuilder actualMappings, + final Settings.Builder actualSettings, + final XContentBuilder expectedMappings, + final Settings.Builder expectedSettings, + final Object actual, + final Object expected + ) { + super(actualMappings, actualSettings, expectedMappings, expectedSettings, actual, expected, true); + } + + @Override + public MatchResult match() { + return actual.equals(expected) + ? 
MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Actual does not equal expected, actual: " + actual + ", expected: " + expected + ) + ); + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/DynamicFieldMatcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/DynamicFieldMatcher.java new file mode 100644 index 0000000000000..dc371aae82ce1 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/DynamicFieldMatcher.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers.source; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.datastreams.logsdb.qa.matchers.MatchResult; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.formatErrorMessage; +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.prettyPrintCollections; + +class DynamicFieldMatcher { + private final XContentBuilder actualMappings; + private final Settings.Builder actualSettings; + private final XContentBuilder expectedMappings; + private final Settings.Builder expectedSettings; + + DynamicFieldMatcher( + XContentBuilder actualMappings, + Settings.Builder actualSettings, + XContentBuilder expectedMappings, + Settings.Builder expectedSettings + ) { + this.actualMappings = actualMappings; + this.actualSettings = actualSettings; + this.expectedMappings = expectedMappings; + this.expectedSettings = expectedSettings; + } + + /** + * Performs matching of dynamically mapped field values if they need special treatment. + * @return {#{@link MatchResult}} if field values need special treatment by this matcher. + * If field values can be matched using generic mapper, returns {@link Optional#empty()}. + */ + public Optional match(List actual, List expected) { + if (expected == null) { + return Optional.empty(); + } + + // Floating point values are always mapped as float with dynamic mapping. + var isDouble = expected.stream().filter(Objects::nonNull).findFirst().map(o -> o instanceof Double).orElse(false); + if (isDouble) { + assert expected.stream().allMatch(o -> o == null || o instanceof Double); + + var normalizedActual = normalizeDoubles(actual); + var normalizedExpected = normalizeDoubles(expected); + + var matchResult = normalizedActual.equals(normalizedExpected) + ? 
MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Values of dynamically mapped field containing double values don't match after normalization, normalized " + + prettyPrintCollections(normalizedActual, normalizedExpected) + ) + ); + return Optional.of(matchResult); + } + + return Optional.empty(); + } + + private static Set normalizeDoubles(List values) { + if (values == null) { + return Set.of(); + } + + Function toFloat = (o) -> o instanceof Number n ? n.floatValue() : Float.parseFloat((String) o); + return values.stream().filter(Objects::nonNull).map(toFloat).collect(Collectors.toSet()); + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/FieldSpecificMatcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/FieldSpecificMatcher.java new file mode 100644 index 0000000000000..253fb4b0e9688 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/FieldSpecificMatcher.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers.source; + +import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.datastreams.logsdb.qa.matchers.MatchResult; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.math.BigInteger; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.formatErrorMessage; +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.prettyPrintCollections; + +interface FieldSpecificMatcher { + MatchResult match(List actual, List expected, Map actualMapping, Map expectedMapping); + + class HalfFloatMatcher implements FieldSpecificMatcher { + private final XContentBuilder actualMappings; + private final Settings.Builder actualSettings; + private final XContentBuilder expectedMappings; + private final Settings.Builder expectedSettings; + + HalfFloatMatcher( + XContentBuilder actualMappings, + Settings.Builder actualSettings, + XContentBuilder expectedMappings, + Settings.Builder expectedSettings + ) { + this.actualMappings = actualMappings; + this.actualSettings = actualSettings; + this.expectedMappings = expectedMappings; + this.expectedSettings = expectedSettings; + } + + @Override + public MatchResult match( + List actual, + List expected, + Map actualMapping, + Map expectedMapping + ) { + var actualHalfFloatBytes = normalize(actual); + var expectedHalfFloatBytes = normalize(expected); + + return actualHalfFloatBytes.equals(expectedHalfFloatBytes) + ? 
MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Values of type [half_float] don't match after normalization, normalized " + + prettyPrintCollections(actualHalfFloatBytes, expectedHalfFloatBytes) + ) + ); + } + + private static Set normalize(List values) { + if (values == null) { + return Set.of(); + } + + Function toFloat = (o) -> o instanceof Number n ? n.floatValue() : Float.parseFloat((String) o); + return values.stream() + .filter(Objects::nonNull) + .map(toFloat) + // Based on logic in NumberFieldMapper + .map(HalfFloatPoint::halfFloatToSortableShort) + .collect(Collectors.toSet()); + } + } + + class ScaledFloatMatcher implements FieldSpecificMatcher { + private final XContentBuilder actualMappings; + private final Settings.Builder actualSettings; + private final XContentBuilder expectedMappings; + private final Settings.Builder expectedSettings; + + ScaledFloatMatcher( + XContentBuilder actualMappings, + Settings.Builder actualSettings, + XContentBuilder expectedMappings, + Settings.Builder expectedSettings + ) { + this.actualMappings = actualMappings; + this.actualSettings = actualSettings; + this.expectedMappings = expectedMappings; + this.expectedSettings = expectedSettings; + } + + @Override + public MatchResult match( + List actual, + List expected, + Map actualMapping, + Map expectedMapping + ) { + var scalingFactor = actualMapping.get("scaling_factor"); + var expectedScalingFactor = expectedMapping.get("scaling_factor"); + if (Objects.equals(scalingFactor, expectedScalingFactor) == false) { + throw new IllegalStateException("Scaling factor for scaled_float field does not match between actual and expected mapping"); + } + + assert scalingFactor instanceof Number; + var expectedNormalized = normalizeExpected(expected, ((Number) scalingFactor).doubleValue()); + var actualNormalized = normalizeActual(actual); + + return actualNormalized.equals(expectedNormalized) + ? 
MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Values of type [scaled_float] don't match after normalization, normalized " + + prettyPrintCollections(actualNormalized, expectedNormalized) + ) + ); + } + + private static Set normalizeExpected(List values, double scalingFactor) { + if (values == null) { + return Set.of(); + } + + return values.stream() + .filter(Objects::nonNull) + .map(ScaledFloatMatcher::toDouble) + // Based on logic in ScaledFloatFieldMapper + .map(v -> { + var encoded = Math.round(v * scalingFactor); + return encoded / scalingFactor; + }) + .collect(Collectors.toSet()); + } + + private static Set normalizeActual(List values) { + if (values == null) { + return Set.of(); + } + + return values.stream().filter(Objects::nonNull).map(ScaledFloatMatcher::toDouble).collect(Collectors.toSet()); + } + + private static double toDouble(Object value) { + return ((Number) value).doubleValue(); + } + } + + class UnsignedLongMatcher implements FieldSpecificMatcher { + private final XContentBuilder actualMappings; + private final Settings.Builder actualSettings; + private final XContentBuilder expectedMappings; + private final Settings.Builder expectedSettings; + + UnsignedLongMatcher( + XContentBuilder actualMappings, + Settings.Builder actualSettings, + XContentBuilder expectedMappings, + Settings.Builder expectedSettings + ) { + this.actualMappings = actualMappings; + this.actualSettings = actualSettings; + this.expectedMappings = expectedMappings; + this.expectedSettings = expectedSettings; + } + + @Override + public MatchResult match( + List actual, + List expected, + Map actualMapping, + Map expectedMapping + ) { + var expectedNormalized = normalize(expected); + var actualNormalized = normalize(actual); + + return actualNormalized.equals(expectedNormalized) + ? MatchResult.match() + : MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Values of type [unsigned_long] don't match after normalization, normalized " + + prettyPrintCollections(actualNormalized, expectedNormalized) + ) + ); + } + + private static Set normalize(List values) { + if (values == null) { + return Set.of(); + } + + return values.stream().filter(Objects::nonNull).map(UnsignedLongMatcher::toBigInteger).collect(Collectors.toSet()); + } + + private static BigInteger toBigInteger(Object value) { + if (value instanceof String s) { + return new BigInteger(s, 10); + } + if (value instanceof Long l) { + return BigInteger.valueOf(l); + } + + return (BigInteger) value; + } + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/MappingTransforms.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/MappingTransforms.java new file mode 100644 index 0000000000000..eade6f10e48fe --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/MappingTransforms.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
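// Editor's note, an illustrative sketch rather than part of this change: the matchers above
// normalize both sides before comparing them as sets. For scaled_float the expected value is
// round-tripped through the scaling factor (mirroring how ScaledFloatFieldMapper encodes it on
// write), and dynamically mapped doubles are reduced to floats. Hypothetical values:
double scalingFactor = 100.0;
double expectedRaw = 12.3456;
double expectedNormalized = Math.round(expectedRaw * scalingFactor) / scalingFactor; // value as stored by the index

float doubleAsFloatExpected = Double.valueOf(0.1d).floatValue(); // expected side reduced to float
float doubleAsFloatActual = Float.parseFloat("0.1");             // actual side, possibly rendered back as a string
assert doubleAsFloatExpected == doubleAsFloatActual;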
+ */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers.source; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +class MappingTransforms { + /** + * Container for mapping of a field. Contains field mapping parameters and mapping parameters of parent fields (if present) + * in order of increasing distance (direct parent first). + * This is needed because some parent mapping parameters influence how source of the field is stored (e.g. `enabled: false`). + * @param mappingParameters + * @param parentMappingParameters + */ + record FieldMapping(Map mappingParameters, List> parentMappingParameters) {} + + /** + * Normalize mapping to have the same structure as normalized source and enable field mapping lookup. + * Similar to {@link SourceTransforms#normalize(Map)} but needs to get rid of intermediate nodes + * and collect results into a different data structure. + * + * @param map raw mapping document converted to map + * @return map from normalized field name (like a.b.c) to a map of mapping parameters (like type) + */ + public static Map normalizeMapping(Map map) { + var flattened = new HashMap(); + + descend(null, map, flattened); + + return flattened; + } + + @SuppressWarnings("unchecked") + private static void descend(String pathFromRoot, Map currentLevel, Map flattened) { + for (var entry : currentLevel.entrySet()) { + if (entry.getKey().equals("_doc") || entry.getKey().equals("properties")) { + descend(pathFromRoot, (Map) entry.getValue(), flattened); + } else { + if (entry.getValue() instanceof Map map) { + var pathToField = pathFromRoot == null ? entry.getKey() : pathFromRoot + "." + entry.getKey(); + + // Descending to subobject, we need to remember parent mapping + if (pathFromRoot != null) { + var parentMapping = flattened.computeIfAbsent( + pathFromRoot, + k -> new FieldMapping(new HashMap<>(), new ArrayList<>()) + ); + var childMapping = flattened.computeIfAbsent( + pathToField, + k -> new FieldMapping(new HashMap<>(), new ArrayList<>()) + ); + childMapping.parentMappingParameters.add(parentMapping.mappingParameters); + childMapping.parentMappingParameters.addAll(parentMapping.parentMappingParameters); + } + + descend(pathToField, (Map) map, flattened); + } else { + var pathToField = pathFromRoot == null ? "_doc" : pathFromRoot; + // We are either at the lowest level of mapping or it's a leaf field of top level object + flattened.computeIfAbsent(pathToField, k -> new FieldMapping(new HashMap<>(), new ArrayList<>())).mappingParameters.put( + entry.getKey(), + entry.getValue() + ); + } + } + } + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/SourceMatcher.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/SourceMatcher.java new file mode 100644 index 0000000000000..5eb93cee67d74 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/SourceMatcher.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
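// Editor's note, an illustrative sketch rather than part of this change: what normalizeMapping
// above produces for a tiny, hypothetical mapping. Keys become full dotted paths and each entry
// keeps its own mapping parameters plus those of its parents.
Map<String, Object> rawMapping = Map.of(
    "_doc",
    Map.of("properties", Map.of("host", Map.of("properties", Map.of("name", Map.of("type", "keyword")))))
);
Map<String, MappingTransforms.FieldMapping> flat = MappingTransforms.normalizeMapping(rawMapping);
assert "keyword".equals(flat.get("host.name").mappingParameters().get("type"));
// flat also contains an entry for the parent path "host", and the "host.name" entry's
// parentMappingParameters list references the parent's (empty) parameters.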
+ */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers.source; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.datastreams.logsdb.qa.matchers.GenericEqualsMatcher; +import org.elasticsearch.datastreams.logsdb.qa.matchers.ListEqualMatcher; +import org.elasticsearch.datastreams.logsdb.qa.matchers.MatchResult; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.formatErrorMessage; +import static org.elasticsearch.datastreams.logsdb.qa.matchers.Messages.prettyPrintCollections; + +public class SourceMatcher extends GenericEqualsMatcher>> { + private final Map actualNormalizedMapping; + private final Map expectedNormalizedMapping; + + private final Map fieldSpecificMatchers; + private final DynamicFieldMatcher dynamicFieldMatcher; + + public SourceMatcher( + final XContentBuilder actualMappings, + final Settings.Builder actualSettings, + final XContentBuilder expectedMappings, + final Settings.Builder expectedSettings, + final List> actual, + final List> expected, + final boolean ignoringSort + ) { + super(actualMappings, actualSettings, expectedMappings, expectedSettings, actual, expected, ignoringSort); + + var actualMappingAsMap = XContentHelper.convertToMap(BytesReference.bytes(actualMappings), false, actualMappings.contentType()) + .v2(); + this.actualNormalizedMapping = MappingTransforms.normalizeMapping(actualMappingAsMap); + + var expectedMappingAsMap = XContentHelper.convertToMap(BytesReference.bytes(expectedMappings), false, actualMappings.contentType()) + .v2(); + this.expectedNormalizedMapping = MappingTransforms.normalizeMapping(expectedMappingAsMap); + + this.fieldSpecificMatchers = Map.of( + "half_float", + new FieldSpecificMatcher.HalfFloatMatcher(actualMappings, actualSettings, expectedMappings, expectedSettings), + "scaled_float", + new FieldSpecificMatcher.ScaledFloatMatcher(actualMappings, actualSettings, expectedMappings, expectedSettings), + "unsigned_long", + new FieldSpecificMatcher.UnsignedLongMatcher(actualMappings, actualSettings, expectedMappings, expectedSettings) + ); + this.dynamicFieldMatcher = new DynamicFieldMatcher(actualMappings, actualSettings, expectedMappings, expectedSettings); + } + + @Override + public MatchResult match() { + if (actual.size() != expected.size()) { + return MatchResult.noMatch( + formatErrorMessage( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + "Number of documents does not match, " + prettyPrintCollections(actual, expected) + ) + ); + } + + var sortedAndFlattenedActual = actual.stream().map(SourceTransforms::normalize).toList(); + var sortedAndFlattenedExpected = expected.stream().map(SourceTransforms::normalize).toList(); + + for (int i = 0; i < sortedAndFlattenedActual.size(); i++) { + var actual = sortedAndFlattenedActual.get(i); + var expected = sortedAndFlattenedExpected.get(i); + + var result = compareSource(actual, expected); + if (result.isMatch() == false) { + var message = "Source matching failed at document id [" + i + "]. 
" + result.getMessage(); + return MatchResult.noMatch(message); + } + } + + return MatchResult.match(); + } + + private MatchResult compareSource(Map> actual, Map> expected) { + for (var expectedFieldEntry : expected.entrySet()) { + var name = expectedFieldEntry.getKey(); + + var actualValues = actual.get(name); + var expectedValues = expectedFieldEntry.getValue(); + + // There are cases when field values are stored in ignored source + // so we try to match them as is first and then apply field specific matcher. + // This is temporary, we should be able to tell when source is exact using mappings. + // See #111916. + var genericMatchResult = matchWithGenericMatcher(actualValues, expectedValues); + if (genericMatchResult.isMatch()) { + return genericMatchResult; + } + + var matchIncludingFieldSpecificMatchers = matchWithFieldSpecificMatcher(name, actualValues, expectedValues).orElse( + genericMatchResult + ); + if (matchIncludingFieldSpecificMatchers.isMatch() == false) { + var message = "Source documents don't match for field [" + name + "]: " + matchIncludingFieldSpecificMatchers.getMessage(); + return MatchResult.noMatch(message); + } + } + + return MatchResult.match(); + } + + private Optional matchWithFieldSpecificMatcher(String fieldName, List actualValues, List expectedValues) { + var actualFieldMapping = actualNormalizedMapping.get(fieldName); + if (actualFieldMapping == null) { + if (expectedNormalizedMapping.get(fieldName) != null + // Special cases due to fields being defined in default mapping for logsdb index mode + && fieldName.equals("@timestamp") == false + && fieldName.equals("host.name") == false) { + throw new IllegalStateException( + "Leaf field [" + fieldName + "] is present in expected mapping but absent in actual mapping" + ); + } + + // Field is dynamically mapped + return dynamicFieldMatcher.match(actualValues, expectedValues); + } + + var actualFieldType = (String) actualFieldMapping.mappingParameters().get("type"); + if (actualFieldType == null) { + throw new IllegalStateException("Field type is missing from leaf field Leaf field [" + fieldName + "] mapping parameters"); + } + + var expectedFieldMapping = expectedNormalizedMapping.get(fieldName); + if (expectedFieldMapping == null) { + throw new IllegalStateException("Leaf field [" + fieldName + "] is present in actual mapping but absent in expected mapping"); + } else { + var expectedFieldType = expectedFieldMapping.mappingParameters().get("type"); + if (Objects.equals(actualFieldType, expectedFieldType) == false) { + throw new IllegalStateException( + "Leaf field [" + + fieldName + + "] has type [" + + actualFieldType + + "] in actual mapping but a different type [" + + expectedFieldType + + "] in expected mapping" + ); + } + } + + if (sourceMatchesExactly(expectedFieldMapping, expectedValues)) { + return Optional.empty(); + } + + var fieldSpecificMatcher = fieldSpecificMatchers.get(actualFieldType); + if (fieldSpecificMatcher == null) { + return Optional.empty(); + } + + MatchResult matched = fieldSpecificMatcher.match( + actualValues, + expectedValues, + actualFieldMapping.mappingParameters(), + expectedFieldMapping.mappingParameters() + ); + return Optional.of(matched); + } + + // Checks for scenarios when source is stored exactly and therefore can be compared without special logic. 
+ private boolean sourceMatchesExactly(MappingTransforms.FieldMapping mapping, List expectedValues) { + return mapping.parentMappingParameters().stream().anyMatch(m -> m.getOrDefault("enabled", "true").equals("false")); + } + + private MatchResult matchWithGenericMatcher(List actualValues, List expectedValues) { + var genericListMatcher = new ListEqualMatcher( + actualMappings, + actualSettings, + expectedMappings, + expectedSettings, + SourceTransforms.normalizeValues(actualValues), + SourceTransforms.normalizeValues(expectedValues), + true + ); + + return genericListMatcher.match(); + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/SourceTransforms.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/SourceTransforms.java new file mode 100644 index 0000000000000..d7d5bf369b018 --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/matchers/source/SourceTransforms.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.logsdb.qa.matchers.source; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +class SourceTransforms { + /** + * This preprocessing step makes it easier to match the document using a unified structure. + * It performs following modifications: + *
+ * <ul>
+ *     <li>Flattens all nested maps into top level map with full field path as key (e.g. "a.b.c.d")</li>
+ *     <li>Transforms all field values to arrays of length >= 1</li>
+ * </ul>
+ *
+ * It also makes it possible to work with subobjects: false/auto settings. + * + * @return flattened map + */ + public static Map> normalize(Map map) { + var flattened = new HashMap>(); + + descend(null, map, flattened); + + return flattened; + } + + public static List normalizeValues(List values) { + if (values == null) { + return Collections.emptyList(); + } + + return normalizeValues(values, Function.identity()); + } + + public static List normalizeValues(List values, Function transform) { + if (values == null) { + return Collections.emptyList(); + } + + // Synthetic source modifications: + // * null values are not present + // * duplicates are removed + return new ArrayList<>( + values.stream().filter(v -> v != null && Objects.equals(v, "null") == false).map(transform).collect(Collectors.toSet()) + ); + } + + private static void descend(String pathFromRoot, Map currentLevel, Map> flattened) { + for (var entry : currentLevel.entrySet()) { + var pathToCurrentField = pathFromRoot == null ? entry.getKey() : pathFromRoot + "." + entry.getKey(); + if (entry.getValue() instanceof List list) { + for (var fieldValue : list) { + handleField(pathToCurrentField, fieldValue, flattened); + } + } else { + handleField(pathToCurrentField, entry.getValue(), flattened); + } + } + } + + @SuppressWarnings("unchecked") + private static void handleField(String pathToCurrentField, Object currentField, Map> flattened) { + if (currentField instanceof Map map) { + descend(pathToCurrentField, (Map) map, flattened); + } else { + flattened.computeIfAbsent(pathToCurrentField, k -> new ArrayList<>()).add(currentField); + } + } +} diff --git a/modules/data-streams/src/javaRestTest/resources/roles.yml b/modules/data-streams/src/javaRestTest/resources/roles.yml index 63e506dff8d39..74c238fdae4f2 100644 --- a/modules/data-streams/src/javaRestTest/resources/roles.yml +++ b/modules/data-streams/src/javaRestTest/resources/roles.yml @@ -16,10 +16,4 @@ under_privilged: - read - write - view_index_metadata -manage_data_stream_global_retention: - cluster: - - manage_data_stream_global_retention -monitor_data_stream_global_retention: - cluster: - - monitor_data_stream_global_retention -no_privilege: \ No newline at end of file +no_privilege: diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 02663731a1716..615c0006a4ce6 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -22,9 +22,6 @@ import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction; import org.elasticsearch.client.internal.OriginSettingClient; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.NamedDiff; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -44,12 +41,8 @@ import org.elasticsearch.datastreams.action.PromoteDataStreamTransportAction; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; -import 
org.elasticsearch.datastreams.lifecycle.UpdateDataStreamGlobalRetentionService; -import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamGlobalRetentionAction; import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamLifecycleAction; -import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamGlobalRetentionAction; import org.elasticsearch.datastreams.lifecycle.action.GetDataStreamLifecycleStatsAction; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamGlobalRetentionAction; import org.elasticsearch.datastreams.lifecycle.action.TransportDeleteDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleAction; @@ -139,7 +132,6 @@ public static TimeValue getLookAheadTime(Settings settings) { private final SetOnce dataLifecycleInitialisationService = new SetOnce<>(); private final SetOnce dataStreamLifecycleErrorsPublisher = new SetOnce<>(); private final SetOnce dataStreamLifecycleHealthIndicatorService = new SetOnce<>(); - private final SetOnce dataStreamGlobalRetentionService = new SetOnce<>(); private final Settings settings; public DataStreamsPlugin(Settings settings) { @@ -209,19 +201,15 @@ public Collection createComponents(PluginServices services) { errorStoreInitialisationService.get(), services.allocationService(), dataStreamLifecycleErrorsPublisher.get(), - services.dataStreamGlobalRetentionResolver() + services.dataStreamGlobalRetentionSettings() ) ); dataLifecycleInitialisationService.get().init(); dataStreamLifecycleHealthIndicatorService.set(new DataStreamLifecycleHealthIndicatorService()); - dataStreamGlobalRetentionService.set( - new UpdateDataStreamGlobalRetentionService(services.clusterService(), services.dataStreamGlobalRetentionResolver()) - ); components.add(errorStoreInitialisationService.get()); components.add(dataLifecycleInitialisationService.get()); components.add(dataStreamLifecycleErrorsPublisher.get()); - components.add(dataStreamGlobalRetentionService.get()); return components; } @@ -240,24 +228,6 @@ public Collection createComponents(PluginServices services) { actions.add(new ActionHandler<>(DeleteDataStreamLifecycleAction.INSTANCE, TransportDeleteDataStreamLifecycleAction.class)); actions.add(new ActionHandler<>(ExplainDataStreamLifecycleAction.INSTANCE, TransportExplainDataStreamLifecycleAction.class)); actions.add(new ActionHandler<>(GetDataStreamLifecycleStatsAction.INSTANCE, TransportGetDataStreamLifecycleStatsAction.class)); - actions.add( - new ActionHandler<>( - PutDataStreamGlobalRetentionAction.INSTANCE, - PutDataStreamGlobalRetentionAction.TransportPutDataStreamGlobalRetentionAction.class - ) - ); - actions.add( - new ActionHandler<>( - GetDataStreamGlobalRetentionAction.INSTANCE, - GetDataStreamGlobalRetentionAction.TransportGetDataStreamGlobalSettingsAction.class - ) - ); - actions.add( - new ActionHandler<>( - DeleteDataStreamGlobalRetentionAction.INSTANCE, - DeleteDataStreamGlobalRetentionAction.TransportDeleteDataStreamGlobalRetentionAction.class - ) - ); return actions; } @@ -293,14 +263,6 @@ public List getRestHandlers( return handlers; } - @Override - public List getNamedWriteables() { - return List.of( - new NamedWriteableRegistry.Entry(ClusterState.Custom.class, DataStreamGlobalRetention.TYPE, DataStreamGlobalRetention::read), - new NamedWriteableRegistry.Entry(NamedDiff.class, DataStreamGlobalRetention.TYPE, 
DataStreamGlobalRetention::readDiffFrom) - ); - } - @Override public Collection getAdditionalIndexSettingProviders(IndexSettingProvider.Parameters parameters) { return List.of(new DataStreamIndexSettingsProvider(parameters.mapperServiceFactory())); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/CreateDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/CreateDataStreamTransportAction.java index 36f5ecaadd446..77e9d20e34c0e 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/CreateDataStreamTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/CreateDataStreamTransportAction.java @@ -18,10 +18,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 1b18f8b799f4d..d85522d7099b6 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.Index; @@ -34,6 +33,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java index a614a2dc40e25..8f50f07c358fc 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java @@ -26,12 +26,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.Index; import 
org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.snapshots.SnapshotInProgressException; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.Task; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java index 8017b1c72f862..dcca32355082b 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java @@ -21,14 +21,13 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -37,6 +36,7 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -57,7 +57,7 @@ public class GetDataStreamsTransportAction extends TransportMasterNodeReadAction private static final Logger LOGGER = LogManager.getLogger(GetDataStreamsTransportAction.class); private final SystemIndices systemIndices; private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionResolver dataStreamGlobalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public GetDataStreamsTransportAction( @@ -67,7 +67,7 @@ public GetDataStreamsTransportAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, - DataStreamGlobalRetentionResolver dataStreamGlobalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( GetDataStreamAction.NAME, @@ -81,7 +81,7 @@ public GetDataStreamsTransportAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.systemIndices = systemIndices; - this.dataStreamGlobalRetentionResolver = dataStreamGlobalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; clusterSettings = clusterService.getClusterSettings(); } @@ -93,7 +93,7 @@ protected void masterOperation( ActionListener listener ) throws Exception { listener.onResponse( - innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, dataStreamGlobalRetentionResolver) + innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, globalRetentionSettings) 
); } @@ -103,7 +103,7 @@ static GetDataStreamAction.Response innerOperation( IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, ClusterSettings clusterSettings, - DataStreamGlobalRetentionResolver dataStreamGlobalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { List dataStreams = getDataStreams(state, indexNameExpressionResolver, request); List dataStreamInfos = new ArrayList<>(dataStreams.size()); @@ -223,7 +223,7 @@ public int compareTo(IndexInfo o) { return new GetDataStreamAction.Response( dataStreamInfos, request.includeDefaults() ? clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - dataStreamGlobalRetentionResolver.resolve(state) + globalRetentionSettings.get() ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/MigrateToDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/MigrateToDataStreamTransportAction.java index adbbfe7b28541..cdb342a3ddf3b 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/MigrateToDataStreamTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/MigrateToDataStreamTransportAction.java @@ -19,9 +19,9 @@ import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataMigrateToDataStreamService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ModifyDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ModifyDataStreamsTransportAction.java index 97f081575d748..777d239e0ace5 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ModifyDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ModifyDataStreamsTransportAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/PromoteDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/PromoteDataStreamTransportAction.java index e048393494139..c901b7a707b6c 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/PromoteDataStreamTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/PromoteDataStreamTransportAction.java @@ -22,10 +22,10 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index aabe865f9fe1d..99d4f8bb7cd28 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -44,7 +44,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -95,8 +95,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.STARTED; -import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.SUCCESS; -import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.UNKNOWN; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_DOWNSAMPLE_STATUS; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; @@ -164,7 +162,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab final ResultDeduplicator transportActionsDeduplicator; final ResultDeduplicator clusterStateChangesDeduplicator; private final DataStreamLifecycleHealthInfoPublisher dslHealthInfoPublisher; - private final DataStreamGlobalRetentionResolver globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; private LongSupplier nowSupplier; private final Clock clock; private final DataStreamLifecycleErrorStore errorStore; @@ -213,7 +211,7 @@ public DataStreamLifecycleService( DataStreamLifecycleErrorStore errorStore, AllocationService allocationService, DataStreamLifecycleHealthInfoPublisher dataStreamLifecycleHealthInfoPublisher, - DataStreamGlobalRetentionResolver globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.settings = settings; this.client = client; @@ -224,7 +222,7 @@ public DataStreamLifecycleService( this.clusterStateChangesDeduplicator = new ResultDeduplicator<>(threadPool.getThreadContext()); this.nowSupplier = nowSupplier; this.errorStore = errorStore; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; this.scheduledJob = null; this.pollInterval = DATA_STREAM_LIFECYCLE_POLL_INTERVAL_SETTING.get(settings); this.targetMergePolicyFloorSegment = DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING.get(settings); @@ -298,13 +296,13 @@ public void close() { 
@Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(LIFECYCLE_JOB_NAME)) { + if (event.jobName().equals(LIFECYCLE_JOB_NAME)) { if (this.isMaster) { logger.trace( "Data stream lifecycle job triggered: {}, {}, {}", - event.getJobName(), - event.getScheduledTime(), - event.getTriggeredTime() + event.jobName(), + event.scheduledTime(), + event.triggeredTime() ); run(clusterService.state()); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { @@ -821,8 +819,7 @@ private Index maybeExecuteRollover(ClusterState state, DataStream dataStream, bo RolloverRequest rolloverRequest = getDefaultRolloverRequest( rolloverConfiguration, dataStream.getName(), - dataStream.getLifecycle() - .getEffectiveDataRetention(dataStream.isSystem() ? null : globalRetentionResolver.resolve(state)), + dataStream.getLifecycle().getEffectiveDataRetention(globalRetentionSettings.get(), dataStream.isInternal()), rolloverFailureStore ); transportActionsDeduplicator.executeOnce( @@ -874,7 +871,7 @@ private Index maybeExecuteRollover(ClusterState state, DataStream dataStream, bo */ Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set indicesToExcludeForRemainingRun) { Metadata metadata = state.metadata(); - DataStreamGlobalRetention globalRetention = dataStream.isSystem() ? null : globalRetentionResolver.resolve(state); + DataStreamGlobalRetention globalRetention = dataStream.isSystem() ? null : globalRetentionSettings.get(); List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier, globalRetention); if (backingIndicesOlderThanRetention.isEmpty()) { return Set.of(); @@ -882,7 +879,7 @@ Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set< Set indicesToBeRemoved = new HashSet<>(); // We know that there is lifecycle and retention because there are indices to be deleted assert dataStream.getLifecycle() != null; - TimeValue effectiveDataRetention = dataStream.getLifecycle().getEffectiveDataRetention(globalRetention); + TimeValue effectiveDataRetention = dataStream.getLifecycle().getEffectiveDataRetention(globalRetention, dataStream.isInternal()); for (Index index : backingIndicesOlderThanRetention) { if (indicesToExcludeForRemainingRun.contains(index) == false) { IndexMetadata backingIndex = metadata.index(index); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionService.java deleted file mode 100644 index a18095c555f12..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionService.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
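Note on the hunks above: DataStreamGlobalRetentionResolver, which resolved global retention from the cluster state on every pass, is replaced by DataStreamGlobalRetentionSettings consulted via get(), and getEffectiveDataRetention now also receives dataStream.isInternal(). The sketch below is illustrative only; it assumes a settings-backed pair of default/max retention values that internal (system) data streams ignore, and its class and method names are placeholders rather than the actual Elasticsearch types.

import java.time.Duration;
import java.util.Optional;

// Illustrative sketch, not the real Elasticsearch classes: a settings-backed holder of
// global retention that callers read via get() instead of resolving from cluster state.
final class GlobalRetentionSketch {

    record GlobalRetention(Duration defaultRetention, Duration maxRetention) {}

    // In the actual change this would be wired to dynamic cluster settings; here it is a plain field.
    private volatile GlobalRetention current = new GlobalRetention(null, null);

    GlobalRetention get() {
        return current;
    }

    void update(Duration newDefault, Duration newMax) {
        current = new GlobalRetention(newDefault, newMax);
    }

    // Effective retention for one data stream: internal streams ignore global retention entirely;
    // other streams fall back to the global default and are capped by the global maximum.
    Optional<Duration> effectiveRetention(Duration streamRetention, boolean isInternal) {
        if (isInternal) {
            return Optional.ofNullable(streamRetention);
        }
        GlobalRetention global = current;
        Duration effective = streamRetention != null ? streamRetention : global.defaultRetention();
        if (global.maxRetention() != null && (effective == null || effective.compareTo(global.maxRetention()) > 0)) {
            effective = global.maxRetention();
        }
        return Optional.ofNullable(effective);
    }
}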
- */ - -package org.elasticsearch.datastreams.lifecycle; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateAckListener; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; -import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.SimpleBatchedAckListenerTaskExecutor; -import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.MasterServiceTaskQueue; -import org.elasticsearch.common.Priority; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.datastreams.lifecycle.action.DeleteDataStreamGlobalRetentionAction; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamGlobalRetentionAction; -import org.elasticsearch.datastreams.lifecycle.action.UpdateDataStreamGlobalRetentionResponse; - -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.Objects; - -/** - * This service manages the global retention configuration, it provides an API to set or remove global retention - * from the cluster state. - */ -public class UpdateDataStreamGlobalRetentionService { - - private static final Logger logger = LogManager.getLogger(UpdateDataStreamGlobalRetentionService.class); - - private final DataStreamGlobalRetentionResolver globalRetentionResolver; - private final MasterServiceTaskQueue taskQueue; - - public UpdateDataStreamGlobalRetentionService( - ClusterService clusterService, - DataStreamGlobalRetentionResolver globalRetentionResolver - ) { - this.globalRetentionResolver = globalRetentionResolver; - ClusterStateTaskExecutor executor = new SimpleBatchedAckListenerTaskExecutor<>() { - - @Override - public Tuple executeTask( - UpsertGlobalDataStreamMetadataTask task, - ClusterState clusterState - ) { - return new Tuple<>(updateGlobalRetention(clusterState, task.globalRetention()), task); - } - }; - this.taskQueue = clusterService.createTaskQueue("data-stream-global-retention", Priority.HIGH, executor); - - } - - public void updateGlobalRetention( - PutDataStreamGlobalRetentionAction.Request request, - List affectedDataStreams, - final ActionListener listener - ) { - taskQueue.submitTask( - "update-data-stream-global-retention", - new UpsertGlobalDataStreamMetadataTask( - request.getGlobalRetention(), - affectedDataStreams, - listener, - request.masterNodeTimeout() - ), - request.masterNodeTimeout() - ); - } - - public void removeGlobalRetention( - DeleteDataStreamGlobalRetentionAction.Request request, - List affectedDataStreams, - final ActionListener listener - ) { - taskQueue.submitTask( - "remove-data-stream-global-retention", - new UpsertGlobalDataStreamMetadataTask(null, affectedDataStreams, listener, request.masterNodeTimeout()), - request.masterNodeTimeout() - ); - } - - public List determineAffectedDataStreams( - @Nullable DataStreamGlobalRetention newGlobalRetention, - ClusterState clusterState - ) { - var previousGlobalRetention = globalRetentionResolver.resolve(clusterState); - if (Objects.equals(newGlobalRetention, 
previousGlobalRetention)) { - return List.of(); - } - List affectedDataStreams = new ArrayList<>(); - for (DataStream dataStream : clusterState.metadata().dataStreams().values()) { - if (dataStream.getLifecycle() != null) { - TimeValue previousEffectiveRetention = dataStream.getLifecycle() - .getEffectiveDataRetention(dataStream.isSystem() ? null : previousGlobalRetention); - TimeValue newEffectiveRetention = dataStream.getLifecycle() - .getEffectiveDataRetention(dataStream.isSystem() ? null : newGlobalRetention); - if (Objects.equals(previousEffectiveRetention, newEffectiveRetention) == false) { - affectedDataStreams.add( - new UpdateDataStreamGlobalRetentionResponse.AffectedDataStream( - dataStream.getName(), - newEffectiveRetention, - previousEffectiveRetention - ) - ); - } - } - } - affectedDataStreams.sort(Comparator.comparing(UpdateDataStreamGlobalRetentionResponse.AffectedDataStream::dataStreamName)); - return affectedDataStreams; - } - - // Visible for testing - ClusterState updateGlobalRetention(ClusterState clusterState, @Nullable DataStreamGlobalRetention retentionFromRequest) { - // Detecting if this update will result in a change in the cluster state, requires to use only the global retention from - // the cluster state and not the factory retention. - final var initialRetentionFromClusterState = DataStreamGlobalRetention.getFromClusterState(clusterState); - // Avoid storing empty retention in the cluster state - final var newRetention = DataStreamGlobalRetention.EMPTY.equals(retentionFromRequest) ? null : retentionFromRequest; - if (Objects.equals(newRetention, initialRetentionFromClusterState)) { - return clusterState; - } - if (newRetention == null) { - return clusterState.copyAndUpdate(b -> b.removeCustom(DataStreamGlobalRetention.TYPE)); - } - return clusterState.copyAndUpdate(b -> b.putCustom(DataStreamGlobalRetention.TYPE, newRetention)); - } - - /** - * A base class for the task updating the global retention in the cluster state. - */ - record UpsertGlobalDataStreamMetadataTask( - @Nullable DataStreamGlobalRetention globalRetention, - List affectedDataStreams, - ActionListener listener, - TimeValue ackTimeout - ) implements ClusterStateTaskListener, ClusterStateAckListener { - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - return true; - } - - @Override - public void onAllNodesAcked() { - listener.onResponse(new UpdateDataStreamGlobalRetentionResponse(true, affectedDataStreams)); - } - - @Override - public void onAckFailure(Exception e) { - logger.debug("Failed to update global retention [{}] with error [{}]", globalRetention, e.getMessage()); - listener.onResponse(UpdateDataStreamGlobalRetentionResponse.FAILED); - } - - @Override - public void onAckTimeout() { - logger.debug("Failed to update global retention [{}] because timeout was reached", globalRetention); - listener.onResponse(UpdateDataStreamGlobalRetentionResponse.FAILED); - } - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java deleted file mode 100644 index 92cb855b7cb4e..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.datastreams.lifecycle.action; - -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.datastreams.lifecycle.UpdateDataStreamGlobalRetentionService; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; - -/** - * Deletes the global retention for data streams (if it's not a dry run) and it returns the affected data streams. 
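As context for the deleted put/delete global-retention actions and their dry-run support, the sketch below illustrates the "affected data streams" computation they performed: compare each data stream's previous and new effective retention and report only the streams whose value changes, sorted by name for a stable response. It is a simplified, hypothetical rendering, not the removed Elasticsearch code.

import java.time.Duration;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;

// Illustrative sketch only: names and signatures are hypothetical.
final class AffectedStreamsSketch {

    record Affected(String name, Duration previousEffective, Duration newEffective) {}

    // For every data stream whose effective retention would change under the new global default,
    // record the old and new values; unchanged streams are omitted from the result.
    static List<Affected> affected(Map<String, Duration> configuredRetentionByStream,
                                   Duration previousGlobalDefault,
                                   Duration newGlobalDefault) {
        List<Affected> result = new ArrayList<>();
        for (var entry : configuredRetentionByStream.entrySet()) {
            Duration previous = entry.getValue() != null ? entry.getValue() : previousGlobalDefault;
            Duration updated = entry.getValue() != null ? entry.getValue() : newGlobalDefault;
            if (Objects.equals(previous, updated) == false) {
                result.add(new Affected(entry.getKey(), previous, updated));
            }
        }
        result.sort(Comparator.comparing(Affected::name));
        return result;
    }
}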
- */ -public class DeleteDataStreamGlobalRetentionAction { - - public static final ActionType INSTANCE = new ActionType<>( - "cluster:admin/data_stream/global_retention/delete" - ); - - private DeleteDataStreamGlobalRetentionAction() {/* no instances */} - - public static final class Request extends MasterNodeRequest { - private boolean dryRun = false; - - public Request(StreamInput in) throws IOException { - super(in); - dryRun = in.readBoolean(); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(dryRun); - } - - public Request(TimeValue masterNodeTimeout) { - super(masterNodeTimeout); - } - - public boolean dryRun() { - return dryRun; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - DeleteDataStreamGlobalRetentionAction.Request request = (DeleteDataStreamGlobalRetentionAction.Request) o; - return dryRun == request.dryRun; - } - - @Override - public int hashCode() { - return Objects.hash(dryRun); - } - - public void dryRun(boolean dryRun) { - this.dryRun = dryRun; - } - } - - public static class TransportDeleteDataStreamGlobalRetentionAction extends TransportMasterNodeAction< - Request, - UpdateDataStreamGlobalRetentionResponse> { - - private final UpdateDataStreamGlobalRetentionService globalRetentionService; - private final FeatureService featureService; - - @Inject - public TransportDeleteDataStreamGlobalRetentionAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - UpdateDataStreamGlobalRetentionService globalRetentionService, - FeatureService featureService - ) { - super( - INSTANCE.name(), - transportService, - clusterService, - threadPool, - actionFilters, - Request::new, - indexNameExpressionResolver, - UpdateDataStreamGlobalRetentionResponse::new, - threadPool.executor(ThreadPool.Names.MANAGEMENT) - ); - this.globalRetentionService = globalRetentionService; - this.featureService = featureService; - } - - @Override - protected void masterOperation( - Task task, - Request request, - ClusterState state, - ActionListener listener - ) throws Exception { - if (featureService.clusterHasFeature(state, DataStreamGlobalRetention.GLOBAL_RETENTION) == false) { - listener.onFailure( - new ResourceNotFoundException( - "Data stream global retention feature not found, please ensure all nodes have the feature " - + DataStreamGlobalRetention.GLOBAL_RETENTION.id() - ) - ); - return; - } - List affectedDataStreams = globalRetentionService - .determineAffectedDataStreams(null, state); - if (request.dryRun()) { - listener.onResponse(new UpdateDataStreamGlobalRetentionResponse(false, true, affectedDataStreams)); - } else { - globalRetentionService.removeGlobalRetention(request, affectedDataStreams, listener); - } - } - - @Override - protected ClusterBlockException checkBlock(Request request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); - } - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java deleted file mode 100644 index 
1d1064dd42b1a..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.datastreams.lifecycle.action; - -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Objects; - -/** - * Retrieves the global retention for data streams. 
- */ -public class GetDataStreamGlobalRetentionAction { - - public static final ActionType INSTANCE = new ActionType<>("cluster:monitor/data_stream/global_retention/get"); - - private GetDataStreamGlobalRetentionAction() {/* no instances */} - - public static final class Request extends MasterNodeReadRequest { - - public Request(StreamInput in) throws IOException { - super(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public boolean equals(Object obj) { - return super.equals(obj); - } - - @Override - public int hashCode() { - return super.hashCode(); - } - } - - public static class Response extends ActionResponse implements ToXContentObject { - - private final DataStreamGlobalRetention globalRetention; - - public Response(DataStreamGlobalRetention globalRetention) { - this.globalRetention = globalRetention; - } - - public Response(StreamInput in) throws IOException { - super(in); - globalRetention = DataStreamGlobalRetention.read(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - globalRetention.writeTo(out); - } - - @Override - public String toString() { - return "Response{" + "globalRetention=" + globalRetention + '}'; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - globalRetention.toXContentFragment(builder, params); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Response that = (Response) o; - return Objects.equals(globalRetention, that.globalRetention); - } - - @Override - public int hashCode() { - return Objects.hash(globalRetention); - } - } - - public static class TransportGetDataStreamGlobalSettingsAction extends TransportMasterNodeReadAction { - - private final FeatureService featureService; - private final DataStreamGlobalRetentionResolver globalRetentionResolver; - - @Inject - public TransportGetDataStreamGlobalSettingsAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - FeatureService featureService, - DataStreamGlobalRetentionResolver globalRetentionResolver - ) { - super( - INSTANCE.name(), - transportService, - clusterService, - threadPool, - actionFilters, - Request::new, - indexNameExpressionResolver, - Response::new, - threadPool.executor(ThreadPool.Names.MANAGEMENT) - ); - this.featureService = featureService; - this.globalRetentionResolver = globalRetentionResolver; - } - - @Override - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { - if (featureService.clusterHasFeature(state, DataStreamGlobalRetention.GLOBAL_RETENTION) == false) { - listener.onFailure( - new ResourceNotFoundException( - "Data stream global retention feature not found, please ensure all nodes have the feature " - + DataStreamGlobalRetention.GLOBAL_RETENTION.id() - ) - ); - return; - } - DataStreamGlobalRetention globalRetention = globalRetentionResolver.resolve(state); - listener.onResponse(new Response(globalRetention == null ? 
DataStreamGlobalRetention.EMPTY : globalRetention)); - } - - @Override - protected ClusterBlockException checkBlock(Request request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); - } - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java index 6e930defd4e0b..71f07c8cac668 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java @@ -76,7 +76,7 @@ public Response(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(runDuration); out.writeOptionalVLong(timeBetweenStarts); - out.writeCollection(dataStreamStats, (o, v) -> v.writeTo(o)); + out.writeCollection(dataStreamStats, StreamOutput::writeWriteable); } public Long getRunDuration() { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java deleted file mode 100644 index cd9156ad8b2c8..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.datastreams.lifecycle.action; - -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.ValidateActions; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.datastreams.lifecycle.UpdateDataStreamGlobalRetentionService; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; - -/** - * Sets the global retention for data streams (if it's not a dry run) and it returns the affected data streams. - */ -public class PutDataStreamGlobalRetentionAction { - - public static final ActionType INSTANCE = new ActionType<>( - "cluster:admin/data_stream/global_retention/put" - ); - - private PutDataStreamGlobalRetentionAction() {/* no instances */} - - public static final class Request extends MasterNodeRequest { - - private final DataStreamGlobalRetention globalRetention; - private boolean dryRun = false; - - public Request(StreamInput in) throws IOException { - super(in); - globalRetention = DataStreamGlobalRetention.read(in); - dryRun = in.readBoolean(); - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (globalRetention.equals(DataStreamGlobalRetention.EMPTY)) { - return ValidateActions.addValidationError( - "At least one of 'default_retention' or 'max_retention' should be defined." 
- + " If you want to remove the configuration please use the DELETE method", - validationException - ); - } - return validationException; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - globalRetention.writeTo(out); - out.writeBoolean(dryRun); - } - - public Request(TimeValue masterNodeTimeout, @Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) { - super(masterNodeTimeout); - this.globalRetention = new DataStreamGlobalRetention(defaultRetention, maxRetention); - } - - public DataStreamGlobalRetention getGlobalRetention() { - return globalRetention; - } - - public boolean dryRun() { - return dryRun; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - PutDataStreamGlobalRetentionAction.Request request = (PutDataStreamGlobalRetentionAction.Request) o; - return Objects.equals(globalRetention, request.globalRetention) && dryRun == request.dryRun; - } - - @Override - public int hashCode() { - return Objects.hash(globalRetention, dryRun); - } - - public void dryRun(boolean dryRun) { - this.dryRun = dryRun; - } - } - - public static class TransportPutDataStreamGlobalRetentionAction extends TransportMasterNodeAction< - Request, - UpdateDataStreamGlobalRetentionResponse> { - - private final UpdateDataStreamGlobalRetentionService globalRetentionService; - private final FeatureService featureService; - - @Inject - public TransportPutDataStreamGlobalRetentionAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - UpdateDataStreamGlobalRetentionService globalRetentionService, - FeatureService featureService - ) { - super( - INSTANCE.name(), - transportService, - clusterService, - threadPool, - actionFilters, - Request::new, - indexNameExpressionResolver, - UpdateDataStreamGlobalRetentionResponse::new, - threadPool.executor(ThreadPool.Names.MANAGEMENT) - ); - this.globalRetentionService = globalRetentionService; - this.featureService = featureService; - } - - @Override - protected void masterOperation( - Task task, - Request request, - ClusterState state, - ActionListener listener - ) throws Exception { - if (featureService.clusterHasFeature(state, DataStreamGlobalRetention.GLOBAL_RETENTION) == false) { - listener.onFailure( - new ResourceNotFoundException( - "Data stream global retention feature not found, please ensure all nodes have the feature " - + DataStreamGlobalRetention.GLOBAL_RETENTION.id() - ) - ); - return; - } - List affectedDataStreams = globalRetentionService - .determineAffectedDataStreams(request.globalRetention, state); - if (request.dryRun()) { - listener.onResponse(new UpdateDataStreamGlobalRetentionResponse(false, true, affectedDataStreams)); - } else { - globalRetentionService.updateGlobalRetention(request, affectedDataStreams, listener); - } - } - - @Override - protected ClusterBlockException checkBlock(Request request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); - } - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportDeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportDeleteDataStreamLifecycleAction.java index 9683588bdcae3..9dd5794d980f6 100644 --- 
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportDeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportDeleteDataStreamLifecycleAction.java @@ -18,9 +18,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java index fe5b3a1a378ff..0ffb5809c2f0f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java @@ -18,17 +18,17 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -44,7 +44,7 @@ public class TransportExplainDataStreamLifecycleAction extends TransportMasterNo ExplainDataStreamLifecycleAction.Response> { private final DataStreamLifecycleErrorStore errorStore; - private final DataStreamGlobalRetentionResolver globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public TransportExplainDataStreamLifecycleAction( @@ -54,7 +54,7 @@ public TransportExplainDataStreamLifecycleAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DataStreamLifecycleErrorStore dataLifecycleServiceErrorStore, - DataStreamGlobalRetentionResolver globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( ExplainDataStreamLifecycleAction.INSTANCE.name(), @@ -68,7 +68,7 @@ public TransportExplainDataStreamLifecycleAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.errorStore = dataLifecycleServiceErrorStore; - this.globalRetentionResolver = 
globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -103,7 +103,7 @@ protected void masterOperation( ExplainIndexDataStreamLifecycle explainIndexDataStreamLifecycle = new ExplainIndexDataStreamLifecycle( index, true, - parentDataStream.isSystem(), + parentDataStream.isInternal(), idxMetadata.getCreationDate(), rolloverInfo == null ? null : rolloverInfo.getTime(), generationDate, @@ -118,7 +118,7 @@ protected void masterOperation( new ExplainDataStreamLifecycleAction.Response( explainIndices, request.includeDefaults() ? clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - globalRetentionResolver.resolve(state) + globalRetentionSettings.get() ) ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java index 7ac9eaae41a50..452295aab0ce9 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java @@ -16,13 +16,13 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,7 +40,7 @@ public class TransportGetDataStreamLifecycleAction extends TransportMasterNodeRe GetDataStreamLifecycleAction.Request, GetDataStreamLifecycleAction.Response> { private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionResolver globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public TransportGetDataStreamLifecycleAction( @@ -49,7 +49,7 @@ public TransportGetDataStreamLifecycleAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - DataStreamGlobalRetentionResolver globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( GetDataStreamLifecycleAction.INSTANCE.name(), @@ -63,7 +63,7 @@ public TransportGetDataStreamLifecycleAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -96,7 +96,7 @@ protected void masterOperation( .sorted(Comparator.comparing(GetDataStreamLifecycleAction.Response.DataStreamLifecycle::dataStreamName)) .toList(), request.includeDefaults() ? 
clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - globalRetentionResolver.resolve(state) + globalRetentionSettings.get() ) ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java index 03bc1d129eaba..3a5ef0e86e889 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleStatsAction.java @@ -17,10 +17,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportPutDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportPutDataStreamLifecycleAction.java index 11ecf85b1ac26..ed9f263bdf03a 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportPutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportPutDataStreamLifecycleAction.java @@ -19,9 +19,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/UpdateDataStreamGlobalRetentionResponse.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/UpdateDataStreamGlobalRetentionResponse.java deleted file mode 100644 index d0ab707b91f20..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/UpdateDataStreamGlobalRetentionResponse.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.datastreams.lifecycle.action; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; -import org.elasticsearch.common.xcontent.ChunkedToXContentObject; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Iterator; -import java.util.List; - -/** - * This response is used by {@link PutDataStreamGlobalRetentionAction} and {@link DeleteDataStreamGlobalRetentionAction} to - * communicate to the user the result of a global retention update and the affected data streams. - */ -public final class UpdateDataStreamGlobalRetentionResponse extends ActionResponse implements ChunkedToXContentObject { - - public static final UpdateDataStreamGlobalRetentionResponse FAILED = new UpdateDataStreamGlobalRetentionResponse( - false, - false, - List.of() - ); - - private final boolean acknowledged; - private final boolean dryRun; - private final List affectedDataStreams; - - public UpdateDataStreamGlobalRetentionResponse(StreamInput in) throws IOException { - super(in); - acknowledged = in.readBoolean(); - dryRun = in.readBoolean(); - affectedDataStreams = in.readCollectionAsImmutableList(AffectedDataStream::read); - } - - public UpdateDataStreamGlobalRetentionResponse(boolean acknowledged, List affectedDataStreams) { - this(acknowledged, false, affectedDataStreams); - } - - public UpdateDataStreamGlobalRetentionResponse(boolean acknowledged, boolean dryRun, List affectedDataStreams) { - this.acknowledged = acknowledged; - this.dryRun = dryRun; - this.affectedDataStreams = affectedDataStreams; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(acknowledged); - out.writeBoolean(dryRun); - out.writeCollection(affectedDataStreams); - } - - @Override - public Iterator toXContentChunked(ToXContent.Params params) { - return Iterators.concat(ChunkedToXContentHelper.startObject(), Iterators.single(((builder, params1) -> { - builder.field("acknowledged", acknowledged); - builder.field("dry_run", dryRun); - return builder; - })), - ChunkedToXContentHelper.startArray("affected_data_streams"), - Iterators.map(affectedDataStreams.iterator(), affectedDataStream -> affectedDataStream::toXContent), - ChunkedToXContentHelper.endArray(), - ChunkedToXContentHelper.endObject() - ); - } - - public record AffectedDataStream(String dataStreamName, TimeValue newEffectiveRetention, TimeValue previousEffectiveRetention) - implements - Writeable, - ToXContentObject { - - public static AffectedDataStream read(StreamInput in) throws IOException { - return new AffectedDataStream(in.readString(), in.readOptionalTimeValue(), in.readOptionalTimeValue()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(dataStreamName); - out.writeOptionalTimeValue(newEffectiveRetention); - out.writeOptionalTimeValue(previousEffectiveRetention); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("name", dataStreamName); - builder.field("new_effective_retention", 
newEffectiveRetention == null ? "infinite" : newEffectiveRetention.getStringRep()); - builder.field( - "previous_effective_retention", - previousEffectiveRetention == null ? "infinite" : previousEffectiveRetention.getStringRep() - ); - builder.endObject(); - return builder; - } - } - - public boolean isAcknowledged() { - return acknowledged; - } - - public boolean isDryRun() { - return dryRun; - } - - public List getAffectedDataStreams() { - return affectedDataStreams; - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java index 048ef0bab8e0c..82350130e57af 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +20,7 @@ import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @@ -39,10 +41,12 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { String[] indices = Strings.splitStringByCommaToArray(restRequest.param("index")); - ExplainDataStreamLifecycleAction.Request explainRequest = new ExplainDataStreamLifecycleAction.Request(indices); + ExplainDataStreamLifecycleAction.Request explainRequest = new ExplainDataStreamLifecycleAction.Request( + getMasterNodeTimeout(restRequest), + indices + ); explainRequest.includeDefaults(restRequest.paramAsBoolean("include_defaults", false)); explainRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); - explainRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute( ExplainDataStreamLifecycleAction.INSTANCE, explainRequest, @@ -54,4 +58,9 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java index 3d802d483fd8c..00f9d4da88301 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java @@ -10,14 +10,17 @@ import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; import 
org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -37,6 +40,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { GetDataStreamLifecycleAction.Request getDataLifecycleRequest = new GetDataStreamLifecycleAction.Request( + RestUtils.getMasterNodeTimeout(request), Strings.splitStringByCommaToArray(request.param("name")) ); getDataLifecycleRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); @@ -52,4 +56,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java index 59d7099e27b52..4b285eaf59b97 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java @@ -41,10 +41,17 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { try (XContentParser parser = request.contentParser()) { - PutDataStreamLifecycleAction.Request putLifecycleRequest = PutDataStreamLifecycleAction.Request.parseRequest(parser); - putLifecycleRequest.indices(Strings.splitStringByCommaToArray(request.param("name"))); - putLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - putLifecycleRequest.ackTimeout(getAckTimeout(request)); + PutDataStreamLifecycleAction.Request putLifecycleRequest = PutDataStreamLifecycleAction.Request.parseRequest( + parser, + (dataRetention, enabled, downsampling) -> new PutDataStreamLifecycleAction.Request( + getMasterNodeTimeout(request), + getAckTimeout(request), + Strings.splitStringByCommaToArray(request.param("name")), + dataRetention, + enabled, + downsampling + ) + ); putLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(request, putLifecycleRequest.indicesOptions())); return channel -> client.execute( PutDataStreamLifecycleAction.INSTANCE, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestCreateDataStreamAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestCreateDataStreamAction.java index b747f413e0432..a6dc9cfc9fb18 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestCreateDataStreamAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestCreateDataStreamAction.java @@ -11,6 +11,7 @@ import 
org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; @@ -35,7 +36,11 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - CreateDataStreamAction.Request putDataStreamRequest = new CreateDataStreamAction.Request(request.param("name")); + CreateDataStreamAction.Request putDataStreamRequest = new CreateDataStreamAction.Request( + RestUtils.getMasterNodeTimeout(request), + RestUtils.getAckTimeout(request), + request.param("name") + ); return channel -> client.execute(CreateDataStreamAction.INSTANCE, putDataStreamRequest, new RestToXContentListener<>(channel)); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestDeleteDataStreamAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestDeleteDataStreamAction.java index df6f490c50764..551d14ced5104 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestDeleteDataStreamAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestDeleteDataStreamAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; @@ -37,6 +38,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { DeleteDataStreamAction.Request deleteDataStreamRequest = new DeleteDataStreamAction.Request( + RestUtils.getMasterNodeTimeout(request), Strings.splitStringByCommaToArray(request.param("name")) ); deleteDataStreamRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteDataStreamRequest.indicesOptions())); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index fbd4b9acf747e..c3178208d51c2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -10,14 +10,17 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -37,6 +40,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { GetDataStreamAction.Request getDataStreamsRequest = new 
GetDataStreamAction.Request( + RestUtils.getMasterNodeTimeout(request), Strings.splitStringByCommaToArray(request.param("name")) ); getDataStreamsRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); @@ -48,4 +52,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestMigrateToDataStreamAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestMigrateToDataStreamAction.java index 072801c3f9107..7c935bf825095 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestMigrateToDataStreamAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestMigrateToDataStreamAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; @@ -35,7 +36,11 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - MigrateToDataStreamAction.Request req = new MigrateToDataStreamAction.Request(request.param("name")); + MigrateToDataStreamAction.Request req = new MigrateToDataStreamAction.Request( + RestUtils.getMasterNodeTimeout(request), + RestUtils.getAckTimeout(request), + request.param("name") + ); return channel -> client.execute(MigrateToDataStreamAction.INSTANCE, req, new RestToXContentListener<>(channel)); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java index 957ba1b3db8a0..0b33e06eb5940 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; @@ -20,8 +21,6 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestUtils.getAckTimeout; -import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestModifyDataStreamsAction extends BaseRestHandler { @@ -40,13 +39,18 @@ public List routes() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { ModifyDataStreamsAction.Request modifyDsRequest; try (XContentParser parser = request.contentParser()) { - modifyDsRequest = ModifyDataStreamsAction.Request.PARSER.parse(parser, null); + modifyDsRequest = ModifyDataStreamsAction.Request.PARSER.parse( + parser, + actions -> new 
ModifyDataStreamsAction.Request( + RestUtils.getMasterNodeTimeout(request), + RestUtils.getAckTimeout(request), + actions + ) + ); } if (modifyDsRequest.getActions() == null || modifyDsRequest.getActions().isEmpty()) { throw new IllegalArgumentException("no data stream actions specified, at least one must be specified"); } - modifyDsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - modifyDsRequest.ackTimeout(getAckTimeout(request)); return channel -> client.execute(ModifyDataStreamsAction.INSTANCE, modifyDsRequest, new RestToXContentListener<>(channel)); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestPromoteDataStreamAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestPromoteDataStreamAction.java index 23c744eaaed2c..50ee1e193d5c8 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestPromoteDataStreamAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestPromoteDataStreamAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -31,7 +32,10 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - PromoteDataStreamAction.Request request = new PromoteDataStreamAction.Request(restRequest.param("name")); + PromoteDataStreamAction.Request request = new PromoteDataStreamAction.Request( + RestUtils.getMasterNodeTimeout(restRequest), + restRequest.param("name") + ); return channel -> client.execute(PromoteDataStreamAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index 2204c82670e69..bc313d145c17e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -236,7 +236,12 @@ private String createDataStream(boolean hidden) throws Exception { new TransportPutComposableIndexTemplateAction.Request(dataStreamName + "_template").indexTemplate(template) ) ); - assertAcked(client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName))); + assertAcked( + client().execute( + CreateDataStreamAction.INSTANCE, + new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStreamName) + ) + ); createdDataStreams.add(dataStreamName); return dataStreamName; } @@ -277,7 +282,12 @@ private DataStreamsStatsAction.Response getDataStreamsStats(boolean includeHidde } private void deleteDataStream(String dataStreamName) { - assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { dataStreamName }))); + assertAcked( + client().execute( + DeleteDataStreamAction.INSTANCE, + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }) + ) + ); assertAcked( client().execute( TransportDeleteComposableIndexTemplateAction.TYPE, diff --git 
a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index 022e33621c4f4..d5356e371f497 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; @@ -216,7 +216,10 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { xContentRegistry(), EmptySystemIndices.INSTANCE, indexSettingProviders, - new DataStreamGlobalRetentionResolver(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 97959fa385241..eb35c44d30331 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -61,7 +61,7 @@ public void testGetTimestampFieldTypeForTsdbDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, notNullValue()); } @@ -70,7 +70,7 @@ public void testGetTimestampFieldTypeForDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, nullValue()); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/CreateDataStreamRequestTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/CreateDataStreamRequestTests.java index bff238d7f16c3..7d0359a946c2e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/CreateDataStreamRequestTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/CreateDataStreamRequestTests.java @@ -25,7 +25,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request createTestInstance() { - return new Request(randomAlphaOfLength(8)); + return new Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, randomAlphaOfLength(8)); } 
@Override @@ -34,13 +34,17 @@ protected Request mutateInstance(Request instance) { } public void testValidateRequest() { - CreateDataStreamAction.Request req = new CreateDataStreamAction.Request("my-data-stream"); + CreateDataStreamAction.Request req = new CreateDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "my-data-stream" + ); ActionRequestValidationException e = req.validate(); assertNull(e); } public void testValidateRequestWithoutName() { - CreateDataStreamAction.Request req = new CreateDataStreamAction.Request(""); + CreateDataStreamAction.Request req = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, ""); ActionRequestValidationException e = req.validate(); assertNotNull(e); assertThat(e.validationErrors().size(), equalTo(1)); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamRequestTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamRequestTests.java index d1b3bde331524..056af78846f3e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamRequestTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamRequestTests.java @@ -25,7 +25,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request createTestInstance() { - return new Request(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(6))); + return new Request(TEST_REQUEST_TIMEOUT, randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(6))); } @Override @@ -34,13 +34,13 @@ protected Request mutateInstance(Request instance) { } public void testValidateRequest() { - DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { "my-data-stream" }); + DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "my-data-stream" }); ActionRequestValidationException e = req.validate(); assertNull(e); } public void testValidateRequestWithoutName() { - DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[0]); + DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[0]); ActionRequestValidationException e = req.validate(); assertNotNull(e); assertThat(e.validationErrors().size(), equalTo(1)); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java index d394db9523cce..cdfad60fcfcba 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java @@ -47,7 +47,7 @@ public void testDeleteDataStream() { final List otherIndices = randomSubsetOf(List.of("foo", "bar", "baz")); ClusterState cs = DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new Tuple<>(dataStreamName, 2)), otherIndices); - DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); + DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }); ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); 
assertThat(newState.metadata().dataStreams().size(), equalTo(0)); assertThat(newState.metadata().indices().size(), equalTo(otherIndices.size())); @@ -71,7 +71,7 @@ public void testDeleteDataStreamWithFailureStore() { false, true ); - DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); + DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }); ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState.metadata().dataStreams().size(), equalTo(0)); assertThat(newState.metadata().indices().size(), equalTo(otherIndices.size())); @@ -92,7 +92,7 @@ public void testDeleteMultipleDataStreams() { List.of() ); - DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { "ba*", "eggplant" }); + DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ba*", "eggplant" }); ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState.metadata().dataStreams().size(), equalTo(1)); DataStream remainingDataStream = newState.metadata().dataStreams().get(dataStreamNames[0]); @@ -116,7 +116,7 @@ public void testDeleteSnapshottingDataStream() { .withAddedEntry(createEntry(dataStreamName2, "repo2", true)); ClusterState snapshotCs = ClusterState.builder(cs).putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress).build(); - DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); + DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }); SnapshotInProgressException e = expectThrows( SnapshotInProgressException.class, () -> DeleteDataStreamTransportAction.removeDataStream(iner, snapshotCs, req, validator, Settings.EMPTY) @@ -167,13 +167,16 @@ public void testDeleteNonexistentDataStream() { () -> DeleteDataStreamTransportAction.removeDataStream( iner, cs, - new DeleteDataStreamAction.Request(new String[] { dataStreamName }), + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }), validator, Settings.EMPTY ) ); - DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName + "*" }); + DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamName + "*" } + ); ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState, sameInstance(cs)); assertThat(newState.metadata().dataStreams().size(), equalTo(cs.metadata().dataStreams().size())); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsRequestTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsRequestTests.java index 5e6a1ee4bc9d7..58bb3919d5a7c 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsRequestTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsRequestTests.java @@ -20,7 +20,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request createTestInstance() { - return new Request(switch (randomIntBetween(1, 4)) { + return new Request(TEST_REQUEST_TIMEOUT, switch 
(randomIntBetween(1, 4)) { case 1 -> generateRandomStringArray(3, 8, false, false); case 2 -> { String[] parameters = generateRandomStringArray(3, 8, false, false); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java index 58ab69d383464..80d867ec7745e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -45,14 +45,15 @@ public class GetDataStreamsTransportActionTests extends ESTestCase { private final IndexNameExpressionResolver resolver = TestIndexNameExpressionResolver.newInstance(); private final SystemIndices systemIndices = new SystemIndices(List.of()); - private final DataStreamGlobalRetentionResolver dataStreamGlobalRetentionResolver = new DataStreamGlobalRetentionResolver( + private final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), DataStreamFactoryRetention.emptyFactoryRetention() ); public void testGetDataStream() { final String dataStreamName = "my-data-stream"; ClusterState cs = getClusterStateWithDataStreams(List.of(new Tuple<>(dataStreamName, 1)), List.of()); - GetDataStreamAction.Request req = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }); List dataStreams = GetDataStreamsTransportAction.getDataStreams(cs, resolver, req); assertThat(dataStreams, transformedItemsMatch(DataStream::getName, contains(dataStreamName))); } @@ -64,19 +65,22 @@ public void testGetDataStreamsWithWildcards() { List.of() ); - GetDataStreamAction.Request req = new GetDataStreamAction.Request(new String[] { dataStreamNames[1].substring(0, 5) + "*" }); + GetDataStreamAction.Request req = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamNames[1].substring(0, 5) + "*" } + ); List dataStreams = GetDataStreamsTransportAction.getDataStreams(cs, resolver, req); assertThat(dataStreams, transformedItemsMatch(DataStream::getName, contains(dataStreamNames[1]))); - req = new GetDataStreamAction.Request(new String[] { "*" }); + req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); dataStreams = GetDataStreamsTransportAction.getDataStreams(cs, resolver, req); assertThat(dataStreams, transformedItemsMatch(DataStream::getName, contains(dataStreamNames[1], dataStreamNames[0]))); - req = new GetDataStreamAction.Request((String[]) null); + req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, (String[]) null); dataStreams = GetDataStreamsTransportAction.getDataStreams(cs, resolver, 
req); assertThat(dataStreams, transformedItemsMatch(DataStream::getName, contains(dataStreamNames[1], dataStreamNames[0]))); - req = new GetDataStreamAction.Request(new String[] { "matches-none*" }); + req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "matches-none*" }); dataStreams = GetDataStreamsTransportAction.getDataStreams(cs, resolver, req); assertThat(dataStreams, empty()); } @@ -88,19 +92,22 @@ public void testGetDataStreamsWithoutWildcards() { List.of() ); - GetDataStreamAction.Request req = new GetDataStreamAction.Request(new String[] { dataStreamNames[0], dataStreamNames[1] }); + GetDataStreamAction.Request req = new GetDataStreamAction.Request( + TEST_REQUEST_TIMEOUT, + new String[] { dataStreamNames[0], dataStreamNames[1] } + ); List dataStreams = GetDataStreamsTransportAction.getDataStreams(cs, resolver, req); assertThat(dataStreams, transformedItemsMatch(DataStream::getName, contains(dataStreamNames[1], dataStreamNames[0]))); - req = new GetDataStreamAction.Request(new String[] { dataStreamNames[1] }); + req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamNames[1] }); dataStreams = GetDataStreamsTransportAction.getDataStreams(cs, resolver, req); assertThat(dataStreams, transformedItemsMatch(DataStream::getName, contains(dataStreamNames[1]))); - req = new GetDataStreamAction.Request(new String[] { dataStreamNames[0] }); + req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamNames[0] }); dataStreams = GetDataStreamsTransportAction.getDataStreams(cs, resolver, req); assertThat(dataStreams, transformedItemsMatch(DataStream::getName, contains(dataStreamNames[0]))); - GetDataStreamAction.Request req2 = new GetDataStreamAction.Request(new String[] { "foo" }); + GetDataStreamAction.Request req2 = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "foo" }); IndexNotFoundException e = expectThrows( IndexNotFoundException.class, () -> GetDataStreamsTransportAction.getDataStreams(cs, resolver, req2) @@ -111,7 +118,7 @@ public void testGetDataStreamsWithoutWildcards() { public void testGetNonexistentDataStream() { final String dataStreamName = "my-data-stream"; ClusterState cs = ClusterState.builder(new ClusterName("_name")).build(); - GetDataStreamAction.Request req = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Request req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamName }); IndexNotFoundException e = expectThrows( IndexNotFoundException.class, () -> GetDataStreamsTransportAction.getDataStreams(cs, resolver, req) @@ -152,14 +159,14 @@ public void testGetTimeSeriesDataStream() { state = ClusterState.builder(new ClusterName("_name")).metadata(mBuilder).build(); } - var req = new GetDataStreamAction.Request(new String[] {}); + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); var response = GetDataStreamsTransportAction.innerOperation( state, req, resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionResolver + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -189,7 +196,7 @@ public void testGetTimeSeriesDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionResolver + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -232,14 +239,14 @@ public void testGetTimeSeriesDataStreamWithOutOfOrderIndices() { state = 
ClusterState.builder(new ClusterName("_name")).metadata(mBuilder).build(); } - var req = new GetDataStreamAction.Request(new String[] {}); + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); var response = GetDataStreamsTransportAction.innerOperation( state, req, resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionResolver + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -275,14 +282,14 @@ public void testGetTimeSeriesMixedDataStream() { state = ClusterState.builder(new ClusterName("_name")).metadata(mBuilder).build(); } - var req = new GetDataStreamAction.Request(new String[] {}); + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); var response = GetDataStreamsTransportAction.innerOperation( state, req, resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionResolver + dataStreamGlobalRetentionSettings ); var name1 = DataStream.getDefaultBackingIndexName("ds-1", 1, instant.toEpochMilli()); @@ -320,28 +327,39 @@ public void testPassingGlobalRetention() { state = ClusterState.builder(new ClusterName("_name")).metadata(mBuilder).build(); } - var req = new GetDataStreamAction.Request(new String[] {}); + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); var response = GetDataStreamsTransportAction.innerOperation( state, req, resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionResolver + dataStreamGlobalRetentionSettings ); assertThat(response.getGlobalRetention(), nullValue()); DataStreamGlobalRetention globalRetention = new DataStreamGlobalRetention( TimeValue.timeValueDays(randomIntBetween(1, 5)), TimeValue.timeValueDays(randomIntBetween(5, 10)) ); - state = ClusterState.builder(state).putCustom(DataStreamGlobalRetention.TYPE, globalRetention).build(); + DataStreamGlobalRetentionSettings withGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + globalRetention.defaultRetention() + ) + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), globalRetention.maxRetention()) + .build() + ), + DataStreamFactoryRetention.emptyFactoryRetention() + ); response = GetDataStreamsTransportAction.innerOperation( state, req, resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionResolver + withGlobalRetentionSettings ); assertThat(response.getGlobalRetention(), equalTo(globalRetention)); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index c965eb2ba2536..8cb27fd9fd282 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; +import 
org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling.Round; @@ -138,7 +138,8 @@ public class DataStreamLifecycleServiceTests extends ESTestCase { private List clientSeenRequests; private DoExecuteDelegate clientDelegate; private ClusterService clusterService; - private final DataStreamGlobalRetentionResolver globalRetentionResolver = new DataStreamGlobalRetentionResolver( + private final DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), DataStreamFactoryRetention.emptyFactoryRetention() ); @@ -187,7 +188,7 @@ public void setupServices() { errorStore, new FeatureService(List.of(new DataStreamFeatures())) ), - globalRetentionResolver + globalRetentionSettings ); clientDelegate = null; dataStreamLifecycleService.init(); @@ -1392,13 +1393,7 @@ public void testTimeSeriesIndicesStillWithinTimeBounds() { { // non time_series indices are not within time bounds (they don't have any) IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30)) - .settings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) - .build() - ) + .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())) .build(); Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build(); @@ -1432,7 +1427,7 @@ public void testTrackingTimeStats() { errorStore, new FeatureService(List.of(new DataStreamFeatures())) ), - globalRetentionResolver + globalRetentionSettings ); assertThat(service.getLastRunDuration(), is(nullValue())); assertThat(service.getTimeBetweenStarts(), is(nullValue())); @@ -1596,12 +1591,14 @@ private ClusterState createClusterState(String indexName, Map cu var routingTableBuilder = RoutingTable.builder(); Metadata.Builder metadataBuilder = Metadata.builder(); Map indices = new HashMap<>(); - Settings indexSettings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), randomIntBetween(1, 10)) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), randomIntBetween(0, 3)) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) - .build(); - IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName).version(randomLong()).settings(indexSettings); + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .version(randomLong()) + .settings( + indexSettings(randomIntBetween(1, 10), randomIntBetween(0, 3)).put( + IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), + IndexVersion.current() + ) + ); if (customDataStreamLifecycleMetadata != null) { indexMetadataBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, customDataStreamLifecycleMetadata); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionServiceTests.java deleted file mode 100644 index 41d00d063955d..0000000000000 --- 
a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionServiceTests.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.datastreams.lifecycle; - -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; -import org.elasticsearch.cluster.metadata.DataStreamLifecycle; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.Index; -import org.elasticsearch.test.ClusterServiceUtils; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.Matchers.empty; - -public class UpdateDataStreamGlobalRetentionServiceTests extends ESTestCase { - private static TestThreadPool threadPool; - private ClusterService clusterService; - private UpdateDataStreamGlobalRetentionService service; - - @BeforeClass - public static void setupThreadPool() { - threadPool = new TestThreadPool(getTestClass().getName()); - } - - @Before - public void setupServices() { - clusterService = ClusterServiceUtils.createClusterService(threadPool); - service = new UpdateDataStreamGlobalRetentionService( - clusterService, - new DataStreamGlobalRetentionResolver(DataStreamFactoryRetention.emptyFactoryRetention()) - ); - } - - @After - public void closeClusterService() { - clusterService.close(); - } - - @AfterClass - public static void tearDownThreadPool() { - ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); - threadPool = null; - } - - public void testUpdateClusterState() { - // Removing from a cluster state without global retention - { - assertThat(service.updateGlobalRetention(ClusterState.EMPTY_STATE, null), equalTo(ClusterState.EMPTY_STATE)); - assertThat( - service.updateGlobalRetention(ClusterState.EMPTY_STATE, DataStreamGlobalRetention.EMPTY), - equalTo(ClusterState.EMPTY_STATE) - ); - } - - // Removing from a cluster state with global retention - { - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .putCustom(DataStreamGlobalRetention.TYPE, randomNonEmptyGlobalRetention()) - .build(); - DataStreamGlobalRetention updatedRetention = DataStreamGlobalRetention.getFromClusterState( - service.updateGlobalRetention(clusterState, null) - ); - 
assertThat(updatedRetention, nullValue()); - updatedRetention = DataStreamGlobalRetention.getFromClusterState( - service.updateGlobalRetention(clusterState, DataStreamGlobalRetention.EMPTY) - ); - assertThat(updatedRetention, nullValue()); - } - - // Updating retention - { - var initialRetention = randomNonEmptyGlobalRetention(); - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .putCustom(DataStreamGlobalRetention.TYPE, initialRetention) - .build(); - var expectedRetention = randomValueOtherThan( - initialRetention, - UpdateDataStreamGlobalRetentionServiceTests::randomNonEmptyGlobalRetention - ); - var updatedRetention = DataStreamGlobalRetention.getFromClusterState( - service.updateGlobalRetention(clusterState, expectedRetention) - ); - assertThat(updatedRetention, equalTo(expectedRetention)); - } - - // No change means no new cluster state - { - UpdateDataStreamGlobalRetentionService serviceWithRandomFactoryRetention = new UpdateDataStreamGlobalRetentionService( - clusterService, - new DataStreamGlobalRetentionResolver( - randomBoolean() ? DataStreamFactoryRetention.emptyFactoryRetention() : randomNonEmptyFactoryRetention() - ) - ); - var retention = randomBoolean() ? null : randomNonEmptyGlobalRetention(); - ClusterState clusterState = retention == null - ? ClusterState.EMPTY_STATE - : ClusterState.builder(ClusterName.DEFAULT).putCustom(DataStreamGlobalRetention.TYPE, retention).build(); - var updatedClusterState = serviceWithRandomFactoryRetention.updateGlobalRetention(clusterState, retention); - assertThat(updatedClusterState == clusterState, is(true)); - } - } - - public void testDetermineAffectedDataStreams() { - Metadata.Builder builder = Metadata.builder(); - DataStream dataStreamWithoutLifecycle = newDataStreamInstance( - "ds-no-lifecycle", - List.of(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10))), - 1, - null, - false, - null, - List.of() - ); - builder.put(dataStreamWithoutLifecycle); - String dataStreamNoRetention = "ds-no-retention"; - DataStream dataStreamWithLifecycleNoRetention = newDataStreamInstance( - dataStreamNoRetention, - List.of(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10))), - 1, - null, - false, - DataStreamLifecycle.DEFAULT, - List.of() - ); - - builder.put(dataStreamWithLifecycleNoRetention); - DataStream dataStreamWithLifecycleShortRetention = newDataStreamInstance( - "ds-no-short-retention", - List.of(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10))), - 1, - null, - false, - DataStreamLifecycle.newBuilder().dataRetention(TimeValue.timeValueDays(7)).build(), - List.of() - ); - builder.put(dataStreamWithLifecycleShortRetention); - String dataStreamLongRetention = "ds-long-retention"; - DataStream dataStreamWithLifecycleLongRetention = newDataStreamInstance( - dataStreamLongRetention, - List.of(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10))), - 1, - null, - false, - DataStreamLifecycle.newBuilder().dataRetention(TimeValue.timeValueDays(365)).build(), - List.of() - ); - builder.put(dataStreamWithLifecycleLongRetention); - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); - // No global retention - { - var affectedDataStreams = service.determineAffectedDataStreams(null, clusterState); - assertThat(affectedDataStreams.isEmpty(), is(true)); - } - // No difference in global retention - { - var globalRetention = randomNonEmptyGlobalRetention(); - var clusterStateWithRetention = ClusterState.builder(clusterState) - 
.putCustom(DataStreamGlobalRetention.TYPE, globalRetention) - .build(); - var affectedDataStreams = service.determineAffectedDataStreams(globalRetention, clusterStateWithRetention); - assertThat(affectedDataStreams.isEmpty(), is(true)); - } - // Default retention in effect - { - var globalRetention = new DataStreamGlobalRetention(TimeValue.timeValueDays(randomIntBetween(1, 10)), null); - var affectedDataStreams = service.determineAffectedDataStreams(globalRetention, clusterState); - if (dataStreamWithLifecycleNoRetention.isSystem()) { - assertThat(affectedDataStreams.size(), is(0)); - } else { - assertThat(affectedDataStreams.size(), is(1)); - var dataStream = affectedDataStreams.get(0); - assertThat(dataStream.dataStreamName(), equalTo(dataStreamNoRetention)); - assertThat(dataStream.previousEffectiveRetention(), nullValue()); - assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getDefaultRetention())); - } - } - // Max retention in effect - { - var globalRetention = new DataStreamGlobalRetention(null, TimeValue.timeValueDays(randomIntBetween(10, 90))); - var affectedDataStreams = service.determineAffectedDataStreams(globalRetention, clusterState); - if (dataStreamWithLifecycleLongRetention.isSystem() && dataStreamWithLifecycleNoRetention.isSystem()) { - assertThat(affectedDataStreams.size(), is(0)); - } else if (dataStreamWithLifecycleLongRetention.isSystem() == false && dataStreamWithLifecycleNoRetention.isSystem() == false) { - assertThat(affectedDataStreams.size(), is(2)); - var dataStream = affectedDataStreams.get(0); - assertThat(dataStream.dataStreamName(), equalTo(dataStreamLongRetention)); - assertThat(dataStream.previousEffectiveRetention(), notNullValue()); - assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getMaxRetention())); - dataStream = affectedDataStreams.get(1); - assertThat(dataStream.dataStreamName(), equalTo(dataStreamNoRetention)); - assertThat(dataStream.previousEffectiveRetention(), nullValue()); - assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getMaxRetention())); - } else if (dataStreamWithLifecycleLongRetention.isSystem() == false) { - assertThat(affectedDataStreams.size(), is(1)); - var dataStream = affectedDataStreams.get(0); - assertThat(dataStream.dataStreamName(), equalTo(dataStreamLongRetention)); - assertThat(dataStream.previousEffectiveRetention(), notNullValue()); - assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getMaxRetention())); - } else { - assertThat(affectedDataStreams.size(), is(1)); - var dataStream = affectedDataStreams.get(0); - assertThat(dataStream.dataStreamName(), equalTo(dataStreamNoRetention)); - assertThat(dataStream.previousEffectiveRetention(), nullValue()); - assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getMaxRetention())); - } - } - - // Requested global retention match the factory retention, so no affected data streams - { - DataStreamFactoryRetention factoryRetention = randomNonEmptyFactoryRetention(); - UpdateDataStreamGlobalRetentionService serviceWithRandomFactoryRetention = new UpdateDataStreamGlobalRetentionService( - clusterService, - new DataStreamGlobalRetentionResolver(factoryRetention) - ); - var globalRetention = new DataStreamGlobalRetention(factoryRetention.getDefaultRetention(), factoryRetention.getMaxRetention()); - var affectedDataStreams = serviceWithRandomFactoryRetention.determineAffectedDataStreams(globalRetention, clusterState); - assertThat(affectedDataStreams, is(empty())); - } - } - - 
private static DataStream newDataStreamInstance( - String name, - List indices, - long generation, - Map metadata, - boolean replicated, - @Nullable DataStreamLifecycle lifecycle, - List failureStores - ) { - DataStream.Builder builder = DataStream.builder(name, indices) - .setGeneration(generation) - .setMetadata(metadata) - .setReplicated(replicated) - .setLifecycle(lifecycle) - .setFailureStoreEnabled(failureStores.isEmpty() == false) - .setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(failureStores).build()); - if (randomBoolean()) { - builder.setSystem(true); - builder.setHidden(true); - } - return builder.build(); - } - - private static DataStreamGlobalRetention randomNonEmptyGlobalRetention() { - boolean withDefault = randomBoolean(); - return new DataStreamGlobalRetention( - withDefault ? TimeValue.timeValueDays(randomIntBetween(1, 1000)) : null, - withDefault == false || randomBoolean() ? TimeValue.timeValueDays(randomIntBetween(1000, 2000)) : null - ); - } - - private static DataStreamFactoryRetention randomNonEmptyFactoryRetention() { - boolean withDefault = randomBoolean(); - TimeValue defaultRetention = withDefault ? TimeValue.timeValueDays(randomIntBetween(10, 20)) : null; - TimeValue maxRetention = withDefault && randomBoolean() ? null : TimeValue.timeValueDays(randomIntBetween(50, 200)); - return new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return maxRetention; - } - - @Override - public TimeValue getDefaultRetention() { - return defaultRetention; - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - }; - } -} diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index 35e3f38d55c26..ec0c82365a681 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -1150,3 +1150,96 @@ setup: name: simple-data-stream2 - is_true: acknowledged +--- +"Create data stream with match all template": + - requires: + cluster_features: ["gte_v8.16.0"] + reason: "data streams supoprt for match all templates only supported in 8.16" + + - do: + allowed_warnings: + - "index template [match-all-template] has index patterns [*] matching patterns from existing older templates [.monitoring-logstash,.monitoring-es,.monitoring-beats,.monitoring-alerts-7,global,.monitoring-kibana] with patterns (.monitoring-logstash => [.monitoring-logstash-7-*],.monitoring-es => [.monitoring-es-7-*],.monitoring-beats => [.monitoring-beats-7-*],.monitoring-alerts-7 => [.monitoring-alerts-7],global => [*],.monitoring-kibana => [.monitoring-kibana-7-*]); this template [match-all-template] will take precedence during new index creation" + - "index template [match-all-template] has index patterns [*] matching patterns from existing older templates [.monitoring-logstash,.monitoring-es,.monitoring-beats,.monitoring-alerts-7,.monitoring-kibana] with patterns (.monitoring-logstash => [.monitoring-logstash-7-*],.monitoring-es => [.monitoring-es-7-*],.monitoring-beats => [.monitoring-beats-7-*],.monitoring-alerts-7 => [.monitoring-alerts-7],.monitoring-kibana => [.monitoring-kibana-7-*]); this template [match-all-template] will take precedence during new index creation" + indices.put_index_template: + name: match-all-template + body: + index_patterns: [ "*" 
] + priority: 1 + data_stream: {} + + - do: + indices.create_data_stream: + name: match-all-data-stream + - is_true: acknowledged + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_data_stream: + name: "*" + - match: { data_streams.0.name: match-all-data-stream } + - match: { data_streams.0.generation: 1 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-match-all-data-stream-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.status: 'GREEN' } + - match: { data_streams.0.template: 'match-all-template' } + - match: { data_streams.0.hidden: false } + + - do: + indices.delete_data_stream: + name: match-all-data-stream + - is_true: acknowledged + + - do: + indices.delete_index_template: + name: match-all-template + - is_true: acknowledged + +--- +"Create hidden data stream with match all template": + - requires: + cluster_features: [ "gte_v8.16.0" ] + reason: "data streams supoprt for match all templates only supported in 8.16" + - do: + allowed_warnings: + - "index template [match-all-hidden-template] has index patterns [*] matching patterns from existing older templates [.monitoring-logstash,.monitoring-es,.monitoring-beats,.monitoring-alerts-7,global,.monitoring-kibana] with patterns (.monitoring-logstash => [.monitoring-logstash-7-*],.monitoring-es => [.monitoring-es-7-*],.monitoring-beats => [.monitoring-beats-7-*],.monitoring-alerts-7 => [.monitoring-alerts-7],global => [*],.monitoring-kibana => [.monitoring-kibana-7-*]); this template [match-all-hidden-template] will take precedence during new index creation" + - "index template [match-all-hidden-template] has index patterns [*] matching patterns from existing older templates [.monitoring-logstash,.monitoring-es,.monitoring-beats,.monitoring-alerts-7,.monitoring-kibana] with patterns (.monitoring-logstash => [.monitoring-logstash-7-*],.monitoring-es => [.monitoring-es-7-*],.monitoring-beats => [.monitoring-beats-7-*],.monitoring-alerts-7 => [.monitoring-alerts-7],.monitoring-kibana => [.monitoring-kibana-7-*]); this template [match-all-hidden-template] will take precedence during new index creation" + indices.put_index_template: + name: match-all-hidden-template + body: + index_patterns: [ "*" ] + priority: 1 + data_stream: + hidden: true + - do: + indices.create_data_stream: + name: match-all-hidden-data-stream + - is_true: acknowledged + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_data_stream: + name: "*" + - length: { data_streams: 0 } + + - do: + indices.get_data_stream: + name: ['*'] + expand_wildcards: hidden + - length: { data_streams: 1 } + - match: { data_streams.0.name: match-all-hidden-data-stream } + - match: { data_streams.0.hidden: true } + + - do: + indices.delete_data_stream: + name: match-all-hidden-data-stream + - is_true: acknowledged + + - do: + indices.delete_index_template: + name: match-all-hidden-template diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index 8c23232bb457c..d20231a6d6cf2 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -232,13 +232,13 @@ dynamic templates: refresh: true body: - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - - '{ "@timestamp": 
"2023-09-01T13:03:08.138Z", "data": "10", "attributes.dim1": "A", "attributes.dim2": "1", "attributes.another.dim1": "C", "attributes.another.dim2": "10.5" }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "data": "10", "attributes.dim1": "A", "attributes.dim2": "1", "attributes.another.dim1": "C", "attributes.another.dim2": "10.5", "attributes.another.dim3": true }' - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - - '{ "@timestamp": "2023-09-01T13:03:09.138Z", "data": "20", "attributes.dim1": "A", "attributes.dim2": "1", "attributes.another.dim1": "C", "attributes.another.dim2": "10.5" }' + - '{ "@timestamp": "2023-09-01T13:03:09.138Z", "data": "20", "attributes.dim1": "A", "attributes.dim2": "1", "attributes.another.dim1": "C", "attributes.another.dim2": "10.5", "attributes.another.dim3": true }' - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - - '{ "@timestamp": "2023-09-01T13:03:10.138Z", "data": "30", "attributes.dim1": "B", "attributes.dim2": "2", "attributes.another.dim1": "D", "attributes.another.dim2": "20.5" }' + - '{ "@timestamp": "2023-09-01T13:03:10.138Z", "data": "30", "attributes.dim1": "B", "attributes.dim2": "2", "attributes.another.dim1": "D", "attributes.another.dim2": "20.5", "attributes.another.dim3": false }' - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - - '{ "@timestamp": "2023-09-01T13:03:10.238Z", "data": "40", "attributes.dim1": "B", "attributes.dim2": "2", "attributes.another.dim1": "D", "attributes.another.dim2": "20.5" }' + - '{ "@timestamp": "2023-09-01T13:03:10.238Z", "data": "40", "attributes.dim1": "B", "attributes.dim2": "2", "attributes.another.dim1": "D", "attributes.another.dim2": "20.5", "attributes.another.dim3": false }' - do: search: @@ -264,7 +264,7 @@ dynamic templates: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "MD2HE8yse1ZklY-p0-bRcC8gYpiKqVppKhfZ18WLDvTuNPo7EnyZdkhvafL006Xf2Q" } + - match: { aggregations.filterA.tsids.buckets.0.key: "NOHjOAVWLTVWZM4CXLoraZYgYpiKqVppKnpcfycX2dfFiw707uoshWIGVb-ie-ZDQ7hwqiw" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } - do: @@ -283,7 +283,7 @@ dynamic templates: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "MD2HE8yse1ZklY-p0-bRcC8gYpiKqVppKhfZ18WLDvTuNPo7EnyZdkhvafL006Xf2Q" } + - match: { aggregations.filterA.tsids.buckets.0.key: "NOHjOAVWLTVWZM4CXLoraZYgYpiKqVppKnpcfycX2dfFiw707uoshWIGVb-ie-ZDQ7hwqiw" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } - do: @@ -302,7 +302,7 @@ dynamic templates: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "MD2HE8yse1ZklY-p0-bRcC8gYpiKqVppKhfZ18WLDvTuNPo7EnyZdkhvafL006Xf2Q" } + - match: { aggregations.filterA.tsids.buckets.0.key: "NOHjOAVWLTVWZM4CXLoraZYgYpiKqVppKnpcfycX2dfFiw707uoshWIGVb-ie-ZDQ7hwqiw" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } - do: @@ -321,7 +321,25 @@ dynamic templates: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "MD2HE8yse1ZklY-p0-bRcC8gYpiKqVppKhfZ18WLDvTuNPo7EnyZdkhvafL006Xf2Q" } + - match: { aggregations.filterA.tsids.buckets.0.key: "NOHjOAVWLTVWZM4CXLoraZYgYpiKqVppKnpcfycX2dfFiw707uoshWIGVb-ie-ZDQ7hwqiw" } + - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } + - do: + search: + index: k9s + body: + size: 0 + 
aggs: + filterA: + filter: + term: + another.dim3: true + aggs: + tsids: + terms: + field: _tsid + + - length: { aggregations.filterA.tsids.buckets: 1 } + - match: { aggregations.filterA.tsids.buckets.0.key: "NOHjOAVWLTVWZM4CXLoraZYgYpiKqVppKnpcfycX2dfFiw707uoshWIGVb-ie-ZDQ7hwqiw" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } --- @@ -554,13 +572,13 @@ dynamic templates with nesting: refresh: true body: - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - - '{ "@timestamp": "2023-09-01T13:03:08.138Z","data": "10", "resource.attributes.dim1": "A", "resource.attributes.another.dim1": "1", "attributes.dim2": "C", "attributes.another.dim2": "10.5", "attributes.a.much.deeper.nested.dim": "AC" }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z","data": "10", "resource.attributes.dim1": "A", "resource.attributes.another.dim1": "1", "attributes.dim2": "C", "attributes.another.dim2": "10.5", "attributes.another.dim3": true, "attributes.a.much.deeper.nested.dim": "AC" }' - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - - '{ "@timestamp": "2023-09-01T13:03:09.138Z","data": "20", "resource.attributes.dim1": "A", "resource.attributes.another.dim1": "1", "attributes.dim2": "C", "attributes.another.dim2": "10.5", "attributes.a.much.deeper.nested.dim": "AC" }' + - '{ "@timestamp": "2023-09-01T13:03:09.138Z","data": "20", "resource.attributes.dim1": "A", "resource.attributes.another.dim1": "1", "attributes.dim2": "C", "attributes.another.dim2": "10.5", "attributes.another.dim3": true, "attributes.a.much.deeper.nested.dim": "AC" }' - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - - '{ "@timestamp": "2023-09-01T13:03:10.138Z","data": "30", "resource.attributes.dim1": "B", "resource.attributes.another.dim1": "2", "attributes.dim2": "D", "attributes.another.dim2": "20.5", "attributes.a.much.deeper.nested.dim": "BD" }' + - '{ "@timestamp": "2023-09-01T13:03:10.138Z","data": "30", "resource.attributes.dim1": "B", "resource.attributes.another.dim1": "2", "attributes.dim2": "D", "attributes.another.dim2": "20.5", "attributes.another.dim3": false, "attributes.a.much.deeper.nested.dim": "BD" }' - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - - '{ "@timestamp": "2023-09-01T13:03:10.238Z","data": "40", "resource.attributes.dim1": "B", "resource.attributes.another.dim1": "2", "attributes.dim2": "D", "attributes.another.dim2": "20.5", "attributes.a.much.deeper.nested.dim": "BD" }' + - '{ "@timestamp": "2023-09-01T13:03:10.238Z","data": "40", "resource.attributes.dim1": "B", "resource.attributes.another.dim1": "2", "attributes.dim2": "D", "attributes.another.dim2": "20.5", "attributes.another.dim3": false, "attributes.a.much.deeper.nested.dim": "BD" }' - do: search: @@ -586,7 +604,7 @@ dynamic templates with nesting: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "NNnsRFDTqKogyRBhOBQclM4BkssYqVppKiBimIqLDvTuF9nXxZWMD04YHQKL09tJYL5G4yo" } + - match: { aggregations.filterA.tsids.buckets.0.key: "OFP9EtCzqs8Sp7Rn2I9NahMBkssYqVppKnpcfycgYpiKiw707hfZ18UMdd8dUGmp6bH35LX6Gni-" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } - do: @@ -605,7 +623,7 @@ dynamic templates with nesting: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "NNnsRFDTqKogyRBhOBQclM4BkssYqVppKiBimIqLDvTuF9nXxZWMD04YHQKL09tJYL5G4yo" } + - match: { aggregations.filterA.tsids.buckets.0.key: 
"OFP9EtCzqs8Sp7Rn2I9NahMBkssYqVppKnpcfycgYpiKiw707hfZ18UMdd8dUGmp6bH35LX6Gni-" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } - do: @@ -624,7 +642,7 @@ dynamic templates with nesting: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "NNnsRFDTqKogyRBhOBQclM4BkssYqVppKiBimIqLDvTuF9nXxZWMD04YHQKL09tJYL5G4yo" } + - match: { aggregations.filterA.tsids.buckets.0.key: "OFP9EtCzqs8Sp7Rn2I9NahMBkssYqVppKnpcfycgYpiKiw707hfZ18UMdd8dUGmp6bH35LX6Gni-" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } - do: @@ -643,7 +661,26 @@ dynamic templates with nesting: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "NNnsRFDTqKogyRBhOBQclM4BkssYqVppKiBimIqLDvTuF9nXxZWMD04YHQKL09tJYL5G4yo" } + - match: { aggregations.filterA.tsids.buckets.0.key: "OFP9EtCzqs8Sp7Rn2I9NahMBkssYqVppKnpcfycgYpiKiw707hfZ18UMdd8dUGmp6bH35LX6Gni-" } + - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } + + - do: + search: + index: k9s + body: + size: 0 + aggs: + filterA: + filter: + term: + another.dim3: true + aggs: + tsids: + terms: + field: _tsid + + - length: { aggregations.filterA.tsids.buckets: 1 } + - match: { aggregations.filterA.tsids.buckets.0.key: "OFP9EtCzqs8Sp7Rn2I9NahMBkssYqVppKnpcfycgYpiKiw707hfZ18UMdd8dUGmp6bH35LX6Gni-" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } - do: @@ -662,7 +699,7 @@ dynamic templates with nesting: field: _tsid - length: { aggregations.filterA.tsids.buckets: 1 } - - match: { aggregations.filterA.tsids.buckets.0.key: "NNnsRFDTqKogyRBhOBQclM4BkssYqVppKiBimIqLDvTuF9nXxZWMD04YHQKL09tJYL5G4yo" } + - match: { aggregations.filterA.tsids.buckets.0.key: "OFP9EtCzqs8Sp7Rn2I9NahMBkssYqVppKnpcfycgYpiKiw707hfZ18UMdd8dUGmp6bH35LX6Gni-" } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } --- @@ -763,6 +800,19 @@ dynamic templates with incremental indexing: - '{ "@timestamp": "2023-09-01T13:06:10.138Z","data": "330", "attributes.a.much.deeper.nested.dim": "BD" }' - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' - '{ "@timestamp": "2023-09-01T13:06:10.238Z","data": "340", "attributes.a.much.deeper.nested.dim": "BD" }' + - do: + bulk: + index: k9s + refresh: true + body: + - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' + - '{ "@timestamp": "2023-09-01T13:05:08.138Z","data": "210", "resource.attributes.another.deeper.dim3": true }' + - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' + - '{ "@timestamp": "2023-09-01T13:05:09.138Z","data": "220", "resource.attributes.another.deeper.dim3": true }' + - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' + - '{ "@timestamp": "2023-09-01T13:05:10.138Z","data": "230", "resource.attributes.another.deeper.dim3": false }' + - '{ "create": { "dynamic_templates": { "data": "counter_metric" } } }' + - '{ "@timestamp": "2023-09-01T13:05:10.238Z","data": "240", "resource.attributes.another.deeper.dim3": false }' - do: search: @@ -770,7 +820,7 @@ dynamic templates with incremental indexing: body: size: 0 - - match: { hits.total.value: 16 } + - match: { hits.total.value: 20 } - do: search: @@ -862,6 +912,24 @@ dynamic templates with incremental indexing: - length: { aggregations.filterA.tsids.buckets: 1 } - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } + - do: + search: + index: k9s + body: + size: 0 + aggs: + filterA: + filter: + term: + another.deeper.dim3: true + 
aggs: + tsids: + terms: + field: _tsid + + - length: { aggregations.filterA.tsids.buckets: 1 } + - match: { aggregations.filterA.tsids.buckets.0.doc_count: 2 } + --- subobject in passthrough object auto flatten: - requires: @@ -1052,3 +1120,113 @@ dimensions with ignore_malformed and ignore_above: - length: { aggregations.keyword_dims.buckets: 1 } - match: { aggregations.keyword_dims.buckets.0.key: "foo" } - match: { aggregations.keyword_dims.buckets.0.doc_count: 2 } + +--- +non string dimension fields: + - requires: + cluster_features: ["mapper.pass_through_priority", "routing.boolean_routing_path", "mapper.boolean_dimension"] + reason: support for priority in passthrough objects + - do: + allowed_warnings: + - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" + indices.put_index_template: + name: my-dynamic-template + body: + index_patterns: [k9s*] + data_stream: {} + template: + settings: + index: + number_of_shards: 1 + mode: time_series + time_series: + start_time: 2023-08-31T13:03:08.138Z + + mappings: + properties: + attributes: + type: passthrough + dynamic: true + time_series_dimension: true + priority: 0 + metrics: + type: passthrough + priority: 1 + dynamic_templates: + - counter_metric: + mapping: + type: integer + time_series_metric: counter + - ip_attributes: + path_match: "*.ip" + match_mapping_type: string + mapping: + type: ip + - strings_as_keywords: + match_mapping_type: string + mapping: + type: keyword + - double_as_double: + match_mapping_type: double + mapping: + type: double + + - do: + bulk: + index: k9s + refresh: true + body: + - '{ "create": { "dynamic_templates": { "metrics.data": "counter_metric" } } }' + - '{ "@timestamp": "2023-09-01T13:03:08.138Z", "metrics.data": "10", "attributes.string": "foo", "attributes.boolean": true, "attributes.integer": 1, "attributes.double": 1.1, "attributes.host.ip": "127.0.0.1" }' + - is_false: errors + + - do: + search: + index: k9s + body: + query: + bool: + must: + - term: + attributes.string: + value: foo + - term: + attributes.boolean: + value: true + - term: + attributes.integer: + value: 1 + - term: + attributes.double: + value: 1.1 + - term: + attributes.host.ip: + value: 127.0.0.1 + size: 1 + fields: [ "*" ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.attributes\.string: [ "foo" ] } + - match: { hits.hits.0.fields.attributes\.boolean: [ true ] } + - match: { hits.hits.0.fields.attributes\.integer: [ 1 ] } + - match: { hits.hits.0.fields.attributes\.double: [ 1.1 ] } + - match: { hits.hits.0.fields.attributes\.host\.ip: [ "127.0.0.1" ] } + + - do: + indices.get_data_stream: + name: k9s + - set: { data_streams.0.indices.0.index_name: idx0name } + + - do: + indices.get_mapping: + index: $idx0name + expand_wildcards: hidden + - match: { .$idx0name.mappings.properties.attributes.properties.string.type: 'keyword' } + - match: { .$idx0name.mappings.properties.attributes.properties.string.time_series_dimension: true } + - match: { .$idx0name.mappings.properties.attributes.properties.boolean.type: 'boolean' } + - match: { .$idx0name.mappings.properties.attributes.properties.boolean.time_series_dimension: true } + - match: { .$idx0name.mappings.properties.attributes.properties.integer.type: 'long' } + - match: { .$idx0name.mappings.properties.attributes.properties.integer.time_series_dimension: true } + - match: { 
.$idx0name.mappings.properties.attributes.properties.double.type: 'double' } + - match: { .$idx0name.mappings.properties.attributes.properties.double.time_series_dimension: true } + - match: { .$idx0name.mappings.properties.attributes.properties.host\.ip.type: 'ip' } + - match: { .$idx0name.mappings.properties.attributes.properties.host\.ip.time_series_dimension: true } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index 54ce32eb13207..991504b27f65f 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -123,7 +123,7 @@ teardown: - match: { hits.hits.0._source.document.source.foo: 'bar' } - match: { hits.hits.0._source.error.type: 'fail_processor_exception' } - match: { hits.hits.0._source.error.message: 'error_message' } - - contains: { hits.hits.0._source.error.stack_trace: 'org.elasticsearch.ingest.common.FailProcessorException: error_message' } + - contains: { hits.hits.0._source.error.stack_trace: 'error_message' } - length: { hits.hits.0._source.error.pipeline_trace: 2 } - match: { hits.hits.0._source.error.pipeline_trace.0: 'parent_failing_pipeline' } - match: { hits.hits.0._source.error.pipeline_trace.1: 'failing_pipeline' } @@ -207,7 +207,6 @@ teardown: - match: { hits.hits.0._source.error.type: 'document_parsing_exception' } - contains: { hits.hits.0._source.error.message: "failed to parse field [count] of type [long] in document with id " } - contains: { hits.hits.0._source.error.message: "Preview of field's value: 'invalid value'" } - - contains: { hits.hits.0._source.error.stack_trace: "org.elasticsearch.index.mapper.DocumentParsingException: " } - contains: { hits.hits.0._source.error.stack_trace: "failed to parse field [count] of type [long] in document with id" } - contains: { hits.hits.0._source.error.stack_trace: "Preview of field's value: 'invalid value'" } @@ -319,4 +318,433 @@ teardown: index: .fs-destination-* - length: { hits.hits: 1 } - match: { hits.hits.0._index: "/\\.fs-destination-data-stream-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } - - match: { hits.hits.0._source.document.index: 'destination-data-stream' } + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + +--- +"Failure redirects to original failure store during index change if self referenced": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: "data stream failure stores REST structure changed in 8.15+" + test_runner_features: [ allowed_warnings, contains ] + + - do: + ingest.put_pipeline: + id: "failing_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "set": { + "field": "_index", + "value": "logs-elsewhere" + } + }, + { + "script": { + "source": "ctx.object.data = ctx.object" + } + } + ] + } + - match: { acknowledged: true } + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 
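The "non string dimension fields" test above relies on boolean, long, double and ip attributes under a passthrough object becoming time_series_dimension fields, which means they participate in _tsid routing. A minimal sketch of that effect in the same YAML REST-test style: the index name k9s and the counter_metric dynamic template come from the test, while attributes.flag and the timestamps are invented for the sketch; two documents that differ only in a boolean dimension are expected to hash to two distinct _tsid values.

  - do:
      bulk:
        index: k9s
        refresh: true
        body:
          - '{ "create": { "dynamic_templates": { "metrics.data": "counter_metric" } } }'
          - '{ "@timestamp": "2023-09-01T13:04:08.138Z", "metrics.data": "1", "attributes.flag": true }'
          - '{ "create": { "dynamic_templates": { "metrics.data": "counter_metric" } } }'
          - '{ "@timestamp": "2023-09-01T13:04:09.138Z", "metrics.data": "2", "attributes.flag": false }'

  - do:
      search:
        index: k9s
        body:
          size: 0
          aggs:
            tsids:
              terms:
                field: _tsid

  - length: { aggregations.tsids.buckets: 2 }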
+ number_of_replicas: 1 + index: + default_pipeline: "failing_pipeline" + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + object: + data: + field: 'someValue' + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: logs-foobar + body: { query: { match_all: { } } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-logs-foobar-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.foo + - not_exists: hits.hits.0._source.document.id + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.object.data.field: 'someValue' } + - match: { hits.hits.0._source.error.type: 'illegal_argument_exception' } + - contains: { hits.hits.0._source.error.message: 'Failed to generate the source document for ingest pipeline' } + - contains: { hits.hits.0._source.error.stack_trace: 'Failed to generate the source document for ingest pipeline' } + - match: { hits.hits.0._source.error.pipeline_trace.0: 'failing_pipeline' } + - match: { hits.hits.0._source.error.pipeline: 'failing_pipeline' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-logs-foobar-* + - is_true: acknowledged + +--- +"Failure redirects to original failure store during index change if final pipeline changes target": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: "data stream failure stores REST structure changed in 8.15+" + test_runner_features: [ allowed_warnings, contains ] + + - do: + ingest.put_pipeline: + id: "change_index_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "set": { + "field": "_index", + "value": "logs-elsewhere" + } + } + ] + } + - match: { acknowledged: true } + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + final_pipeline: "change_index_pipeline" + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { 
data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: logs-foobar + body: { query: { match_all: { } } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-logs-foobar-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.foo + - not_exists: hits.hits.0._source.document.id + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.foo: 'bar' } + - match: { hits.hits.0._source.error.type: 'illegal_state_exception' } + - contains: { hits.hits.0._source.error.message: "final pipeline [change_index_pipeline] can't change the target index" } + - contains: { hits.hits.0._source.error.stack_trace: "final pipeline [change_index_pipeline] can't change the target index" } + - match: { hits.hits.0._source.error.pipeline_trace.0: 'change_index_pipeline' } + - match: { hits.hits.0._source.error.pipeline: 'change_index_pipeline' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-logs-foobar-* + - is_true: acknowledged + +--- +"Failure redirects to correct failure store when index loop is detected": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: "data stream failure stores REST structure changed in 8.15+" + test_runner_features: [ allowed_warnings, contains ] + + - do: + ingest.put_pipeline: + id: "send_to_destination" + body: > + { + "description": "_description", + "processors": [ + { + "reroute": { + "tag": "reroute-tag-1", + "destination": "destination-data-stream" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "send_back_to_original" + body: > + { + "description": "_description", + "processors": [ + { + "reroute": { + "tag": "reroute-tag-2", + "destination": "logs-foobar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + default_pipeline: "send_to_destination" + + - do: + allowed_warnings: + - "index template [destination_logs_template] has index patterns [destination-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [destination_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: destination_logs_template + body: + index_patterns: destination-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + default_pipeline: "send_back_to_original" + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + + - do: + indices.get_data_stream: + name: destination-data-stream + - match: { data_streams.0.name: destination-data-stream } + - match: { 
data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-destination-data-stream-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-destination-data-stream-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: destination-data-stream + body: { query: { match_all: { } } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-destination-data-stream-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-destination-data-stream-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.foo + - not_exists: hits.hits.0._source.document.id + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.foo: 'bar' } + - match: { hits.hits.0._source.error.type: 'illegal_state_exception' } + - contains: { hits.hits.0._source.error.message: 'index cycle detected' } + - contains: { hits.hits.0._source.error.stack_trace: 'index cycle detected' } + - match: { hits.hits.0._source.error.pipeline_trace.0: 'send_back_to_original' } + - match: { hits.hits.0._source.error.pipeline: 'send_back_to_original' } + + - do: + indices.delete_data_stream: + name: destination-data-stream + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-destination-data-stream-* + - is_true: acknowledged + +--- +"Failure redirects to correct failure store when pipeline loop is detected": + - requires: + cluster_features: [ "gte_v8.15.0" ] + reason: "data stream failure stores REST structure changed in 8.15+" + test_runner_features: [ allowed_warnings, contains ] + + - do: + ingest.put_pipeline: + id: "step_1" + body: > + { + "description": "_description", + "processors": [ + { + "pipeline": { + "tag": "step-1", + "name": "step_2" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "step_2" + body: > + { + "description": "_description", + "processors": [ + { + "pipeline": { + "tag": "step-2", + "name": "step_1" + } + } + ] + } + - match: { acknowledged: true } + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + index: + default_pipeline: "step_1" + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + foo: bar + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: 
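Each of the failure-store redirection tests in this file asserts the same document layout in the .fs-* index. As a reference, the shape being checked looks roughly like the following; the field names come from the assertions, the values shown are the ones asserted by the index-cycle case above, and the top-level @timestamp is only asserted to exist. The tests also assert that document.id is absent and that the original fields (such as foo) are not copied to the top level; in the pipeline-loop case below, error.processor_tag and error.processor_type are populated as well.

  {
    "@timestamp": "...",
    "document": {
      "index": "logs-foobar",
      "source": { "@timestamp": "2020-12-12", "foo": "bar" }
    },
    "error": {
      "type": "illegal_state_exception",
      "message": "... index cycle detected ...",
      "stack_trace": "... index cycle detected ...",
      "pipeline_trace": [ "send_back_to_original" ],
      "pipeline": "send_back_to_original"
    }
  }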
+ search: + index: logs-foobar + body: { query: { match_all: { } } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-logs-foobar-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.foo + - not_exists: hits.hits.0._source.document.id + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.foo: 'bar' } + - match: { hits.hits.0._source.error.type: 'graph_structure_exception' } + - contains: { hits.hits.0._source.error.message: 'Cycle detected for pipeline: step_1' } + - contains: { hits.hits.0._source.error.stack_trace: 'Cycle detected for pipeline: step_1' } + - match: { hits.hits.0._source.error.pipeline_trace.0: 'step_1' } + - match: { hits.hits.0._source.error.pipeline_trace.1: 'step_2' } + - match: { hits.hits.0._source.error.pipeline: 'step_2' } + - match: { hits.hits.0._source.error.processor_tag: 'step-2' } + - match: { hits.hits.0._source.error.processor_type: 'pipeline' } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-logs-foobar-* + - is_true: acknowledged diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml new file mode 100644 index 0000000000000..ef36f283fe237 --- /dev/null +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml @@ -0,0 +1,104 @@ +setup: + - requires: + cluster_features: [ "gte_v8.11.0" ] + reason: "Data stream lifecycle was released as tech preview in 8.11" + test_runner_features: allowed_warnings + - do: + allowed_warnings: + - "index template [template-with-lifecycle] has index patterns [managed-data-stream] matching patterns from existing older templates [global] with patterns (global => [*]); this template [template-with-lifecycle] will take precedence during new index creation" + indices.put_index_template: + name: template-with-lifecycle + body: + index_patterns: [ managed-data-stream ] + template: + settings: + index.number_of_replicas: 0 + lifecycle: + data_retention: "30d" + data_stream: { } + - do: + indices.create_data_stream: + name: managed-data-stream +--- +teardown: + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.max: null + data_streams.lifecycle.retention.default: null + +--- +"Retrieve effective retention via the data stream API": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_data_stream/{index} + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + indices.get_data_stream: + name: "managed-data-stream" + - match: { data_streams.0.name: managed-data-stream } + - match: { data_streams.0.lifecycle.data_retention: '30d' } + - match: { data_streams.0.lifecycle.effective_retention: '30d'} + - match: { data_streams.0.lifecycle.retention_determined_by: 'data_stream_configuration'} + +--- +"Retrieve effective retention with explain": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: 
GET + path: /{index}/_lifecycle/explain + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.max: "7d" + - is_true: acknowledged + - do: + indices.get_data_stream: + name: "managed-data-stream" + - match: { data_streams.0.name: managed-data-stream } + - set: + data_streams.0.indices.0.index_name: backing_index + + - do: + indices.explain_data_lifecycle: + index: managed-data-stream + include_defaults: true + - match: { indices.$backing_index.managed_by_lifecycle: true } + - match: { indices.$backing_index.lifecycle.data_retention: '30d' } + - match: { indices.$backing_index.lifecycle.effective_retention: '7d' } + - match: { indices.$backing_index.lifecycle.retention_determined_by: 'max_global_retention' } + +--- +"Retrieve effective retention with data stream lifecycle": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_data_stream/{index}/_lifecycle + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + indices.put_data_lifecycle: + name: "managed-data-stream" + body: {} + - is_true: acknowledged + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.default: "7d" + - do: + indices.get_data_lifecycle: + name: "managed-data-stream" + - length: { data_streams: 1} + - match: { data_streams.0.name: managed-data-stream } + - match: { data_streams.0.lifecycle.effective_retention: '7d' } + - match: { data_streams.0.lifecycle.retention_determined_by: 'default_global_retention' } diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 0ff34cf687500..f1c592e6e8345 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -8,14 +8,23 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.GatewayService; @@ -26,6 +35,7 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; +import 
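Condensed, the three effective-retention tests above pin down the precedence rules: a stream-level data_retention applies as-is, the cluster-wide max setting caps it, and the cluster-wide default fills in when the stream configures nothing. The values are the ones the tests use.

  # data_retention: 30d, no cluster overrides                              -> effective_retention: 30d (data_stream_configuration)
  # data_retention: 30d, data_streams.lifecycle.retention.max: 7d          -> effective_retention: 7d  (max_global_retention)
  # no stream retention, data_streams.lifecycle.retention.default: 7d      -> effective_retention: 7d  (default_global_retention)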
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import java.util.Arrays; @@ -33,11 +43,16 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.CountDownLatch; import java.util.function.Consumer; import java.util.function.Function; +import java.util.stream.IntStream; +import static org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestParameters.Metric.INGEST; +import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.putJsonStoredScript; import static org.elasticsearch.test.NodeRoles.onlyRole; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; // Ideally I like this test to live in the server module, but otherwise a large part of the ScriptProcessor @@ -179,9 +194,9 @@ public Settings onNodeStopped(String nodeName) { public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exception { internalCluster().startNode(); - clusterAdmin().preparePutStoredScript().setId("1").setContent(new BytesArray(Strings.format(""" + putJsonStoredScript("1", Strings.format(""" {"script": {"lang": "%s", "source": "my_script"} } - """, MockScriptEngine.NAME)), XContentType.JSON).get(); + """, MockScriptEngine.NAME)); BytesReference pipeline = new BytesArray(""" { "processors" : [ @@ -326,4 +341,89 @@ public boolean validateClusterForming() { source = client().prepareGet("index", "fails").get(timeout).getSource(); assertNull(source); } + + /** + * This test is for confirming that forwarded bulk requests do not use system_write thread pool + * for non-system indexes. Before this fix, we were using system_write thread pool for all forwarded + * bulk requests causing the system_write thread pool to get overloaded. + */ + public void testForwardBulkWithSystemWritePoolDisabled() throws Exception { + // Create a node with master only role and a node with ingest role + final String masterOnlyNode = internalCluster().startMasterOnlyNode(); + final String ingestNode = internalCluster().startNode(); + + ensureStableCluster(2); + + // Create Bulk Request + createIndex("index"); + + BytesReference source = new BytesArray(""" + { + "processors" : [ + {"set" : {"field": "y", "value": 0}} + ] + }"""); + + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); + clusterAdmin().putPipeline(putPipelineRequest).get(); + + int numRequests = scaledRandomIntBetween(32, 128); + BulkRequest bulkRequest = new BulkRequest(); + BulkResponse response; + for (int i = 0; i < numRequests; i++) { + IndexRequest indexRequest = new IndexRequest("index").id(Integer.toString(i)).setPipeline("_id"); + indexRequest.source(Requests.INDEX_CONTENT_TYPE, "x", 1); + bulkRequest.add(indexRequest); + } + assertThat(numRequests, equalTo(bulkRequest.requests().size())); + + // Block system_write thread pool on the ingest node + final ThreadPool ingestNodeThreadPool = internalCluster().getInstance(ThreadPool.class, ingestNode); + final var blockingLatch = new CountDownLatch(1); + try { + blockSystemWriteThreadPool(blockingLatch, ingestNodeThreadPool); + // Send bulk request to master only node, so it will forward it to the ingest node. + response = safeGet(client(masterOnlyNode).bulk(bulkRequest)); + } finally { + blockingLatch.countDown(); + } + + // Make sure the requests are processed (even though we blocked system_write thread pool above). 
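The blockSystemWriteThreadPool helper defined at the end of this test class uses a generic testing trick: saturate a bounded executor with tasks that park on a latch until further submissions are rejected, so anything that would have needed that pool during the test cannot run on it. Below is a standalone sketch of the same pattern with plain java.util.concurrent; the pool size, queue size and class name are assumptions of the sketch, not Elasticsearch API.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class BlockedPoolSketch {
    public static void main(String[] args) {
        // A small fixed pool with a bounded queue, standing in for the system_write pool.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(4));
        CountDownLatch blockingLatch = new CountDownLatch(1);
        try {
            // Fill the single worker and the queue with tasks that wait on the latch.
            // Once a submission is rejected, the pool is provably saturated.
            boolean rejected = false;
            while (rejected == false) {
                try {
                    pool.execute(() -> {
                        try {
                            blockingLatch.await();
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                        }
                    });
                } catch (RejectedExecutionException e) {
                    rejected = true;
                }
            }
            // ... run the code under test here; if it completes, it did not depend on this pool ...
        } finally {
            blockingLatch.countDown(); // release the parked tasks so the executor can drain
            pool.shutdown();
        }
    }
}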
+ assertThat(response.getItems().length, equalTo(numRequests)); + assertFalse(response.hasFailures()); + + // Check Node Ingest stats + NodesStatsResponse nodesStatsResponse = clusterAdmin().nodesStats(new NodesStatsRequest(ingestNode).addMetric(INGEST)).actionGet(); + assertThat(nodesStatsResponse.getNodes().size(), equalTo(1)); + + NodeStats stats = nodesStatsResponse.getNodes().get(0); + assertThat(stats.getIngestStats().totalStats().ingestCount(), equalTo((long) numRequests)); + assertThat(stats.getIngestStats().totalStats().ingestFailedCount(), equalTo(0L)); + final var pipelineStats = stats.getIngestStats().pipelineStats().get(0); + assertThat(pipelineStats.pipelineId(), equalTo("_id")); + assertThat(pipelineStats.stats().ingestCount(), equalTo((long) numRequests)); + + MultiGetResponse docListResponse = safeGet( + client().prepareMultiGet().addIds("index", IntStream.range(0, numRequests).mapToObj(String::valueOf).toList()).execute() + ); + + assertThat(docListResponse.getResponses().length, equalTo(numRequests)); + Map document; + for (int i = 0; i < numRequests; i++) { + document = docListResponse.getResponses()[i].getResponse().getSourceAsMap(); + assertThat(document.get("y"), equalTo(0)); + } + } + + private void blockSystemWriteThreadPool(CountDownLatch blockingLatch, ThreadPool threadPool) { + assertThat(blockingLatch.getCount(), greaterThan(0L)); + final var executor = threadPool.executor(ThreadPool.Names.SYSTEM_WRITE); + // Add tasks repeatedly until we get an EsRejectedExecutionException which indicates that the threadpool and its queue are full. + expectThrows(EsRejectedExecutionException.class, () -> { + // noinspection InfiniteLoopStatement + while (true) { + executor.execute(() -> safeAwait(blockingLatch)); + } + }); + } } diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java deleted file mode 100644 index 16a8013ae9c4a..0000000000000 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverWithPipelinesIT.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.plugins.internal; - -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.ingest.common.IngestCommonPlugin; -import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.FilterXContentParserWrapper; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) -public class DocumentSizeObserverWithPipelinesIT extends ESIntegTestCase { - - private static String TEST_INDEX_NAME = "test-index-name"; - // the assertions are done in plugin which is static and will be created by ES server. - // hence a static flag to make sure it is indeed used - public static volatile boolean hasWrappedParser; - public static AtomicLong providedFixedSize = new AtomicLong(); - - public void testDocumentIsReportedWithPipelines() throws Exception { - hasWrappedParser = false; - // pipeline adding fields, changing destination is not affecting reporting - final BytesReference pipelineBody = new BytesArray(""" - { - "processors": [ - { - "set": { - "field": "my-text-field", - "value": "xxxx" - } - }, - { - "set": { - "field": "my-boolean-field", - "value": true - } - } - ] - } - """); - clusterAdmin().putPipeline(new PutPipelineRequest("pipeline", pipelineBody, XContentType.JSON)).actionGet(); - - client().index( - new IndexRequest(TEST_INDEX_NAME).setPipeline("pipeline") - .id("1") - .source(jsonBuilder().startObject().field("test", "I am sam i am").endObject()) - ).actionGet(); - assertBusy(() -> { - // ingest node has used an observer that was counting #map operations - // and passed that info to newFixedSize observer in TransportShardBulkAction - assertTrue(hasWrappedParser); - assertThat(providedFixedSize.get(), equalTo(1L)); - }); - } - - @Override - protected Collection> nodePlugins() { - return List.of(TestDocumentParsingProviderPlugin.class, IngestCommonPlugin.class); - } - - public static class TestDocumentParsingProviderPlugin extends Plugin implements DocumentParsingProviderPlugin, IngestPlugin { - - public TestDocumentParsingProviderPlugin() {} - - @Override - public DocumentParsingProvider getDocumentParsingProvider() { - // returns a static instance, because we want to assert that the wrapping is called only once - return new DocumentParsingProvider() { - @Override - public DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest request) { - if (request instanceof IndexRequest indexRequest && indexRequest.getNormalisedBytesParsed() > 0) { - long normalisedBytesParsed = indexRequest.getNormalisedBytesParsed(); - providedFixedSize.set(normalisedBytesParsed); - return new TestDocumentSizeObserver(normalisedBytesParsed); - } - return new TestDocumentSizeObserver(0L); - } - - @Override - public DocumentSizeReporter newDocumentSizeReporter( - String indexName, - MapperService mapperService, - 
DocumentSizeAccumulator documentSizeAccumulator - ) { - return DocumentSizeReporter.EMPTY_INSTANCE; - } - }; - } - } - - public static class TestDocumentSizeObserver implements DocumentSizeObserver { - long mapCounter = 0; - long wrapperCounter = 0; - - public TestDocumentSizeObserver(long mapCounter) { - this.mapCounter = mapCounter; - } - - @Override - public XContentParser wrapParser(XContentParser xContentParser) { - wrapperCounter++; - hasWrappedParser = true; - return new FilterXContentParserWrapper(xContentParser) { - - @Override - public Map map() throws IOException { - mapCounter++; - return super.map(); - } - }; - } - - @Override - public long normalisedBytesParsed() { - return mapCounter; - } - - } - -} diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java new file mode 100644 index 0000000000000..7f0910ea5cc4d --- /dev/null +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugins.internal; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.ingest.common.IngestCommonPlugin; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.FilterXContentParserWrapper; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class XContentMeteringParserDecoratorWithPipelinesIT extends ESIntegTestCase { + + private static String TEST_INDEX_NAME = "test-index-name"; + // the assertions are done in plugin which is static and will be created by ES server. 
+ // hence a static flag to make sure it is indeed used + public static volatile boolean hasWrappedParser; + public static AtomicLong providedFixedSize = new AtomicLong(); + + public void testDocumentIsReportedWithPipelines() throws Exception { + hasWrappedParser = false; + // pipeline adding fields, changing destination is not affecting reporting + final BytesReference pipelineBody = new BytesArray(""" + { + "processors": [ + { + "set": { + "field": "my-text-field", + "value": "xxxx" + } + }, + { + "set": { + "field": "my-boolean-field", + "value": true + } + } + ] + } + """); + clusterAdmin().putPipeline(new PutPipelineRequest("pipeline", pipelineBody, XContentType.JSON)).actionGet(); + + client().index( + new IndexRequest(TEST_INDEX_NAME).setPipeline("pipeline") + .id("1") + .source(jsonBuilder().startObject().field("test", "I am sam i am").endObject()) + ).actionGet(); + assertBusy(() -> { + // ingest node has used an observer that was counting #map operations + // and passed that info to newFixedSize observer in TransportShardBulkAction + assertTrue(hasWrappedParser); + assertThat(providedFixedSize.get(), equalTo(1L)); + }); + } + + @Override + protected Collection> nodePlugins() { + return List.of(TestDocumentParsingProviderPlugin.class, IngestCommonPlugin.class); + } + + public static class TestDocumentParsingProviderPlugin extends Plugin implements DocumentParsingProviderPlugin, IngestPlugin { + + public TestDocumentParsingProviderPlugin() {} + + @Override + public DocumentParsingProvider getDocumentParsingProvider() { + // returns a static instance, because we want to assert that the wrapping is called only once + return new DocumentParsingProvider() { + @Override + public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { + if (request instanceof IndexRequest indexRequest && indexRequest.getNormalisedBytesParsed() > 0) { + long normalisedBytesParsed = indexRequest.getNormalisedBytesParsed(); + providedFixedSize.set(normalisedBytesParsed); + return new TestXContentMeteringParserDecorator(normalisedBytesParsed); + } + return new TestXContentMeteringParserDecorator(0L); + } + + @Override + public DocumentSizeReporter newDocumentSizeReporter( + String indexName, + MapperService mapperService, + DocumentSizeAccumulator documentSizeAccumulator + ) { + return DocumentSizeReporter.EMPTY_INSTANCE; + } + }; + } + } + + public static class TestXContentMeteringParserDecorator implements XContentMeteringParserDecorator { + long mapCounter = 0; + + public TestXContentMeteringParserDecorator(long mapCounter) { + this.mapCounter = mapCounter; + } + + @Override + public XContentParser decorate(XContentParser xContentParser) { + hasWrappedParser = true; + return new FilterXContentParserWrapper(xContentParser) { + + @Override + public Map map() throws IOException { + mapCounter++; + return super.map(); + } + }; + } + + @Override + public ParsedDocument.DocumentSize meteredDocumentSize() { + return new ParsedDocument.DocumentSize(mapCounter, 0); + } + } + +} diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java index 27ef5a10dd5c2..0377da53846d5 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java @@ -225,7 +225,7 @@ private static Flow buildFlow( } flow.protocol = 
Transport.fromObject(protocol); - switch (flow.protocol) { + switch (flow.protocol.getType()) { case Tcp, Udp, Sctp -> { flow.sourcePort = parseIntFromObjectOrString(sourcePort.get(), "source port"); if (flow.sourcePort < 1 || flow.sourcePort > 65535) { @@ -336,12 +336,12 @@ public CommunityIdProcessor create( */ public static final class Flow { - private static final List TRANSPORTS_WITH_PORTS = List.of( - Transport.Tcp, - Transport.Udp, - Transport.Sctp, - Transport.Icmp, - Transport.IcmpIpV6 + private static final List TRANSPORTS_WITH_PORTS = List.of( + Transport.Type.Tcp, + Transport.Type.Udp, + Transport.Type.Sctp, + Transport.Type.Icmp, + Transport.Type.IcmpIpV6 ); InetAddress source; @@ -362,20 +362,21 @@ boolean isOrdered() { } byte[] toBytes() { - boolean hasPort = TRANSPORTS_WITH_PORTS.contains(protocol); + Transport.Type protoType = protocol.getType(); + boolean hasPort = TRANSPORTS_WITH_PORTS.contains(protoType); int len = source.getAddress().length + destination.getAddress().length + 2 + (hasPort ? 4 : 0); ByteBuffer bb = ByteBuffer.allocate(len); boolean isOneWay = false; - if (protocol == Transport.Icmp || protocol == Transport.IcmpIpV6) { + if (protoType == Transport.Type.Icmp || protoType == Transport.Type.IcmpIpV6) { // ICMP protocols populate port fields with ICMP data - Integer equivalent = IcmpType.codeEquivalent(icmpType, protocol == Transport.IcmpIpV6); + Integer equivalent = IcmpType.codeEquivalent(icmpType, protoType == Transport.Type.IcmpIpV6); isOneWay = equivalent == null; sourcePort = icmpType; destinationPort = equivalent == null ? icmpCode : equivalent; } - boolean keepOrder = isOrdered() || ((protocol == Transport.Icmp || protocol == Transport.IcmpIpV6) && isOneWay); + boolean keepOrder = isOrdered() || ((protoType == Transport.Type.Icmp || protoType == Transport.Type.IcmpIpV6) && isOneWay); bb.put(keepOrder ? source.getAddress() : destination.getAddress()); bb.put(keepOrder ? 
destination.getAddress() : source.getAddress()); bb.put(toUint16(protocol.getTransportNumber() << 8)); @@ -397,39 +398,63 @@ String toCommunityId(byte[] seed) { } } - public enum Transport { - Icmp(1), - Igmp(2), - Tcp(6), - Udp(17), - Gre(47), - IcmpIpV6(58), - Eigrp(88), - Ospf(89), - Pim(103), - Sctp(132); - - private final int transportNumber; + static class Transport { + public enum Type { + Unknown(-1), + Icmp(1), + Igmp(2), + Tcp(6), + Udp(17), + Gre(47), + IcmpIpV6(58), + Eigrp(88), + Ospf(89), + Pim(103), + Sctp(132); + + private final int transportNumber; + + private static final Map TRANSPORT_NAMES; + + static { + TRANSPORT_NAMES = new HashMap<>(); + TRANSPORT_NAMES.put("icmp", Icmp); + TRANSPORT_NAMES.put("igmp", Igmp); + TRANSPORT_NAMES.put("tcp", Tcp); + TRANSPORT_NAMES.put("udp", Udp); + TRANSPORT_NAMES.put("gre", Gre); + TRANSPORT_NAMES.put("ipv6-icmp", IcmpIpV6); + TRANSPORT_NAMES.put("icmpv6", IcmpIpV6); + TRANSPORT_NAMES.put("eigrp", Eigrp); + TRANSPORT_NAMES.put("ospf", Ospf); + TRANSPORT_NAMES.put("pim", Pim); + TRANSPORT_NAMES.put("sctp", Sctp); + } - private static final Map TRANSPORT_NAMES; + Type(int transportNumber) { + this.transportNumber = transportNumber; + } - static { - TRANSPORT_NAMES = new HashMap<>(); - TRANSPORT_NAMES.put("icmp", Icmp); - TRANSPORT_NAMES.put("igmp", Igmp); - TRANSPORT_NAMES.put("tcp", Tcp); - TRANSPORT_NAMES.put("udp", Udp); - TRANSPORT_NAMES.put("gre", Gre); - TRANSPORT_NAMES.put("ipv6-icmp", IcmpIpV6); - TRANSPORT_NAMES.put("icmpv6", IcmpIpV6); - TRANSPORT_NAMES.put("eigrp", Eigrp); - TRANSPORT_NAMES.put("ospf", Ospf); - TRANSPORT_NAMES.put("pim", Pim); - TRANSPORT_NAMES.put("sctp", Sctp); + public int getTransportNumber() { + return transportNumber; + } } - Transport(int transportNumber) { + private Type type; + private int transportNumber; + + Transport(int transportNumber, Type type) { // Change constructor to public this.transportNumber = transportNumber; + this.type = type; + } + + Transport(Type type) { // Change constructor to public + this.transportNumber = type.getTransportNumber(); + this.type = type; + } + + public Type getType() { + return this.type; } public int getTransportNumber() { @@ -437,19 +462,26 @@ public int getTransportNumber() { } public static Transport fromNumber(int transportNumber) { - return switch (transportNumber) { - case 1 -> Icmp; - case 2 -> Igmp; - case 6 -> Tcp; - case 17 -> Udp; - case 47 -> Gre; - case 58 -> IcmpIpV6; - case 88 -> Eigrp; - case 89 -> Ospf; - case 103 -> Pim; - case 132 -> Sctp; - default -> throw new IllegalArgumentException("unknown transport protocol number [" + transportNumber + "]"); + if (transportNumber < 0 || transportNumber >= 255) { + // transport numbers range https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml + throw new IllegalArgumentException("invalid transport protocol number [" + transportNumber + "]"); + } + + Type type = switch (transportNumber) { + case 1 -> Type.Icmp; + case 2 -> Type.Igmp; + case 6 -> Type.Tcp; + case 17 -> Type.Udp; + case 47 -> Type.Gre; + case 58 -> Type.IcmpIpV6; + case 88 -> Type.Eigrp; + case 89 -> Type.Ospf; + case 103 -> Type.Pim; + case 132 -> Type.Sctp; + default -> Type.Unknown; }; + + return new Transport(transportNumber, type); } public static Transport fromObject(Object o) { @@ -457,8 +489,8 @@ public static Transport fromObject(Object o) { return fromNumber(number.intValue()); } else if (o instanceof String protocolStr) { // check if matches protocol name - if 
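This Transport refactor replaces the old enum with a small wrapper around a Type enum plus the raw IANA protocol number, so numbers without a named constant (for example 4, IPv4-in-IPv4, exercised by the new testBeatsIanaNumberProtocolIPv4 test later in this diff) no longer throw and can still be hashed into a community ID; only numbers outside 0-254 are rejected. A fragment illustrating the new behaviour, using only the methods added in this change; Transport and Type are package-private internals of CommunityIdProcessor, so this sketch assumes it lives in the same package (for example inside the unit test) and is not runnable on its own.

// TCP still resolves to its named type.
CommunityIdProcessor.Transport tcp = CommunityIdProcessor.Transport.fromNumber(6);
assert tcp.getType() == CommunityIdProcessor.Transport.Type.Tcp;

// 4 has no named constant; before this change fromNumber(4) threw an IllegalArgumentException,
// now it returns an Unknown-typed transport that still carries the raw number for hashing.
CommunityIdProcessor.Transport ipInIp = CommunityIdProcessor.Transport.fromNumber(4);
assert ipInIp.getType() == CommunityIdProcessor.Transport.Type.Unknown;
assert ipInIp.getTransportNumber() == 4;

// Values outside the IANA range (0..254) are still rejected with an IllegalArgumentException.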
(TRANSPORT_NAMES.containsKey(protocolStr.toLowerCase(Locale.ROOT))) { - return TRANSPORT_NAMES.get(protocolStr.toLowerCase(Locale.ROOT)); + if (Type.TRANSPORT_NAMES.containsKey(protocolStr.toLowerCase(Locale.ROOT))) { + return new Transport(Type.TRANSPORT_NAMES.get(protocolStr.toLowerCase(Locale.ROOT))); } // check if convertible to protocol number diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index 23bbaf62cec07..ae3de1f1c446f 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -30,6 +30,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentMap; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Supplier; @@ -45,7 +46,7 @@ public final class DateProcessor extends AbstractProcessor { private final String field; private final String targetField; private final List formats; - private final List, Function>> dateParsers; + private final List>> dateParsers; private final String outputFormat; DateProcessor( @@ -80,14 +81,12 @@ public final class DateProcessor extends AbstractProcessor { for (String format : formats) { DateFormat dateFormat = DateFormat.fromString(format); - dateParsers.add((params) -> { - var documentTimezone = timezone == null ? null : timezone.newInstance(params).execute(); - var documentLocale = locale == null ? null : locale.newInstance(params).execute(); - return Cache.INSTANCE.getOrCompute( + dateParsers.add( + (documentTimezone, documentLocale) -> Cache.INSTANCE.getOrCompute( new Cache.Key(format, documentTimezone, documentLocale), - () -> dateFormat.getFunction(format, newDateTimeZone(documentTimezone), newLocale(documentLocale)) - ); - }); + () -> dateFormat.getFunction(format, documentTimezone, documentLocale) + ) + ); } this.outputFormat = outputFormat; formatter = DateFormatter.forPattern(this.outputFormat); @@ -106,15 +105,27 @@ public IngestDocument execute(IngestDocument ingestDocument) { Object obj = ingestDocument.getFieldValue(field, Object.class); String value = null; if (obj != null) { - // Not use Objects.toString(...) here, because null gets changed to "null" which may confuse some date parsers + // Don't use Objects.toString(...) here, because null gets changed to "null" which may confuse some date parsers value = obj.toString(); } + // run (potential) mustache application just a single time for this document in order to + // extract the timezone and locale to use for date parsing + final ZoneId documentTimezone; + final Locale documentLocale; + final Map sourceAndMetadata = ingestDocument.getSourceAndMetadata(); + try { + documentTimezone = newDateTimeZone(timezone == null ? null : timezone.newInstance(sourceAndMetadata).execute()); + documentLocale = newLocale(locale == null ? 
null : locale.newInstance(sourceAndMetadata).execute()); + } catch (Exception e) { + throw new IllegalArgumentException("unable to parse date [" + value + "]", e); + } + ZonedDateTime dateTime = null; Exception lastException = null; - for (Function, Function> dateParser : dateParsers) { + for (BiFunction> dateParser : dateParsers) { try { - dateTime = dateParser.apply(ingestDocument.getSourceAndMetadata()).apply(value); + dateTime = dateParser.apply(documentTimezone, documentLocale).apply(value); break; } catch (Exception e) { // try the next parser and keep track of the exceptions @@ -255,6 +266,6 @@ Function getOrCompute(Key key, Supplier) event.get("network"); network.remove("transport"); - network.put("iana_number", CommunityIdProcessor.Transport.Tcp.getTransportNumber()); + network.put("iana_number", CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber()); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); } + public void testBeatsIanaNumberProtocolIPv4() throws Exception { + @SuppressWarnings("unchecked") + var network = (Map) event.get("network"); + network.put("iana_number", "4"); + network.remove("transport"); + @SuppressWarnings("unchecked") + var source = (Map) event.get("source"); + source.put("ip", "192.168.1.2"); + source.remove("port"); + @SuppressWarnings("unchecked") + var destination = (Map) event.get("destination"); + destination.put("ip", "10.1.2.3"); + destination.remove("port"); + testCommunityIdProcessor(event, "1:KXQzmk3bdsvD6UXj7dvQ4bM6Zvw="); + } + public void testIpv6() throws Exception { @SuppressWarnings("unchecked") var source = (Map) event.get("source"); @@ -201,10 +217,10 @@ public void testStringAndNumber() throws Exception { @SuppressWarnings("unchecked") var network = (Map) event.get("network"); network.remove("transport"); - network.put("iana_number", CommunityIdProcessor.Transport.Tcp.getTransportNumber()); + network.put("iana_number", CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber()); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); - network.put("iana_number", Integer.toString(CommunityIdProcessor.Transport.Tcp.getTransportNumber())); + network.put("iana_number", Integer.toString(CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber())); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); // protocol number @@ -359,8 +375,13 @@ private void testCommunityIdProcessor(Map source, int seed, Stri } public void testTransportEnum() { - for (CommunityIdProcessor.Transport t : CommunityIdProcessor.Transport.values()) { - assertThat(CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber()), equalTo(t)); + for (CommunityIdProcessor.Transport.Type t : CommunityIdProcessor.Transport.Type.values()) { + if (t == CommunityIdProcessor.Transport.Type.Unknown) { + expectThrows(IllegalArgumentException.class, () -> CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber())); + continue; + } + + assertThat(CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber()).getType(), equalTo(t)); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index e5773c6c553df..532bf575bda74 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -29,6 +29,8 @@ import static 
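The DateProcessor change above does two things: the templated timezone and locale are rendered once per document instead of once per candidate format, and the parser cache is keyed on the resolved ZoneId and Locale rather than their string forms. The caching idiom itself is computeIfAbsent over a record key; the sketch below is generic Java with invented names, not the processor's actual Cache class, and it omits the real cache's size-based eviction.

import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.Locale;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class ParserCacheSketch {
    // Record keys get equals/hashCode for free, so (format, zone, locale) triples dedupe correctly.
    record Key(String format, ZoneId zone, Locale locale) {}

    private final ConcurrentHashMap<Key, Function<String, ZonedDateTime>> cache = new ConcurrentHashMap<>();

    Function<String, ZonedDateTime> getOrCompute(Key key, Function<Key, Function<String, ZonedDateTime>> factory) {
        // Build the (relatively expensive) parser only on a cache miss; later lookups reuse it.
        return cache.computeIfAbsent(key, factory);
    }
}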
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.atMost; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -372,8 +374,8 @@ public void testCacheIsEvictedAfterReachMaxCapacity() { Function zonedDateTimeFunction1 = str -> ZonedDateTime.now(); Function zonedDateTimeFunction2 = str -> ZonedDateTime.now(); var cache = new DateProcessor.Cache(1); - var key1 = new DateProcessor.Cache.Key("format-1", ZoneId.systemDefault().toString(), Locale.ROOT.toString()); - var key2 = new DateProcessor.Cache.Key("format-2", ZoneId.systemDefault().toString(), Locale.ROOT.toString()); + var key1 = new DateProcessor.Cache.Key("format-1", ZoneId.systemDefault(), Locale.ROOT); + var key2 = new DateProcessor.Cache.Key("format-2", ZoneId.systemDefault(), Locale.ROOT); when(supplier1.get()).thenReturn(zonedDateTimeFunction1); when(supplier2.get()).thenReturn(zonedDateTimeFunction2); @@ -391,4 +393,36 @@ public void testCacheIsEvictedAfterReachMaxCapacity() { verify(supplier1, times(3)).get(); verify(supplier2, times(2)).get(); } + + public void testMustacheTemplateExecutesAtMostTwiceWithMultipleFormats() { + final TemplateScript.Factory factory = mock(TemplateScript.Factory.class); + final TemplateScript compiledScript = mock(TemplateScript.class); + when(factory.newInstance(any())).thenReturn(compiledScript); + when(compiledScript.execute()).thenReturn(null); + + final List matchFormats = List.of( + "dd/MM/yyyy", + "dd-MM-yyyy", + "uuuu-dd-MM", + "uuuu-MM-dd", + "TAI64N", + "epoch_millis", + "yyyy dd MM" + ); + DateProcessor dateProcessor = new DateProcessor( + randomAlphaOfLength(10), + null, + factory, + factory, + "date_as_string", + matchFormats, + "date_as_date" + ); + + Map document = new HashMap<>(); + document.put("date_as_string", "2010 12 06"); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + dateProcessor.execute(ingestDocument); + verify(compiledScript, atMost(2)).execute(); + } } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 5bdb6da5c7b29..bc5bb165cd0d2 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -88,3 +88,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTestsByFilePattern("**/ingest_geoip/20_geoip_processor.yml", "from 8.0 yaml rest tests use geoip test fixture and default geoip are no longer packaged. 
In 7.x yaml tests used default databases which makes tests results very different, so skipping these tests") // task.skipTest("lang_mustache/50_multi_search_template/Multi-search template with errors", "xxx") } + +artifacts { + restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java index ae811db226b06..92ec911dbf451 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/AbstractGeoIpIT.java @@ -16,17 +16,14 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.StreamsUtils; import org.junit.ClassRule; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.nio.file.Files; import java.nio.file.Path; import java.util.Collection; import java.util.List; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; + public abstract class AbstractGeoIpIT extends ESIntegTestCase { private static final boolean useFixture = Booleans.parseBoolean(System.getProperty("geoip_use_service", "false")) == false; @@ -45,23 +42,7 @@ protected Collection> nodePlugins() { @Override protected Settings nodeSettings(final int nodeOrdinal, final Settings otherSettings) { final Path databasePath = createTempDir(); - try { - Files.createDirectories(databasePath); - Files.copy( - new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-City.mmdb")), - databasePath.resolve("GeoLite2-City.mmdb") - ); - Files.copy( - new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb")), - databasePath.resolve("GeoLite2-Country.mmdb") - ); - Files.copy( - new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")), - databasePath.resolve("GeoLite2-ASN.mmdb") - ); - } catch (final IOException e) { - throw new UncheckedIOException(e); - } + copyDefaultDatabases(databasePath); return Settings.builder() .put("ingest.geoip.database_path", databasePath) .put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), false) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java index 2d068373717d8..cc757c413713d 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.core.Booleans; import org.elasticsearch.core.TimeValue; import org.elasticsearch.ingest.EnterpriseGeoIpTask; import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration; @@ -54,13 +53,12 @@ public class EnterpriseGeoIpDownloaderIT extends ESIntegTestCase { private static final String DATABASE_TYPE = "GeoIP2-City"; - private static final boolean 
useFixture = Booleans.parseBoolean(System.getProperty("geoip_use_service", "false")) == false; @ClassRule - public static final EnterpriseGeoIpHttpFixture fixture = new EnterpriseGeoIpHttpFixture(useFixture, DATABASE_TYPE); + public static final EnterpriseGeoIpHttpFixture fixture = new EnterpriseGeoIpHttpFixture(DATABASE_TYPE); protected String getEndpoint() { - return useFixture ? fixture.getAddress() : null; + return fixture.getAddress(); } @Override @@ -71,11 +69,9 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { builder.setSecureSettings(secureSettings) .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true); - if (getEndpoint() != null) { - // note: this is using the enterprise fixture for the regular downloader, too, as - // a slightly hacky way of making the regular downloader not actually download any files - builder.put(GeoIpDownloader.ENDPOINT_SETTING.getKey(), getEndpoint()); - } + // note: this is using the enterprise fixture for the regular downloader, too, as + // a slightly hacky way of making the regular downloader not actually download any files + builder.put(GeoIpDownloader.ENDPOINT_SETTING.getKey(), getEndpoint()); return builder.build(); } @@ -94,9 +90,7 @@ public void testEnterpriseDownloaderTask() throws Exception { * was updated with information from the database. * Note that the "enterprise database" is actually just a geolite database being loaded by the GeoIpHttpFixture. */ - if (getEndpoint() != null) { - EnterpriseGeoIpDownloader.DEFAULT_MAXMIND_ENDPOINT = getEndpoint(); - } + EnterpriseGeoIpDownloader.DEFAULT_MAXMIND_ENDPOINT = getEndpoint(); final String pipelineName = "enterprise_geoip_pipeline"; final String indexName = "enterprise_geoip_test_index"; final String sourceField = "ip"; diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index f7ab384c69bf1..d994bd70eb7a0 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -66,6 +66,7 @@ import java.util.zip.GZIPInputStream; import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.anEmptyMap; @@ -688,12 +689,7 @@ private void setupDatabasesInConfigDirectory() throws Exception { .forEach(path -> { try { Files.createDirectories(path); - Files.copy(GeoIpDownloaderIT.class.getResourceAsStream("/GeoLite2-City.mmdb"), path.resolve("GeoLite2-City.mmdb")); - Files.copy(GeoIpDownloaderIT.class.getResourceAsStream("/GeoLite2-ASN.mmdb"), path.resolve("GeoLite2-ASN.mmdb")); - Files.copy( - GeoIpDownloaderIT.class.getResourceAsStream("/GeoLite2-Country.mmdb"), - path.resolve("GeoLite2-Country.mmdb") - ); + copyDefaultDatabases(path); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java 
b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java index 8d8b0b4215b3f..87daefab7b428 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java @@ -22,10 +22,8 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.io.IOException; -import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -34,7 +32,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.ingest.geoip.GeoIpProcessorFactoryTests.copyDatabaseFiles; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDatabase; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -68,8 +67,8 @@ public void test() throws Exception { when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); DatabaseNodeService databaseNodeService = createRegistry(geoIpConfigDir, geoIpTmpDir, clusterService); GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); + copyDatabase("GeoLite2-City-Test.mmdb", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); + copyDatabase("GeoLite2-City-Test.mmdb", geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City-Test.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); lazyLoadReaders(databaseNodeService); @@ -138,18 +137,14 @@ public void test() throws Exception { assertThat(previous1.current(), equalTo(-1)); }); } else { - Files.copy( - ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), - geoIpTmpDir.resolve("GeoLite2-City.mmdb"), - StandardCopyOption.REPLACE_EXISTING - ); + copyDatabase("GeoLite2-City-Test.mmdb", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); } DatabaseReaderLazyLoader previous2 = databaseNodeService.get("GeoLite2-City-Test.mmdb"); - InputStream source = ConfigDatabases.class.getResourceAsStream( - i % 2 == 0 ? "/GeoIP2-City-Test.mmdb" : "/GeoLite2-City-Test.mmdb" + copyDatabase( + i % 2 == 0 ? 
"GeoIP2-City-Test.mmdb" : "GeoLite2-City-Test.mmdb", + geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb") ); - Files.copy(source, geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb"), StandardCopyOption.REPLACE_EXISTING); databaseNodeService.updateDatabase("GeoLite2-City-Test.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); DatabaseReaderLazyLoader current1 = databaseNodeService.get("GeoLite2-City.mmdb"); @@ -194,7 +189,7 @@ private static DatabaseNodeService createRegistry(Path geoIpConfigDir, Path geoI throws IOException { GeoIpCache cache = new GeoIpCache(0); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, cache); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); DatabaseNodeService databaseNodeService = new DatabaseNodeService( geoIpTmpDir, mock(Client.class), diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index e39705a71f56c..82b9e930280b7 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -592,7 +592,7 @@ private Map retrieveEnterpriseGeoData(GeoIpDatabase geoIpDatabas } case ISP_ORGANIZATION_NAME -> { if (ispOrganization != null) { - geoData.put("isp_organization", ispOrganization); + geoData.put("isp_organization_name", ispOrganization); } } case MOBILE_COUNTRY_CODE -> { @@ -660,7 +660,7 @@ private Map retrieveIspGeoData(GeoIpDatabase geoIpDatabase, Inet } case ISP_ORGANIZATION_NAME -> { if (ispOrganization != null) { - geoData.put("isp_organization", ispOrganization); + geoData.put("isp_organization_name", ispOrganization); } } case MOBILE_COUNTRY_CODE -> { diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java index 43aacee956279..4f9b9062332e4 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java @@ -25,12 +25,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; import org.elasticsearch.ingest.geoip.direct.DeleteDatabaseConfigurationAction.Request; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java index a14a143e3f404..ae090dc4c64f6 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java +++ 
b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java @@ -17,10 +17,10 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java index 540be68671d38..490a9edbec89a 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java @@ -24,13 +24,13 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsAction.java index c7a1337f20e59..81836cda29568 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsAction.java @@ -166,7 +166,7 @@ public static class NodeResponse extends BaseNodeResponse { protected NodeResponse(StreamInput in) throws IOException { super(in); downloaderStats = in.readBoolean() ? new GeoIpDownloaderStats(in) : null; - if (in.getTransportVersion().onOrAfter(TransportVersions.GEOIP_CACHE_STATS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { cacheStats = in.readBoolean() ? 
new CacheStats(in) : null; } else { cacheStats = null; @@ -217,7 +217,7 @@ public void writeTo(StreamOutput out) throws IOException { if (downloaderStats != null) { downloaderStats.writeTo(out); } - if (out.getTransportVersion().onOrAfter(TransportVersions.GEOIP_CACHE_STATS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeBoolean(cacheStats != null); if (cacheStats != null) { cacheStats.writeTo(out); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsTransportAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsTransportAction.java index 6a3aa81f82e9e..7a4b1fec900e9 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsTransportAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsTransportAction.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.ingest.geoip.DatabaseNodeService; import org.elasticsearch.ingest.geoip.GeoIpDownloader; @@ -22,6 +21,7 @@ import org.elasticsearch.ingest.geoip.stats.GeoIpStatsAction.NodeResponse; import org.elasticsearch.ingest.geoip.stats.GeoIpStatsAction.Request; import org.elasticsearch.ingest.geoip.stats.GeoIpStatsAction.Response; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/ConfigDatabasesTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/ConfigDatabasesTests.java index 01d7cdc9b9d5c..7b962fed0ca83 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/ConfigDatabasesTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/ConfigDatabasesTests.java @@ -20,12 +20,11 @@ import org.junit.After; import org.junit.Before; -import java.io.IOException; -import java.nio.file.CopyOption; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDatabase; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -62,8 +61,8 @@ public void testLocalDatabasesEmptyConfig() throws Exception { public void testDatabasesConfigDir() throws Exception { Path configDir = createTempDir(); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoIP2-City-Test.mmdb"), configDir.resolve("GeoIP2-City.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), configDir.resolve("GeoLite2-City.mmdb")); + copyDatabase("GeoIP2-City-Test.mmdb", configDir.resolve("GeoIP2-City.mmdb")); + copyDatabase("GeoLite2-City-Test.mmdb", configDir.resolve("GeoLite2-City.mmdb")); ConfigDatabases configDatabases = new ConfigDatabases(configDir, new GeoIpCache(0)); configDatabases.initialize(resourceWatcherService); @@ -92,9 +91,9 @@ public void testDatabasesDynamicUpdateConfigDir() throws Exception { 
assertThat(loader.getDatabaseType(), equalTo("GeoLite2-Country")); } - CopyOption option = StandardCopyOption.REPLACE_EXISTING; - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoIP2-City-Test.mmdb"), configDir.resolve("GeoIP2-City.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), configDir.resolve("GeoLite2-City.mmdb"), option); + copyDatabase("GeoIP2-City-Test.mmdb", configDir.resolve("GeoIP2-City.mmdb")); + copyDatabase("GeoLite2-City-Test.mmdb", configDir.resolve("GeoLite2-City.mmdb")); + assertBusy(() -> { assertThat(configDatabases.getConfigDatabases().size(), equalTo(4)); DatabaseReaderLazyLoader loader = configDatabases.getDatabase("GeoLite2-ASN.mmdb"); @@ -116,7 +115,8 @@ public void testDatabasesDynamicUpdateConfigDir() throws Exception { public void testDatabasesUpdateExistingConfDatabase() throws Exception { Path configDir = createTempDir(); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City.mmdb"), configDir.resolve("GeoLite2-City.mmdb")); + copyDatabase("GeoLite2-City.mmdb", configDir); + GeoIpCache cache = new GeoIpCache(1000); // real cache to test purging of entries upon a reload ConfigDatabases configDatabases = new ConfigDatabases(configDir, cache); configDatabases.initialize(resourceWatcherService); @@ -131,11 +131,7 @@ public void testDatabasesUpdateExistingConfDatabase() throws Exception { assertThat(cache.count(), equalTo(1)); } - Files.copy( - ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), - configDir.resolve("GeoLite2-City.mmdb"), - StandardCopyOption.REPLACE_EXISTING - ); + copyDatabase("GeoLite2-City-Test.mmdb", configDir.resolve("GeoLite2-City.mmdb")); assertBusy(() -> { assertThat(configDatabases.getConfigDatabases().size(), equalTo(1)); assertThat(cache.count(), equalTo(0)); @@ -154,11 +150,9 @@ public void testDatabasesUpdateExistingConfDatabase() throws Exception { }); } - private static Path prepareConfigDir() throws IOException { + private static Path prepareConfigDir() { Path dir = createTempDir(); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-ASN.mmdb"), dir.resolve("GeoLite2-ASN.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City.mmdb"), dir.resolve("GeoLite2-City.mmdb")); - Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-Country.mmdb"), dir.resolve("GeoLite2-Country.mmdb")); + copyDefaultDatabases(dir); return dir; } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java index 34d5429142cec..1579c7020c58a 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java @@ -83,7 +83,7 @@ import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; -import static org.elasticsearch.ingest.geoip.GeoIpProcessorFactoryTests.copyDatabaseFiles; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.TYPE; import static org.hamcrest.Matchers.empty; @@ -117,10 +117,9 @@ public class DatabaseNodeServiceTests extends ESTestCase { @Before public void setup() throws IOException { final Path geoIpConfigDir = 
createTempDir(); - Files.createDirectories(geoIpConfigDir); GeoIpCache cache = new GeoIpCache(1000); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, cache); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); threadPool = new TestThreadPool(ConfigDatabases.class.getSimpleName()); Settings settings = Settings.builder().put("resource.reload.interval.high", TimeValue.timeValueMillis(100)).build(); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java index 58cb566165db2..1676ce14698a9 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -39,6 +40,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.DefaultBuiltInExecutorBuilders; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import org.hamcrest.Matchers; @@ -85,7 +87,11 @@ public void setup() throws IOException { "e4a3411cdd7b21eaf18675da5a7f9f360d33c6882363b2c19c38715834c9e836 GeoIP2-City_20240709.tar.gz".getBytes(StandardCharsets.UTF_8) ); clusterService = mock(ClusterService.class); - threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); + threadPool = new ThreadPool( + Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), + MeterRegistry.NOOP, + new DefaultBuiltInExecutorBuilders() + ); when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings(Settings.EMPTY, Set.of(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING)) ); @@ -476,7 +482,9 @@ public void testUpdateDatabasesWriteBlock() { "index [" + geoIpIndex + "] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, " - + "index has read-only-allow-delete block];" + + "index has read-only-allow-delete block; for more information, see " + + ReferenceDocs.FLOOD_STAGE_WATERMARK + + "];" ) ); verifyNoInteractions(httpClient); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 06b2605bd6d41..f213868fb65a1 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.ClusterSettings; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -44,6 +45,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.DefaultBuiltInExecutorBuilders; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -91,7 +93,11 @@ public void setup() throws IOException { httpClient = mock(HttpClient.class); when(httpClient.getBytes(anyString())).thenReturn("[]".getBytes(StandardCharsets.UTF_8)); clusterService = mock(ClusterService.class); - threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); + threadPool = new ThreadPool( + Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), + MeterRegistry.NOOP, + new DefaultBuiltInExecutorBuilders() + ); when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings( Settings.EMPTY, @@ -590,7 +596,9 @@ public void testUpdateDatabasesWriteBlock() { "index [" + geoIpIndex + "] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, " - + "index has read-only-allow-delete block];" + + "index has read-only-allow-delete block; for more information, see " + + ReferenceDocs.FLOOD_STAGE_WATERMARK + + "];" ) ); verifyNoInteractions(httpClient); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 663ae1152246a..a0541df0d4d8a 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -25,18 +25,15 @@ import org.elasticsearch.ingest.geoip.Database.Property; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.junit.After; import org.junit.Before; -import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -45,6 +42,9 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.DEFAULT_DATABASES; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDatabase; +import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; @@ -57,8 +57,6 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { - static Set DEFAULT_DATABASE_FILENAMES = Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"); - private Path geoipTmpDir; private Path geoIpConfigDir; private ConfigDatabases configDatabases; @@ -74,7 +72,7 @@ public void loadDatabaseReaders() throws IOException { Client client = mock(Client.class); GeoIpCache cache = new GeoIpCache(1000); configDatabases = new 
ConfigDatabases(geoIpConfigDir, new GeoIpCache(1000)); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); geoipTmpDir = createTempDir(); clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); @@ -181,7 +179,7 @@ public void testBuildDbFile() throws Exception { assertFalse(processor.isIgnoreMissing()); } - public void testBuildWithCountryDbAndAsnFields() throws Exception { + public void testBuildWithCountryDbAndAsnFields() { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -201,7 +199,7 @@ public void testBuildWithCountryDbAndAsnFields() throws Exception { ); } - public void testBuildWithAsnDbAndCityFields() throws Exception { + public void testBuildWithAsnDbAndCityFields() { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -218,10 +216,7 @@ public void testBuildWithAsnDbAndCityFields() throws Exception { } public void testBuildNonExistingDbFile() throws Exception { - Files.copy( - GeoIpProcessorFactoryTests.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), - geoipTmpDir.resolve("GeoLite2-City.mmdb") - ); + copyDatabase("GeoLite2-City-Test.mmdb", geoipTmpDir.resolve("GeoLite2-City.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City.mmdb")); GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); @@ -234,11 +229,11 @@ public void testBuildNonExistingDbFile() throws Exception { public void testBuildBuiltinDatabaseMissing() throws Exception { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); - cleanDatabaseFiles(geoIpConfigDir, configDatabases); + cleanDatabases(geoIpConfigDir, configDatabases); Map config = new HashMap<>(); config.put("field", "_field"); - config.put("database_file", randomFrom(DEFAULT_DATABASE_FILENAMES)); + config.put("database_file", randomFrom(DEFAULT_DATABASES)); Processor processor = factory.create(null, null, null, config); assertThat(processor, instanceOf(GeoIpProcessor.DatabaseUnavailableProcessor.class)); } @@ -267,7 +262,7 @@ public void testBuildFields() throws Exception { assertFalse(processor.isIgnoreMissing()); } - public void testBuildIllegalFieldOption() throws Exception { + public void testBuildIllegalFieldOption() { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config1 = new HashMap<>(); @@ -324,14 +319,13 @@ public void testBuildNullDatabase() throws Exception { assertThat(e.getMessage(), equalTo("[database_file] Unsupported database type [null] for file [GeoLite2-City.mmdb]")); } - @SuppressWarnings("HiddenField") public void testLazyLoading() throws Exception { final Path configDir = createTempDir(); final Path geoIpConfigDir = configDir.resolve("ingest-geoip"); Files.createDirectories(geoIpConfigDir); GeoIpCache cache = new GeoIpCache(1000); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, cache); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); // Loading another database reader instances, because otherwise we can't test lazy loading as the // database readers used at class level are reused between tests. 
(we want to keep that otherwise running this @@ -358,7 +352,7 @@ public void testLazyLoading() throws Exception { config.put("database_file", "GeoLite2-City.mmdb"); final GeoIpProcessor city = (GeoIpProcessor) factory.create(null, "_tag", null, config); - // these are lazy loaded until first use so we expect null here + // these are lazy loaded until first use, so we expect null here assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-City.mmdb").databaseReader.get()); city.execute(document); // the first ingest should trigger a database load @@ -369,7 +363,7 @@ public void testLazyLoading() throws Exception { config.put("database_file", "GeoLite2-Country.mmdb"); final GeoIpProcessor country = (GeoIpProcessor) factory.create(null, "_tag", null, config); - // these are lazy loaded until first use so we expect null here + // these are lazy loaded until first use, so we expect null here assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-Country.mmdb").databaseReader.get()); country.execute(document); // the first ingest should trigger a database load @@ -380,22 +374,21 @@ public void testLazyLoading() throws Exception { config.put("database_file", "GeoLite2-ASN.mmdb"); final GeoIpProcessor asn = (GeoIpProcessor) factory.create(null, "_tag", null, config); - // these are lazy loaded until first use so we expect null here + // these are lazy loaded until first use, so we expect null here assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-ASN.mmdb").databaseReader.get()); asn.execute(document); // the first ingest should trigger a database load assertNotNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-ASN.mmdb").databaseReader.get()); } - @SuppressWarnings("HiddenField") public void testLoadingCustomDatabase() throws IOException { final Path configDir = createTempDir(); final Path geoIpConfigDir = configDir.resolve("ingest-geoip"); Files.createDirectories(geoIpConfigDir); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, new GeoIpCache(1000)); - copyDatabaseFiles(geoIpConfigDir, configDatabases); + copyDefaultDatabases(geoIpConfigDir, configDatabases); // fake the GeoIP2-City database - copyDatabaseFile(geoIpConfigDir, "GeoLite2-City.mmdb"); + copyDatabase("GeoLite2-City.mmdb", geoIpConfigDir); Files.move(geoIpConfigDir.resolve("GeoLite2-City.mmdb"), geoIpConfigDir.resolve("GeoIP2-City.mmdb")); /* @@ -428,7 +421,7 @@ public void testLoadingCustomDatabase() throws IOException { config.put("database_file", "GeoIP2-City.mmdb"); final GeoIpProcessor city = (GeoIpProcessor) factory.create(null, "_tag", null, config); - // these are lazy loaded until first use so we expect null here + // these are lazy loaded until first use, so we expect null here assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoIP2-City.mmdb").databaseReader.get()); city.execute(document); // the first ingest should trigger a database load @@ -490,7 +483,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { assertThat(geoData.get("city_name"), equalTo("Tumba")); } { - copyDatabaseFile(geoipTmpDir, "GeoLite2-City-Test.mmdb"); + copyDatabase("GeoLite2-City-Test.mmdb", geoipTmpDir); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City-Test.mmdb")); processor.execute(ingestDocument); @@ -498,7 +491,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { 
assertThat(geoData.get("city_name"), equalTo("Linköping")); } { - // No databases are available, so assume that databases still need to be downloaded and therefor not fail: + // No databases are available, so assume that databases still need to be downloaded and therefore not fail: IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); databaseNodeService.removeStaleEntries(List.of("GeoLite2-City.mmdb")); configDatabases.updateDatabase(geoIpConfigDir.resolve("GeoLite2-City.mmdb"), false); @@ -507,7 +500,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { assertThat(geoData, nullValue()); } { - // There are database available, but not the right one, so tag: + // There are databases available, but not the right one, so tag: databaseNodeService.updateDatabase("GeoLite2-City-Test.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City-Test.mmdb")); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); processor.execute(ingestDocument); @@ -517,7 +510,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { public void testDatabaseNotReadyYet() throws Exception { GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); - cleanDatabaseFiles(geoIpConfigDir, configDatabases); + cleanDatabases(geoIpConfigDir, configDatabases); { Map config = new HashMap<>(); @@ -542,7 +535,7 @@ public void testDatabaseNotReadyYet() throws Exception { ); } - copyDatabaseFile(geoipTmpDir, "GeoLite2-City-Test.mmdb"); + copyDatabase("GeoLite2-City-Test.mmdb", geoipTmpDir); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City-Test.mmdb")); { @@ -562,25 +555,9 @@ public void testDatabaseNotReadyYet() throws Exception { } } - private static void copyDatabaseFile(final Path path, final String databaseFilename) throws IOException { - Files.copy( - new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/" + databaseFilename)), - path.resolve(databaseFilename), - StandardCopyOption.REPLACE_EXISTING - ); - } - - static void copyDatabaseFiles(final Path path, ConfigDatabases configDatabases) throws IOException { - for (final String databaseFilename : DEFAULT_DATABASE_FILENAMES) { - copyDatabaseFile(path, databaseFilename); - configDatabases.updateDatabase(path.resolve(databaseFilename), true); + private static void cleanDatabases(final Path directory, ConfigDatabases configDatabases) { + for (final String database : DEFAULT_DATABASES) { + configDatabases.updateDatabase(directory.resolve(database), false); } } - - static void cleanDatabaseFiles(final Path path, ConfigDatabases configDatabases) throws IOException { - for (final String databaseFilename : DEFAULT_DATABASE_FILENAMES) { - configDatabases.updateDatabase(path.resolve(databaseFilename), false); - } - } - } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java index 6276155d9f083..762818a7c65db 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java @@ -134,7 +134,7 @@ public void testNonExistentWithIgnoreMissing() throws Exception { assertIngestDocument(originalIngestDocument, ingestDocument); } - public void testNullWithoutIgnoreMissing() throws Exception { + public void testNullWithoutIgnoreMissing() { 
GeoIpProcessor processor = new GeoIpProcessor( randomAlphaOfLength(10), null, @@ -156,7 +156,7 @@ public void testNullWithoutIgnoreMissing() throws Exception { assertThat(exception.getMessage(), equalTo("field [source_field] is null, cannot extract geoip information.")); } - public void testNonExistentWithoutIgnoreMissing() throws Exception { + public void testNonExistentWithoutIgnoreMissing() { GeoIpProcessor processor = new GeoIpProcessor( randomAlphaOfLength(10), null, @@ -463,7 +463,7 @@ public void testEnterprise() throws Exception { assertThat(geoData.get("residential_proxy"), equalTo(false)); assertThat(geoData.get("domain"), equalTo("frpt.net")); assertThat(geoData.get("isp"), equalTo("Fairpoint Communications")); - assertThat(geoData.get("isp_organization"), equalTo("Fairpoint Communications")); + assertThat(geoData.get("isp_organization_name"), equalTo("Fairpoint Communications")); assertThat(geoData.get("user_type"), equalTo("residential")); assertThat(geoData.get("connection_type"), equalTo("Cable/DSL")); } @@ -497,7 +497,7 @@ public void testIsp() throws Exception { assertThat(geoData.get("organization_name"), equalTo("CELLCO-PART")); assertThat(geoData.get("network"), equalTo("149.101.100.0/28")); assertThat(geoData.get("isp"), equalTo("Verizon Wireless")); - assertThat(geoData.get("isp_organization"), equalTo("Verizon Wireless")); + assertThat(geoData.get("isp_organization_name"), equalTo("Verizon Wireless")); assertThat(geoData.get("mobile_network_code"), equalTo("004")); assertThat(geoData.get("mobile_country_code"), equalTo("310")); } @@ -526,7 +526,7 @@ public void testAddressIsNotInTheDatabase() throws Exception { /** * Don't silently do DNS lookups or anything trappy on bogus data */ - public void testInvalid() throws Exception { + public void testInvalid() { GeoIpProcessor processor = new GeoIpProcessor( randomAlphaOfLength(10), null, @@ -803,7 +803,7 @@ long databaseFileSize() throws IOException { } @Override - InputStream databaseInputStream() throws IOException { + InputStream databaseInputStream() { return databaseInputStreamSupplier.get(); } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTestUtils.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTestUtils.java new file mode 100644 index 0000000000000..a3d72aca2295c --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTestUtils.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.core.SuppressForbidden; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Set; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + +public final class GeoIpTestUtils { + + private GeoIpTestUtils() { + // utility class + } + + public static final Set DEFAULT_DATABASES = Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"); + + @SuppressForbidden(reason = "uses java.io.File") + private static boolean isDirectory(final Path path) { + return path.toFile().isDirectory(); + } + + public static void copyDatabase(final String databaseName, final Path destination) { + try (InputStream is = GeoIpTestUtils.class.getResourceAsStream("/" + databaseName)) { + if (is == null) { + throw new FileNotFoundException("Resource [" + databaseName + "] not found in classpath"); + } + + Files.copy(is, isDirectory(destination) ? destination.resolve(databaseName) : destination, REPLACE_EXISTING); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public static void copyDefaultDatabases(final Path directory) { + for (final String database : DEFAULT_DATABASES) { + copyDatabase(database, directory); + } + } + + public static void copyDefaultDatabases(final Path directory, ConfigDatabases configDatabases) { + for (final String database : DEFAULT_DATABASES) { + copyDatabase(database, directory); + configDatabases.updateDatabase(directory.resolve(database), true); + } + } +} diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-Anonymous-IP-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-Anonymous-IP-Test.mmdb index 17fc3715090ae..1b142d0001b9c 100644 Binary files a/modules/ingest-geoip/src/test/resources/GeoIP2-Anonymous-IP-Test.mmdb and b/modules/ingest-geoip/src/test/resources/GeoIP2-Anonymous-IP-Test.mmdb differ diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-City-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-City-Test.mmdb index 7ed43d616a85d..04220ff4b6411 100644 Binary files a/modules/ingest-geoip/src/test/resources/GeoIP2-City-Test.mmdb and b/modules/ingest-geoip/src/test/resources/GeoIP2-City-Test.mmdb differ diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-Connection-Type-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-Connection-Type-Test.mmdb index 7bfae78964df0..c49ca3ad48f39 100644 Binary files a/modules/ingest-geoip/src/test/resources/GeoIP2-Connection-Type-Test.mmdb and b/modules/ingest-geoip/src/test/resources/GeoIP2-Connection-Type-Test.mmdb differ diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-Domain-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-Domain-Test.mmdb index d21c2a93df7d4..596a96617f241 100644 Binary files a/modules/ingest-geoip/src/test/resources/GeoIP2-Domain-Test.mmdb and b/modules/ingest-geoip/src/test/resources/GeoIP2-Domain-Test.mmdb differ diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-Enterprise-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoIP2-Enterprise-Test.mmdb index 837b725e9c154..16c1acf800260 100644 Binary files a/modules/ingest-geoip/src/test/resources/GeoIP2-Enterprise-Test.mmdb and b/modules/ingest-geoip/src/test/resources/GeoIP2-Enterprise-Test.mmdb differ diff --git a/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb 
b/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb index d16b0eee4c5e5..a4277d0a55c47 100644 Binary files a/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb and b/modules/ingest-geoip/src/test/resources/GeoIP2-ISP-Test.mmdb differ diff --git a/modules/ingest-geoip/src/test/resources/GeoLite2-City-Test.mmdb b/modules/ingest-geoip/src/test/resources/GeoLite2-City-Test.mmdb index 0809201619b59..393efe464b610 100644 Binary files a/modules/ingest-geoip/src/test/resources/GeoLite2-City-Test.mmdb and b/modules/ingest-geoip/src/test/resources/GeoLite2-City-Test.mmdb differ diff --git a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java index 121a6b01ea792..c41fea6b86ceb 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/StoredExpressionIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.script.expression; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -18,10 +17,10 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; -import java.io.IOException; import java.util.Collection; import java.util.Collections; +import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.putJsonStoredScript; import static org.hamcrest.Matchers.containsString; //TODO: please convert to unit tests! @@ -38,9 +37,9 @@ protected Collection> nodePlugins() { return Collections.singleton(ExpressionPlugin.class); } - public void testAllOpsDisabledIndexedScripts() throws IOException { - clusterAdmin().preparePutStoredScript().setId("script1").setContent(new BytesArray(""" - {"script": {"lang": "expression", "source": "2"} }"""), XContentType.JSON).get(); + public void testAllOpsDisabledIndexedScripts() { + putJsonStoredScript("script1", """ + {"script": {"lang": "expression", "source": "2"} }"""); prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON).get(); try { client().prepareUpdate("test", "1").setScript(new Script(ScriptType.STORED, null, "script1", Collections.emptyMap())).get(); diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java index 159851affd004..622a1bd4afd25 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScoreScript.java @@ -42,6 +42,12 @@ public boolean needs_score() { return needsScores; } + @Override + public boolean needs_termStats() { + // _termStats is not available for expressions + return false; + } + @Override public ScoreScript newInstance(final DocReader reader) throws IOException { // Use DocReader to get the leaf context while transitioning to DocReader for Painless. DocReader for expressions should follow. 
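Note for reviewers: the ingest-geoip test changes above all funnel through the new GeoIpTestUtils helper instead of hand-rolled Files.copy-from-classpath calls. A minimal usage sketch in Java, following the call sites in this diff (the createTempDir setup is illustrative; the helper API is as defined above):

    import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDatabase;
    import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases;

    Path configDir = createTempDir();
    // copies GeoLite2-ASN.mmdb, GeoLite2-City.mmdb and GeoLite2-Country.mmdb from the test classpath
    copyDefaultDatabases(configDir);
    // the destination may be a directory (keeps the source file name) ...
    copyDatabase("GeoLite2-City-Test.mmdb", configDir);
    // ... or an explicit file path, replacing any existing file
    copyDatabase("GeoLite2-City-Test.mmdb", configDir.resolve("GeoLite2-City.mmdb"));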
diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index e17fb4b26cd28..bfac939721122 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -8,7 +8,13 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.bytes.BytesArray; @@ -155,8 +161,8 @@ public void testTemplateQueryAsEscapedStringWithConditionalClauseAtEnd() throws ); } - public void testIndexedTemplateClient() throws Exception { - assertAcked(clusterAdmin().preparePutStoredScript().setId("testTemplate").setContent(new BytesArray(""" + public void testIndexedTemplateClient() { + putJsonStoredScript("testTemplate", """ { "script": { "lang": "mustache", @@ -168,9 +174,12 @@ public void testIndexedTemplateClient() throws Exception { } } } - }"""), XContentType.JSON)); + }"""); - GetStoredScriptResponse getResponse = clusterAdmin().prepareGetStoredScript("testTemplate").get(); + GetStoredScriptResponse getResponse = safeExecute( + GetStoredScriptAction.INSTANCE, + new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "testTemplate") + ); assertNotNull(getResponse.getSource()); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); @@ -193,9 +202,14 @@ public void testIndexedTemplateClient() throws Exception { 4 ); - assertAcked(clusterAdmin().prepareDeleteStoredScript("testTemplate")); + assertAcked( + safeExecute( + TransportDeleteStoredScriptAction.TYPE, + new DeleteStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "testTemplate") + ) + ); - getResponse = clusterAdmin().prepareGetStoredScript("testTemplate").get(); + getResponse = safeExecute(GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "testTemplate")); assertNull(getResponse.getSource()); } @@ -250,7 +264,7 @@ public void testBadTemplate() { } } - public void testIndexedTemplate() throws Exception { + public void testIndexedTemplate() { String script = """ { @@ -267,9 +281,9 @@ public void testIndexedTemplate() throws Exception { } """; - assertAcked(clusterAdmin().preparePutStoredScript().setId("1a").setContent(new BytesArray(script), XContentType.JSON)); - assertAcked(clusterAdmin().preparePutStoredScript().setId("2").setContent(new BytesArray(script), XContentType.JSON)); - assertAcked(clusterAdmin().preparePutStoredScript().setId("3").setContent(new BytesArray(script), XContentType.JSON)); + putJsonStoredScript("1a", script); + putJsonStoredScript("2", script); + 
putJsonStoredScript("3", script); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); @@ -335,13 +349,12 @@ public void testIndexedTemplateOverwrite() throws Exception { } }"""; for (int i = 1; i < iterations; i++) { - assertAcked( - clusterAdmin().preparePutStoredScript() - .setId("git01") - .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(-1))), XContentType.JSON) - ); + putJsonStoredScript("git01", query.replace("{{slop}}", Integer.toString(-1))); - GetStoredScriptResponse getResponse = clusterAdmin().prepareGetStoredScript("git01").get(); + GetStoredScriptResponse getResponse = safeExecute( + GetStoredScriptAction.INSTANCE, + new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "git01") + ); assertNotNull(getResponse.getSource()); Map templateParams = new HashMap<>(); @@ -357,11 +370,8 @@ public void testIndexedTemplateOverwrite() throws Exception { ); assertThat(e.getMessage(), containsString("No negative slop allowed")); - assertAcked( - clusterAdmin().preparePutStoredScript() - .setId("git01") - .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(0))), XContentType.JSON) - ); + putJsonStoredScript("git01", query.replace("{{slop}}", Integer.toString(0))); + assertHitCount( new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex")) .setScript("git01") @@ -373,8 +383,8 @@ public void testIndexedTemplateOverwrite() throws Exception { } } - public void testIndexedTemplateWithArray() throws Exception { - String multiQuery = """ + public void testIndexedTemplateWithArray() { + putJsonStoredScript("4", """ { "script": { "lang": "mustache", @@ -390,8 +400,8 @@ public void testIndexedTemplateWithArray() throws Exception { } } } - }"""; - assertAcked(clusterAdmin().preparePutStoredScript().setId("4").setContent(new BytesArray(multiQuery), XContentType.JSON)); + }"""); + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); bulkRequestBuilder.add(prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); bulkRequestBuilder.add(prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); @@ -454,4 +464,14 @@ public void testCCSCheckCompatibility() throws Exception { public static void assertHitCount(SearchTemplateRequestBuilder requestBuilder, long expectedHitCount) { assertResponse(requestBuilder, response -> ElasticsearchAssertions.assertHitCount(response.getResponse(), expectedHitCount)); } + + private void putJsonStoredScript(String id, String jsonContent) { + assertAcked( + safeExecute( + TransportPutStoredScriptAction.TYPE, + new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).id(id) + .content(new BytesArray(jsonContent), XContentType.JSON) + ) + ); + } } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchUsageStatsIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchUsageStatsIT.java index b14ca7ea7cfa2..e51a4822c67ba 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchUsageStatsIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchUsageStatsIT.java @@ -10,16 +10,14 @@ import org.elasticsearch.action.admin.cluster.stats.SearchUsageStats; import org.elasticsearch.client.Request; -import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.Collection; import java.util.List; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.putJsonStoredScript; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 1) public class SearchUsageStatsIT extends ESIntegTestCase { @@ -62,7 +60,7 @@ public void testSearchUsageStats() throws IOException { getRestClient().performRequest(request); } { - assertAcked(clusterAdmin().preparePutStoredScript().setId("testTemplate").setContent(new BytesArray(""" + putJsonStoredScript("testTemplate", """ { "script": { "lang": "mustache", @@ -74,7 +72,7 @@ public void testSearchUsageStats() throws IOException { } } } - }"""), XContentType.JSON)); + }"""); Request request = new Request("GET", "/_search/template"); request.setJsonEntity(""" { diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index b97d1b00573f4..2e50d14b24d72 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -142,12 +142,12 @@ public static byte[] writeMultiLineFormat(MultiSearchTemplateRequest multiSearch MultiSearchRequest.writeSearchRequestParams(searchRequest, xContentBuilder); BytesReference.bytes(xContentBuilder).writeTo(output); } - output.write(xContent.streamSeparator()); + output.write(xContent.bulkSeparator()); try (XContentBuilder xContentBuilder = XContentBuilder.builder(xContent)) { templateRequest.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); BytesReference.bytes(xContentBuilder).writeTo(output); } - output.write(xContent.streamSeparator()); + output.write(xContent.bulkSeparator()); } return output.toByteArray(); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java index d9466536ac46c..f7f68324907ff 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java @@ -94,7 +94,7 @@ public static MultiSearchTemplateRequest parseRequest(RestRequest restRequest, b } @Override - public boolean supportsContentStream() { + public boolean supportsBulkContent() { return true; } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java index ba1d4dbaba012..cac278563d944 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java @@ -20,10 +20,10 @@ import org.elasticsearch.client.internal.node.NodeClient; import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java index 74e4705447b64..35d3bfdf6446d 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java @@ -16,11 +16,11 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java index 3c183830afa6d..b11ebd2e652cb 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java @@ -16,11 +16,11 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.painless.PainlessScriptEngine; import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.rest.BaseRestHandler; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index 0736fd4ef4a87..919f2659e1f92 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeometryFormatterFactory; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -68,6 +67,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.painless.spi.PainlessTestScript; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt index 5082d5f1c7bdb..0dab7dcbadfb5 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt @@ -13,6 +13,23 @@ class org.elasticsearch.script.ScoreScript @no_import { class org.elasticsearch.script.ScoreScript$Factory @no_import { } +class org.elasticsearch.script.StatsSummary { + double getMin() + double getMax() + double getAverage() + double getSum() + long getCount() +} + +class org.elasticsearch.script.ScriptTermStats { + int uniqueTermsCount() + int matchedTermsCount() + StatsSummary docFreq() + StatsSummary totalTermFreq() + StatsSummary termFreq() + StatsSummary termPositions() +} + static_import { double saturation(double, double) from_class org.elasticsearch.script.ScoreScriptUtils double sigmoid(double, double, double) from_class org.elasticsearch.script.ScoreScriptUtils diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreScriptTests.java new file mode 100644 index 0000000000000..08b55fdf3bcc3 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreScriptTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.script.ScoreScript; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.painless.ScriptTestCase.PAINLESS_BASE_WHITELIST; + +public class ScoreScriptTests extends ESSingleNodeTestCase { + /** + * Test that needTermStats() is reported correctly depending on whether _termStats is used + */ + public void testNeedsTermStats() { + IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double"); + + Map, List> contexts = new HashMap<>(); + List whitelists = new ArrayList<>(PAINLESS_BASE_WHITELIST); + whitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.script.score.txt")); + contexts.put(ScoreScript.CONTEXT, whitelists); + PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, contexts); + + SearchExecutionContext searchExecutionContext = index.newSearchExecutionContext(0, 0, null, () -> 0, null, emptyMap()); + + ScoreScript.Factory factory = service.compile(null, "1.2", ScoreScript.CONTEXT, Collections.emptyMap()); + ScoreScript.LeafFactory ss = factory.newFactory(Collections.emptyMap(), searchExecutionContext.lookup()); + assertFalse(ss.needs_termStats()); + + factory = service.compile(null, "doc['d'].value", ScoreScript.CONTEXT, Collections.emptyMap()); + ss = factory.newFactory(Collections.emptyMap(), searchExecutionContext.lookup()); + assertFalse(ss.needs_termStats()); + + factory = service.compile(null, "1/_termStats.totalTermFreq().getAverage()", ScoreScript.CONTEXT, Collections.emptyMap()); + ss = factory.newFactory(Collections.emptyMap(), searchExecutionContext.lookup()); + assertTrue(ss.needs_termStats()); + + factory = service.compile(null, "doc['d'].value * _termStats.docFreq().getSum()", ScoreScript.CONTEXT, Collections.emptyMap()); + ss = factory.newFactory(Collections.emptyMap(), searchExecutionContext.lookup()); + assertTrue(ss.needs_termStats()); + } +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml new file mode 100644 index 0000000000000..f82b844f01588 --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml @@ -0,0 +1,612 @@ +setup: + - requires: + cluster_features: ["script.term_stats"] + reason: "support for term stats has been added in 8.16" + + - do: + indices.create: + index: test-index + body: + settings: + number_of_shards: "2" + mappings: + properties: + title: + type: text + genre: + type: text + fields: + keyword: + type: keyword + + - do: + index: { refresh: true, index: test-index, id: "1", routing: 0, body: {"title": "Star wars", "genre": "Sci-fi"} } + - do: + index: { refresh: true, index: test-index, id: "2", routing: 1, body: {"title": "Star trek", "genre": "Sci-fi"} } + - do: + index: { refresh: true, index: test-index, 
id: "3", routing: 1, body: {"title": "Rambo", "genre": "War movie"} } + - do: + index: { refresh: true, index: test-index, id: "4", routing: 1, body: {"title": "Rambo II", "genre": "War movie"} } + +--- +"match query: uniqueTermsCount without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"match query: uniqueTermsCount with DFS": + - do: + search: + search_type: dfs_query_then_fetch + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"match query: matchedTermsCount without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.matchedTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: matchedTermsCount with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.matchedTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: docFreq min without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.docFreq().getMin()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 0 } + +--- +"match query: docFreq min with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.docFreq().getMin()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: docFreq max without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: docFreq max with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"match query: totalTermFreq sum without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { 
hits.hits.1._score: 1 } + +--- +"match query: totalTermFreq sum with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 3 } + - match: { hits.hits.1._score: 3 } + +--- +"match query: termFreq sum without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.termFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: termFreq sum with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.termFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: termPositions avg without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.termPositions().getAverage()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1.5 } + - match: { hits.hits.1._score: 1 } + +--- +"match query: termPositions avg with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { match: { "title": "Star wars" } } + script: + source: "return _termStats.termPositions().getAverage()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1.5 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: uniqueTermsCount without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: uniqueTermsCount with DFS": + - do: + search: + search_type: dfs_query_then_fetch + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: matchedTermsCount without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.matchedTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: matchedTermsCount with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.matchedTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: docFreq min without DFS": + - do: + search: 
+ rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.docFreq().getMin()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: docFreq min with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.docFreq().getMin()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"term query: docFreq max without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: docFreq max with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"term query: totalTermFreq sum without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: totalTermFreq sum with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 2 } + - match: { hits.hits.1._score: 2 } + +--- +"term query: termFreq sum without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.termFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: termFreq sum with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.termFreq().getSum()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 1 } + - match: { hits.hits.1._score: 1 } + +--- +"term query: termPositions avg without DFS": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: { term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.termPositions().getAverage()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + +--- +"term query: termPositions avg with DFS": + - do: + search: + rest_total_hits_as_int: true + search_type: dfs_query_then_fetch + index: test-index + body: + query: + script_score: + query: { 
term: { "genre.keyword": "Sci-fi" } } + script: + source: "return _termStats.termPositions().getAverage()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + +--- +"Complex bool query: uniqueTermsCount": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: + bool: + must: + match: { "title": "star wars" } + should: + term: { "genre.keyword": "Sci-fi" } + filter: + match: { "genre" : "sci"} + must_not: + term: { "genre.keyword": "War" } + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 2 } + - match: { hits.hits.0._score: 4 } + - match: { hits.hits.1._score: 4 } + + +--- +"match_all query: uniqueTermsCount": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: + match_all: {} + script: + source: "return _termStats.uniqueTermsCount()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } + +--- +"match_all query: docFreq": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: + match_all: {} + script: + source: "return _termStats.docFreq().getMax()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } + +--- +"match_all query: totalTermFreq": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: + match_all: {} + script: + source: "return _termStats.totalTermFreq().getSum()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } + +--- +"match_all query: termFreq": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: + match_all: {} + script: + source: "return _termStats.termFreq().getMax()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } + +--- +"match_all query: termPositions": + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + script_score: + query: + match_all: {} + script: + source: "return _termStats.termPositions().getSum()" + - match: { hits.total: 4 } + - match: { hits.hits.0._score: 0 } + - match: { hits.hits.1._score: 0 } + - match: { hits.hits.2._score: 0 } + - match: { hits.hits.3._score: 0 } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/60_script_doc_values_binary.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/60_script_doc_values_binary.yml index 67457e64c874e..b1505368a5c2b 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/60_script_doc_values_binary.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/60_script_doc_values_binary.yml @@ -1,130 +1,145 @@ --- "binary": - - skip: - features: ["headers"] - - do: - indices.create: - index: test - body: - mappings: - properties: - binary: - type: binary - doc_values: true + - skip: + features: ["headers"] + - do: + indices.create: + index: test + body: + mappings: + properties: + binary: 
+ type: binary + doc_values: true + sort_field: + type: keyword + doc_values: true - - do: - #set the header so we won't randomize it - headers: - Content-Type: application/json - index: - index: test - id: "1" - body: - binary: "U29tZSBiaW5hcnkgYmxvYg==" + - do: + #set the header so we won't randomize it + headers: + Content-Type: application/json + index: + index: test + id: "1" + body: + binary: "U29tZSBiaW5hcnkgYmxvYg==" + sort_field: "1" - - do: - #set the header so we won't randomize it - headers: - Content-Type: application/json - index: - index: test - id: "2" - body: - binary: [ - "U29tZSBiaW5hcnkgYmxvYg==", - "MTIzNA==", - "dGVzdA==" - ] + - do: + #set the header so we won't randomize it + headers: + Content-Type: application/json + index: + index: test + id: "2" + body: + binary: [ + "U29tZSBiaW5hcnkgYmxvYg==", + "MTIzNA==", + "dGVzdA==" + ] + sort_field: "2" - - do: - #set the header so we won't randomize it - headers: - Content-Type: application/json - index: - index: test - id: "3" - body: {} + - do: + #set the header so we won't randomize it + headers: + Content-Type: application/json + index: + index: test + id: "3" + body: + sort_field: "3" - - do: - indices.refresh: {} + - do: + indices.refresh: {} - - do: - search: - body: - script_fields: - field1: - script: - source: "if (doc['binary'].size() == 0) {return 'empty'} doc['binary'].get(0).utf8ToString()" - field2: - script: - source: "if (doc['binary'].size() == 0) {return 'empty'} doc['binary'].value.utf8ToString()" - - match: { hits.hits.0.fields.field1.0: "Some binary blob" } - - match: { hits.hits.0.fields.field2.0: "Some binary blob" } + - do: + search: + body: + sort: sort_field + script_fields: + field1: + script: + source: "if (doc['binary'].size() == 0) {return 'empty'} doc['binary'].get(0).utf8ToString()" + field2: + script: + source: "if (doc['binary'].size() == 0) {return 'empty'} doc['binary'].value.utf8ToString()" + - match: { hits.hits.0.fields.field1.0: "Some binary blob" } + - match: { hits.hits.0.fields.field2.0: "Some binary blob" } + - match: { hits.hits.1.fields.field1.0: "1234" } + - match: { hits.hits.1.fields.field2.0: "1234" } + - match: { hits.hits.2.fields.field1.0: "empty" } + - match: { hits.hits.2.fields.field2.0: "empty" } - - do: - search: - body: - script_fields: - field1: - script: - source: "ByteBuffer bb = field('binary').get(null); if (bb == null) {return -1;} return bb.get(0)" - field2: - script: - source: "ByteBuffer bb = field('binary').get(0, null); if (bb == null) {return -1;} return bb.get(0)" - field3: - script: - source: "int total = 0; for (value in field('binary')) {total += value.get(0)} total" - - match: { hits.hits.0.fields.field1.0: 83 } - - match: { hits.hits.0.fields.field2.0: 83 } - - match: { hits.hits.0.fields.field3.0: 83 } - - match: { hits.hits.1.fields.field1.0: 49 } - - match: { hits.hits.1.fields.field2.0: 49 } - - match: { hits.hits.1.fields.field3.0: 248 } - - match: { hits.hits.2.fields.field1.0: -1 } - - match: { hits.hits.2.fields.field2.0: -1 } - - match: { hits.hits.2.fields.field3.0: 0 } + - do: + search: + body: + sort: sort_field + script_fields: + field1: + script: + source: "ByteBuffer bb = field('binary').get(null); if (bb == null) {return -1;} return bb.get(0)" + field2: + script: + source: "ByteBuffer bb = field('binary').get(0, null); if (bb == null) {return -1;} return bb.get(0)" + field3: + script: + source: "int total = 0; for (value in field('binary')) {total += value.get(0)} total" + - match: { hits.hits.0.fields.field1.0: 83 } + - match: { 
hits.hits.0.fields.field2.0: 83 } + - match: { hits.hits.0.fields.field3.0: 83 } + - match: { hits.hits.1.fields.field1.0: 49 } + - match: { hits.hits.1.fields.field2.0: 49 } + - match: { hits.hits.1.fields.field3.0: 248 } + - match: { hits.hits.2.fields.field1.0: -1 } + - match: { hits.hits.2.fields.field2.0: -1 } + - match: { hits.hits.2.fields.field3.0: 0 } - - do: - search: - body: - script_fields: - field1: - script: - source: "ByteBuffer bb = field('binary').get(null); if (bb == null) {return -1;} return bb.limit()" - field2: - script: - source: "ByteBuffer bb = field('binary').get(0, null); if (bb == null) {return -1;} return bb.limit()" - field3: - script: - source: "int total = 0; for (ByteBuffer value : field('binary')) {total += value.limit()} total" - - match: { hits.hits.0.fields.field1.0: 16 } - - match: { hits.hits.0.fields.field2.0: 16 } - - match: { hits.hits.0.fields.field3.0: 16 } - - match: { hits.hits.1.fields.field1.0: 4 } - - match: { hits.hits.1.fields.field2.0: 4 } - - match: { hits.hits.1.fields.field3.0: 24 } - - match: { hits.hits.2.fields.field1.0: -1 } - - match: { hits.hits.2.fields.field2.0: -1 } - - match: { hits.hits.2.fields.field3.0: 0 } + - do: + search: + body: + sort: sort_field + script_fields: + field1: + script: + source: "ByteBuffer bb = field('binary').get(null); if (bb == null) {return -1;} return bb.limit()" + field2: + script: + source: "ByteBuffer bb = field('binary').get(0, null); if (bb == null) {return -1;} return bb.limit()" + field3: + script: + source: "int total = 0; for (ByteBuffer value : field('binary')) {total += value.limit()} total" + - match: { hits.hits.0.fields.field1.0: 16 } + - match: { hits.hits.0.fields.field2.0: 16 } + - match: { hits.hits.0.fields.field3.0: 16 } + - match: { hits.hits.1.fields.field1.0: 4 } + - match: { hits.hits.1.fields.field2.0: 4 } + - match: { hits.hits.1.fields.field3.0: 24 } + - match: { hits.hits.2.fields.field1.0: -1 } + - match: { hits.hits.2.fields.field2.0: -1 } + - match: { hits.hits.2.fields.field3.0: 0 } - - do: - search: - body: - script_fields: - field1: - script: - source: "ByteBuffer bb = $('binary', null); if (bb == null) {return -1;} return bb.get(0)" - - match: { hits.hits.0.fields.field1.0: 83 } - - match: { hits.hits.1.fields.field1.0: 49 } - - match: { hits.hits.2.fields.field1.0: -1 } + - do: + search: + body: + sort: sort_field + script_fields: + field1: + script: + source: "ByteBuffer bb = $('binary', null); if (bb == null) {return -1;} return bb.get(0)" + - match: { hits.hits.0.fields.field1.0: 83 } + - match: { hits.hits.1.fields.field1.0: 49 } + - match: { hits.hits.2.fields.field1.0: -1 } - - do: - search: - body: - script_fields: - field1: - script: - source: "ByteBuffer bb = $('binary', null); if (bb == null) {return -1;} return bb.limit()" - - match: { hits.hits.0.fields.field1.0: 16 } - - match: { hits.hits.1.fields.field1.0: 4 } - - match: { hits.hits.2.fields.field1.0: -1 } + - do: + search: + body: + sort: sort_field + script_fields: + field1: + script: + source: "ByteBuffer bb = $('binary', null); if (bb == null) {return -1;} return bb.limit()" + - match: { hits.hits.0.fields.field1.0: 16 } + - match: { hits.hits.1.fields.field1.0: 4 } + - match: { hits.hits.2.fields.field1.0: -1 } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index 899cc42fea1e0..b3cd3586fca54 100644 --- 
a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -447,7 +447,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" ); } - return new StringStoredFieldFieldLoader(fieldType().storedFieldNameForSyntheticSource(), leafName(), null) { + return new StringStoredFieldFieldLoader(fieldType().storedFieldNameForSyntheticSource(), leafName()) { @Override protected void write(XContentBuilder b, Object value) throws IOException { b.value((String) value); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index ad1b54afd985d..258eb4ba3e22a 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -20,12 +20,12 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexPluginMetricsIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexPluginMetricsIT.java new file mode 100644 index 0000000000000..e7d26b0808a48 --- /dev/null +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexPluginMetricsIT.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.reindex.BulkIndexByScrollResponseMatcher; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.reindex.DeleteByQueryMetrics.DELETE_BY_QUERY_TIME_HISTOGRAM; +import static org.elasticsearch.reindex.ReindexMetrics.REINDEX_TIME_HISTOGRAM; +import static org.elasticsearch.reindex.UpdateByQueryMetrics.UPDATE_BY_QUERY_TIME_HISTOGRAM; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST) +public class ReindexPluginMetricsIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Arrays.asList(ReindexPlugin.class, TestTelemetryPlugin.class); + } + + protected ReindexRequestBuilder reindex() { + return new ReindexRequestBuilder(client()); + } + + protected UpdateByQueryRequestBuilder updateByQuery() { + return new UpdateByQueryRequestBuilder(client()); + } + + protected DeleteByQueryRequestBuilder deleteByQuery() { + return new DeleteByQueryRequestBuilder(client()); + } + + public static BulkIndexByScrollResponseMatcher matcher() { + return new BulkIndexByScrollResponseMatcher(); + } + + public void testReindexMetrics() throws Exception { + final String dataNodeName = internalCluster().startNode(); + + indexRandom( + true, + prepareIndex("source").setId("1").setSource("foo", "a"), + prepareIndex("source").setId("2").setSource("foo", "a"), + prepareIndex("source").setId("3").setSource("foo", "b"), + prepareIndex("source").setId("4").setSource("foo", "c") + ); + assertHitCount(prepareSearch("source").setSize(0), 4); + + final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + + // Copy all the docs + reindex().source("source").destination("dest").get(); + // Use assertBusy to wait for all threads to complete so we get deterministic results + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(1)); + }); + + // Now none of them + createIndex("none"); + reindex().source("source").destination("none").filter(termQuery("foo", "no_match")).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(2)); + }); + + // Now half of them + reindex().source("source").destination("dest_half").filter(termQuery("foo", "a")).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(3)); + }); + + // Limit with maxDocs + 
reindex().source("source").destination("dest_size_one").maxDocs(1).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(REINDEX_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(4)); + }); + } + + public void testDeleteByQueryMetrics() throws Exception { + final String dataNodeName = internalCluster().startNode(); + + indexRandom( + true, + prepareIndex("test").setId("1").setSource("foo", "a"), + prepareIndex("test").setId("2").setSource("foo", "a"), + prepareIndex("test").setId("3").setSource("foo", "b"), + prepareIndex("test").setId("4").setSource("foo", "c"), + prepareIndex("test").setId("5").setSource("foo", "d"), + prepareIndex("test").setId("6").setSource("foo", "e"), + prepareIndex("test").setId("7").setSource("foo", "f") + ); + + assertHitCount(prepareSearch("test").setSize(0), 7); + + final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + + // Deletes two docs that matches "foo:a" + deleteByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(DELETE_BY_QUERY_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(1)); + }); + + // Deletes the two first docs with limit by size + DeleteByQueryRequestBuilder request = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).size(2).refresh(true); + request.source().addSort("foo.keyword", SortOrder.ASC); + request.get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(DELETE_BY_QUERY_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(2)); + }); + + // Deletes but match no docs + deleteByQuery().source("test").filter(termQuery("foo", "no_match")).refresh(true).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(DELETE_BY_QUERY_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(3)); + }); + + // Deletes all remaining docs + deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(DELETE_BY_QUERY_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(4)); + }); + } + + public void testUpdateByQueryMetrics() throws Exception { + final String dataNodeName = internalCluster().startNode(); + + indexRandom( + true, + prepareIndex("test").setId("1").setSource("foo", "a"), + prepareIndex("test").setId("2").setSource("foo", "a"), + prepareIndex("test").setId("3").setSource("foo", "b"), + prepareIndex("test").setId("4").setSource("foo", "c") + ); + assertHitCount(prepareSearch("test").setSize(0), 4); + assertEquals(1, client().prepareGet("test", "1").get().getVersion()); + assertEquals(1, client().prepareGet("test", "4").get().getVersion()); + + final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + + // Reindex all the docs + updateByQuery().source("test").refresh(true).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = 
testTelemetryPlugin.getLongHistogramMeasurement(UPDATE_BY_QUERY_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(1)); + }); + + // Now none of them + updateByQuery().source("test").filter(termQuery("foo", "no_match")).refresh(true).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(UPDATE_BY_QUERY_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(2)); + }); + + // Now half of them + updateByQuery().source("test").filter(termQuery("foo", "a")).refresh(true).get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(UPDATE_BY_QUERY_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(3)); + }); + + // Limit with size + UpdateByQueryRequestBuilder request = updateByQuery().source("test").size(3).refresh(true); + request.source().addSort("foo.keyword", SortOrder.ASC); + request.get(); + assertBusy(() -> { + testTelemetryPlugin.collect(); + List measurements = testTelemetryPlugin.getLongHistogramMeasurement(UPDATE_BY_QUERY_TIME_HISTOGRAM); + assertThat(measurements.size(), equalTo(4)); + }); + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/BulkByScrollParallelizationHelper.java index e4f734fa5721c..82c42f5d680ff 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/BulkByScrollParallelizationHelper.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -114,12 +115,14 @@ static > void initTaskState ) { int configuredSlices = request.getSlices(); if (configuredSlices == AbstractBulkByScrollRequest.AUTO_SLICES) { - ClusterSearchShardsRequest shardsRequest = new ClusterSearchShardsRequest(); - shardsRequest.indices(request.getSearchRequest().indices()); - client.admin().cluster().searchShards(shardsRequest, listener.safeMap(response -> { - setWorkerCount(request, task, countSlicesBasedOnShards(response)); - return null; - })); + client.execute( + TransportClusterSearchShardsAction.TYPE, + new ClusterSearchShardsRequest(request.getTimeout(), request.getSearchRequest().indices()), + listener.safeMap(response -> { + setWorkerCount(request, task, countSlicesBasedOnShards(response)); + return null; + }) + ); } else { setWorkerCount(request, task, configuredSlices); listener.onResponse(null); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/DeleteByQueryMetrics.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/DeleteByQueryMetrics.java new file mode 100644 index 0000000000000..2cedf0d5f5823 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/DeleteByQueryMetrics.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.reindex; + +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +public class DeleteByQueryMetrics { + public static final String DELETE_BY_QUERY_TIME_HISTOGRAM = "es.delete_by_query.duration.histogram"; + + private final LongHistogram deleteByQueryTimeSecsHistogram; + + public DeleteByQueryMetrics(MeterRegistry meterRegistry) { + this( + meterRegistry.registerLongHistogram(DELETE_BY_QUERY_TIME_HISTOGRAM, "Time taken to execute Delete by Query request", "seconds") + ); + } + + private DeleteByQueryMetrics(LongHistogram deleteByQueryTimeSecsHistogram) { + this.deleteByQueryTimeSecsHistogram = deleteByQueryTimeSecsHistogram; + } + + public long recordTookTime(long tookTime) { + deleteByQueryTimeSecsHistogram.record(tookTime); + return tookTime; + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java new file mode 100644 index 0000000000000..3025357aa6538 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.reindex; + +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +public class ReindexMetrics { + + public static final String REINDEX_TIME_HISTOGRAM = "es.reindex.duration.histogram"; + + private final LongHistogram reindexTimeSecsHistogram; + + public ReindexMetrics(MeterRegistry meterRegistry) { + this(meterRegistry.registerLongHistogram(REINDEX_TIME_HISTOGRAM, "Time to reindex by search", "millis")); + } + + private ReindexMetrics(LongHistogram reindexTimeSecsHistogram) { + this.reindexTimeSecsHistogram = reindexTimeSecsHistogram; + } + + public long recordTookTime(long tookTime) { + reindexTimeSecsHistogram.record(tookTime); + return tookTime; + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java index 1a40f77250e5f..3169d4c4ee1fb 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java @@ -34,7 +34,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.function.Predicate; import java.util.function.Supplier; @@ -85,8 +84,11 @@ public List getRestHandlers( @Override public Collection createComponents(PluginServices services) { - return Collections.singletonList( - new ReindexSslConfig(services.environment().settings(), services.environment(), services.resourceWatcherService()) + return List.of( + new ReindexSslConfig(services.environment().settings(), services.environment(), services.resourceWatcherService()), + new 
ReindexMetrics(services.telemetryProvider().getMeterRegistry()), + new UpdateByQueryMetrics(services.telemetryProvider().getMeterRegistry()), + new DeleteByQueryMetrics(services.telemetryProvider().getMeterRegistry()) ); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java index dbe1968bb076a..cb393a42f52a1 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; @@ -65,6 +66,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.LongSupplier; @@ -82,19 +84,22 @@ public class Reindexer { private final ThreadPool threadPool; private final ScriptService scriptService; private final ReindexSslConfig reindexSslConfig; + private final ReindexMetrics reindexMetrics; Reindexer( ClusterService clusterService, Client client, ThreadPool threadPool, ScriptService scriptService, - ReindexSslConfig reindexSslConfig + ReindexSslConfig reindexSslConfig, + @Nullable ReindexMetrics reindexMetrics ) { this.clusterService = clusterService; this.client = client; this.threadPool = threadPool; this.scriptService = scriptService; this.reindexSslConfig = reindexSslConfig; + this.reindexMetrics = reindexMetrics; } public void initTask(BulkByScrollTask task, ReindexRequest request, ActionListener listener) { @@ -102,6 +107,8 @@ public void initTask(BulkByScrollTask task, ReindexRequest request, ActionListen } public void execute(BulkByScrollTask task, ReindexRequest request, Client bulkClient, ActionListener listener) { + long startTime = System.nanoTime(); + BulkByScrollParallelizationHelper.executeSlicedAction( task, request, @@ -122,7 +129,12 @@ public void execute(BulkByScrollTask task, ReindexRequest request, Client bulkCl clusterService.state(), reindexSslConfig, request, - listener + ActionListener.runAfter(listener, () -> { + long elapsedTime = TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - startTime); + if (reindexMetrics != null) { + reindexMetrics.recordTookTime(elapsedTime); + } + }) ); searchAction.start(); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportDeleteByQueryAction.java index 530246db35c93..53381c33d7f78 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportDeleteByQueryAction.java @@ -14,23 +14,27 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.reindex.BulkByScrollResponse; import 
org.elasticsearch.index.reindex.BulkByScrollTask; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.concurrent.TimeUnit; + public class TransportDeleteByQueryAction extends HandledTransportAction { private final ThreadPool threadPool; private final Client client; private final ScriptService scriptService; private final ClusterService clusterService; + private final DeleteByQueryMetrics deleteByQueryMetrics; @Inject public TransportDeleteByQueryAction( @@ -39,18 +43,21 @@ public TransportDeleteByQueryAction( Client client, TransportService transportService, ScriptService scriptService, - ClusterService clusterService + ClusterService clusterService, + @Nullable DeleteByQueryMetrics deleteByQueryMetrics ) { super(DeleteByQueryAction.NAME, transportService, actionFilters, DeleteByQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.client = client; this.scriptService = scriptService; this.clusterService = clusterService; + this.deleteByQueryMetrics = deleteByQueryMetrics; } @Override public void doExecute(Task task, DeleteByQueryRequest request, ActionListener listener) { BulkByScrollTask bulkByScrollTask = (BulkByScrollTask) task; + long startTime = System.nanoTime(); BulkByScrollParallelizationHelper.startSlicedAction( request, bulkByScrollTask, @@ -64,8 +71,20 @@ public void doExecute(Task task, DeleteByQueryRequest request, ActionListener { + long elapsedTime = TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - startTime); + if (deleteByQueryMetrics != null) { + deleteByQueryMetrics.recordTookTime(elapsedTime); + } + }) + ).start(); } ); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportReindexAction.java index f19d359ddeb44..821a137ac7566 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportReindexAction.java @@ -15,15 +15,16 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.BulkByScrollTask; import org.elasticsearch.index.reindex.ReindexAction; import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -53,7 +54,8 @@ public TransportReindexAction( AutoCreateIndex autoCreateIndex, Client client, TransportService transportService, - ReindexSslConfig sslConfig + ReindexSslConfig sslConfig, + @Nullable ReindexMetrics reindexMetrics ) { this( ReindexAction.NAME, @@ -66,7 +68,8 @@ public 
TransportReindexAction( autoCreateIndex, client, transportService, - sslConfig + sslConfig, + reindexMetrics ); } @@ -81,12 +84,13 @@ protected TransportReindexAction( AutoCreateIndex autoCreateIndex, Client client, TransportService transportService, - ReindexSslConfig sslConfig + ReindexSslConfig sslConfig, + @Nullable ReindexMetrics reindexMetrics ) { super(name, transportService, actionFilters, ReindexRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.client = client; this.reindexValidator = new ReindexValidator(settings, clusterService, indexNameExpressionResolver, autoCreateIndex); - this.reindexer = new Reindexer(clusterService, client, threadPool, scriptService, sslConfig); + this.reindexer = new Reindexer(clusterService, client, threadPool, scriptService, sslConfig, reindexMetrics); } @Override diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java index e5df42a484172..4d09253e046d4 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java @@ -17,9 +17,9 @@ import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.reindex.BulkByScrollTask; import org.elasticsearch.index.reindex.LeaderBulkByScrollTaskState; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportUpdateByQueryAction.java index de84d74d05ee8..997d4d32fe042 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportUpdateByQueryAction.java @@ -17,14 +17,15 @@ import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.BulkByScrollTask; import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.index.reindex.UpdateByQueryAction; import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.index.reindex.WorkerBulkByScrollTaskState; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.CtxMap; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; @@ -35,6 +36,7 @@ import org.elasticsearch.transport.TransportService; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.function.LongSupplier; @@ -44,6 +46,7 @@ public class TransportUpdateByQueryAction extends HandledTransportAction listener) { BulkByScrollTask bulkByScrollTask = (BulkByScrollTask) task; + long startTime = System.nanoTime(); 
BulkByScrollParallelizationHelper.startSlicedAction( request, bulkByScrollTask, @@ -78,8 +84,21 @@ protected void doExecute(Task task, UpdateByQueryRequest request, ActionListener clusterService.localNode(), bulkByScrollTask ); - new AsyncIndexBySearchAction(bulkByScrollTask, logger, assigningClient, threadPool, scriptService, request, state, listener) - .start(); + new AsyncIndexBySearchAction( + bulkByScrollTask, + logger, + assigningClient, + threadPool, + scriptService, + request, + state, + ActionListener.runAfter(listener, () -> { + long elapsedTime = TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - startTime); + if (updateByQueryMetrics != null) { + updateByQueryMetrics.recordTookTime(elapsedTime); + } + }) + ).start(); } ); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/UpdateByQueryMetrics.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/UpdateByQueryMetrics.java new file mode 100644 index 0000000000000..6ca52769a1ba9 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/UpdateByQueryMetrics.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.reindex; + +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +public class UpdateByQueryMetrics { + public static final String UPDATE_BY_QUERY_TIME_HISTOGRAM = "es.update_by_query.duration.histogram"; + + private final LongHistogram updateByQueryTimeSecsHistogram; + + public UpdateByQueryMetrics(MeterRegistry meterRegistry) { + this( + meterRegistry.registerLongHistogram(UPDATE_BY_QUERY_TIME_HISTOGRAM, "Time taken to execute Update by Query request", "seconds") + ); + } + + private UpdateByQueryMetrics(LongHistogram updateByQueryTimeSecsHistogram) { + this.updateByQueryTimeSecsHistogram = updateByQueryTimeSecsHistogram; + } + + public long recordTookTime(long tookTime) { + updateByQueryTimeSecsHistogram.record(tookTime); + return tookTime; + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index c40a4f72bc133..47505919ba7d2 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.TimeValue; @@ -992,7 +993,7 @@ private static class DummyTransportAsyncBulkByScrollAction extends TransportActi BulkByScrollResponse> { protected DummyTransportAsyncBulkByScrollAction(String actionName, ActionFilters actionFilters, TaskManager taskManager) { - super(actionName, actionFilters, taskManager); + super(actionName, actionFilters, taskManager, EsExecutors.DIRECT_EXECUTOR_SERVICE); } @Override 
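Note: the reindex, update-by-query and delete-by-query transport actions above all follow the same took-time recording pattern. The sketch below is a minimal, self-contained illustration of that pattern, not code from this change: the class name TookTimeMetricsSketch and the metric name "es.example.duration.histogram" are placeholders, while MeterRegistry.registerLongHistogram, LongHistogram.record, ActionListener.runAfter and TimeUnit.NANOSECONDS.toSeconds are the APIs already used in the hunks above.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.telemetry.metric.LongHistogram;
import org.elasticsearch.telemetry.metric.MeterRegistry;

import java.util.concurrent.TimeUnit;

// Minimal sketch (not the production wiring) of the metrics pattern in this change:
// capture a start time, then record elapsed seconds into a LongHistogram once the
// action's listener completes, skipping the recording when metrics were not injected.
class TookTimeMetricsSketch {
    private final LongHistogram tookTimeSecondsHistogram;

    TookTimeMetricsSketch(MeterRegistry meterRegistry) {
        // Same registration style as the *Metrics classes added in this diff;
        // the metric name here is a placeholder, not a real metric from this PR.
        this.tookTimeSecondsHistogram = meterRegistry.registerLongHistogram(
            "es.example.duration.histogram",
            "Time taken to execute the request",
            "seconds"
        );
    }

    void recordTookTime(long seconds) {
        tookTimeSecondsHistogram.record(seconds);
    }

    // Wraps a listener so elapsed time is recorded once the action finishes,
    // whether it succeeded or failed; metrics may be null when telemetry is absent.
    static ActionListener<BulkByScrollResponse> withTookTime(
        ActionListener<BulkByScrollResponse> listener,
        @Nullable TookTimeMetricsSketch metrics
    ) {
        long startTime = System.nanoTime();
        return ActionListener.runAfter(listener, () -> {
            long elapsedSeconds = TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - startTime);
            if (metrics != null) {
                metrics.recordTookTime(elapsedSeconds);
            }
        });
    }
}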
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryMetricsTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryMetricsTests.java new file mode 100644 index 0000000000000..58adc6aebaa9b --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryMetricsTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.reindex; + +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.RecordingMeterRegistry; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; + +import static org.elasticsearch.reindex.DeleteByQueryMetrics.DELETE_BY_QUERY_TIME_HISTOGRAM; + +public class DeleteByQueryMetricsTests extends ESTestCase { + private RecordingMeterRegistry recordingMeterRegistry; + private DeleteByQueryMetrics metrics; + + @Before + public void createMetrics() { + recordingMeterRegistry = new RecordingMeterRegistry(); + metrics = new DeleteByQueryMetrics(recordingMeterRegistry); + } + + public void testRecordTookTime() { + int secondsTaken = randomIntBetween(1, 50); + metrics.recordTookTime(secondsTaken); + List measurements = recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_HISTOGRAM, DELETE_BY_QUERY_TIME_HISTOGRAM); + assertEquals(measurements.size(), 1); + assertEquals(measurements.get(0).getLong(), secondsTaken); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexMetricsTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexMetricsTests.java new file mode 100644 index 0000000000000..4711530585817 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexMetricsTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.reindex; + +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.RecordingMeterRegistry; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; + +import static org.elasticsearch.reindex.ReindexMetrics.REINDEX_TIME_HISTOGRAM; + +public class ReindexMetricsTests extends ESTestCase { + + private RecordingMeterRegistry recordingMeterRegistry; + private ReindexMetrics metrics; + + @Before + public void createMetrics() { + recordingMeterRegistry = new RecordingMeterRegistry(); + metrics = new ReindexMetrics(recordingMeterRegistry); + } + + public void testRecordTookTime() { + int secondsTaken = randomIntBetween(1, 50); + metrics.recordTookTime(secondsTaken); + List measurements = recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_HISTOGRAM, REINDEX_TIME_HISTOGRAM); + assertEquals(measurements.size(), 1); + assertEquals(measurements.get(0).getLong(), secondsTaken); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryMetricsTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryMetricsTests.java new file mode 100644 index 0000000000000..548d18d202984 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryMetricsTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.reindex; + +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.RecordingMeterRegistry; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; + +import static org.elasticsearch.reindex.UpdateByQueryMetrics.UPDATE_BY_QUERY_TIME_HISTOGRAM; + +public class UpdateByQueryMetricsTests extends ESTestCase { + + private RecordingMeterRegistry recordingMeterRegistry; + private UpdateByQueryMetrics metrics; + + @Before + public void createMetrics() { + recordingMeterRegistry = new RecordingMeterRegistry(); + metrics = new UpdateByQueryMetrics(recordingMeterRegistry); + } + + public void testRecordTookTime() { + int secondsTaken = randomIntBetween(1, 50); + metrics.recordTookTime(secondsTaken); + List measurements = recordingMeterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_HISTOGRAM, UPDATE_BY_QUERY_TIME_HISTOGRAM); + assertEquals(measurements.size(), 1); + assertEquals(measurements.get(0).getLong(), secondsTaken); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWithScriptTests.java index 876ddefda161b..c4d591f804750 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWithScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/UpdateByQueryWithScriptTests.java @@ -60,6 +60,7 @@ protected TransportUpdateByQueryAction.AsyncIndexBySearchAction action(ScriptSer null, transportService, scriptService, + null, null ); return new TransportUpdateByQueryAction.AsyncIndexBySearchAction( diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index d093816acd45f..6334e5ae6a195 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -1,5 +1,7 @@ import org.apache.tools.ant.filters.ReplaceTokens +import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin +import org.elasticsearch.gradle.internal.test.RestIntegTestTask /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one @@ -17,65 +19,69 @@ esplugin { } versions << [ - 'azure': '12.20.1', - 'azureCommon': '12.19.1', - 'azureCore': '1.34.0', - 'azureCoreHttpNetty': '1.12.7', - 'azureJackson': '2.15.4', - 'azureJacksonDatabind': '2.13.4.2', - 'azureAvro': '12.5.3', - - 'jakartaActivation': '1.2.1', - 'jakartaXMLBind': '2.3.2', - 'stax2API': '4.2.1', - 'woodstox': '6.4.0', - - 'reactorNetty': '1.0.39', - 'reactorCore': '3.4.34', - 'reactiveStreams': '1.0.4', + 'azureReactorNetty': '1.0.45', ] dependencies { - api "com.azure:azure-storage-blob:${versions.azure}" - api "com.azure:azure-storage-common:${versions.azureCommon}" - api "com.azure:azure-core-http-netty:${versions.azureCoreHttpNetty}" - api "com.azure:azure-core:${versions.azureCore}" - // jackson - api "com.fasterxml.jackson.core:jackson-core:${versions.azureJackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.azureJacksonDatabind}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.azureJackson}" - - // jackson xml - api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.azureJackson}" - api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.azureJackson}" - api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.azureJackson}" - api "jakarta.activation:jakarta.activation-api:${versions.jakartaActivation}" - // The SDK uses javax.xml bindings - api "jakarta.xml.bind:jakarta.xml.bind-api:${versions.jakartaXMLBind}" - api "org.codehaus.woodstox:stax2-api:${versions.stax2API}" - api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - - // netty + // Microsoft + api "com.azure:azure-core-http-netty:1.15.3" + api "com.azure:azure-core:1.51.0" + api "com.azure:azure-identity:1.13.2" + api "com.azure:azure-json:1.2.0" + api "com.azure:azure-storage-blob:12.27.1" + api "com.azure:azure-storage-common:12.26.1" + api "com.azure:azure-storage-internal-avro:12.12.1" + api "com.azure:azure-xml:1.1.0" + api "com.microsoft.azure:msal4j-persistence-extension:1.3.0" + api "com.microsoft.azure:msal4j:1.16.2" + + // Jackson + api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" + api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}" + api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" + + // Netty api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-handler-proxy:${versions.netty}" api "io.netty:netty-resolver-dns:${versions.netty}" - // reactor - api "io.projectreactor.netty:reactor-netty-core:${versions.reactorNetty}" - api "io.projectreactor.netty:reactor-netty-http:${versions.reactorNetty}" - api "io.projectreactor:reactor-core:${versions.reactorCore}" - api "org.reactivestreams:reactive-streams:${versions.reactiveStreams}" + // Reactor + api "io.projectreactor.netty:reactor-netty-core:${versions.azureReactorNetty}" + api "io.projectreactor.netty:reactor-netty-http:${versions.azureReactorNetty}" + api "io.projectreactor:reactor-core:3.4.38" + api "org.reactivestreams:reactive-streams:1.0.4" + + // Others + api "com.fasterxml.woodstox:woodstox-core:6.7.0" + api "com.github.stephenc.jcip:jcip-annotations:1.0-1" + api 
"com.nimbusds:content-type:2.3" + api "com.nimbusds:lang-tag:1.7" + api "com.nimbusds:nimbus-jose-jwt:9.37.3" + api "com.nimbusds:oauth2-oidc-sdk:11.9.1" + api "jakarta.activation:jakarta.activation-api:1.2.1" + api "jakarta.xml.bind:jakarta.xml.bind-api:2.3.3" + api "net.java.dev.jna:jna-platform:${versions.jna}" // Maven says 5.14.0 but this aligns with the Elasticsearch-wide version + api "net.java.dev.jna:jna:${versions.jna}" // Maven says 5.14.0 but this aligns with the Elasticsearch-wide version + api "net.minidev:accessors-smart:2.5.0" + api "net.minidev:json-smart:2.5.0" + api "org.codehaus.woodstox:stax2-api:4.2.2" + api "org.ow2.asm:asm:9.3" + + runtimeOnly "com.google.crypto.tink:tink:1.14.0" + runtimeOnly "com.google.protobuf:protobuf-java:4.27.0" + runtimeOnly "com.google.code.gson:gson:2.11.0" + runtimeOnly "org.cryptomator:siv-mode:1.5.2" implementation project(":modules:transport-netty4") implementation("org.slf4j:slf4j-api:${versions.slf4j}") runtimeOnly "org.slf4j:slf4j-nop:${versions.slf4j}" // runtimeOnly("org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}") https://github.com/elastic/elasticsearch/issues/93714 - - runtimeOnly "com.azure:azure-storage-internal-avro:${versions.azureAvro}" - testImplementation project(':test:fixtures:azure-fixture') yamlRestTestImplementation project(':test:fixtures:azure-fixture') } @@ -165,12 +171,94 @@ tasks.named("thirdPartyAudit").configure { 'com.ctc.wstx.shaded.msv_core.driver.textui.Driver', // [missing classes] SLF4j includes an optional class that depends on an extension class. see Log4jLogger#createConverter // 'org.slf4j.ext.EventData' - bring back when https://github.com/elastic/elasticsearch/issues/93714 is done + + // Optional dependency of tink + 'com.google.api.client.http.HttpHeaders', + 'com.google.api.client.http.HttpRequest', + 'com.google.api.client.http.HttpRequestFactory', + 'com.google.api.client.http.HttpResponse', + 'com.google.api.client.http.HttpTransport', + 'com.google.api.client.http.javanet.NetHttpTransport', + 'com.google.api.client.http.javanet.NetHttpTransport$Builder', + + // Optional dependency of nimbus-jose-jwt and oauth2-oidc-sdk + 'org.bouncycastle.asn1.pkcs.PrivateKeyInfo', + 'org.bouncycastle.asn1.x509.AlgorithmIdentifier', + 'org.bouncycastle.asn1.x509.SubjectPublicKeyInfo', + 'org.bouncycastle.cert.X509CertificateHolder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateHolder', + 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', + 'org.bouncycastle.crypto.InvalidCipherTextException', + 'org.bouncycastle.crypto.engines.AESEngine', + 'org.bouncycastle.crypto.modes.GCMBlockCipher', + 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider', + 'org.bouncycastle.jce.provider.BouncyCastleProvider', + 'org.bouncycastle.openssl.PEMKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + + // OAuth servlet support is optional and not required + 'jakarta.servlet.ServletRequest', + 'jakarta.servlet.http.HttpServletRequest', + 'jakarta.servlet.http.HttpServletResponse', + 'javax.servlet.ServletRequest', + 'javax.servlet.http.HttpServletRequest', + 'javax.servlet.http.HttpServletResponse', + + // OpenSAML support is optional + 'org.joda.time.DateTime', + 'net.shibboleth.utilities.java.support.xml.SerializeSupport', + 'org.opensaml.core.config.InitializationException', + 'org.opensaml.core.config.InitializationService', + 'org.opensaml.core.xml.XMLObject', + 
'org.opensaml.core.xml.XMLObjectBuilder', + 'org.opensaml.core.xml.XMLObjectBuilderFactory', + 'org.opensaml.core.xml.config.XMLObjectProviderRegistrySupport', + 'org.opensaml.core.xml.io.Marshaller', + 'org.opensaml.core.xml.io.MarshallerFactory', + 'org.opensaml.core.xml.io.MarshallingException', + 'org.opensaml.core.xml.io.Unmarshaller', + 'org.opensaml.core.xml.io.UnmarshallerFactory', + 'org.opensaml.core.xml.schema.XSString', + 'org.opensaml.core.xml.schema.impl.XSStringBuilder', + 'org.opensaml.saml.saml2.core.Assertion', + 'org.opensaml.saml.saml2.core.Attribute', + 'org.opensaml.saml.saml2.core.AttributeStatement', + 'org.opensaml.saml.saml2.core.AttributeValue', + 'org.opensaml.saml.saml2.core.Audience', + 'org.opensaml.saml.saml2.core.AudienceRestriction', + 'org.opensaml.saml.saml2.core.AuthnContext', + 'org.opensaml.saml.saml2.core.AuthnContextClassRef', + 'org.opensaml.saml.saml2.core.AuthnStatement', + 'org.opensaml.saml.saml2.core.Conditions', + 'org.opensaml.saml.saml2.core.Issuer', + 'org.opensaml.saml.saml2.core.NameID', + 'org.opensaml.saml.saml2.core.Subject', + 'org.opensaml.saml.saml2.core.SubjectConfirmation', + 'org.opensaml.saml.saml2.core.SubjectConfirmationData', + 'org.opensaml.saml.security.impl.SAMLSignatureProfileValidator', + 'org.opensaml.security.credential.BasicCredential', + 'org.opensaml.security.credential.Credential', + 'org.opensaml.security.credential.UsageType', + 'org.opensaml.xmlsec.signature.Signature', + 'org.opensaml.xmlsec.signature.support.SignatureException', + 'org.opensaml.xmlsec.signature.support.SignatureValidator', + 'org.opensaml.xmlsec.signature.support.Signer', ) ignoreViolations( 'javax.activation.MailcapCommandMap', 'javax.activation.MimetypesFileTypeMap', 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException', + + 'com.google.protobuf.MessageSchema', + 'com.google.protobuf.UnsafeUtil', + 'com.google.protobuf.UnsafeUtil$1', + 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', + 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', + 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', ) } @@ -180,6 +268,8 @@ String azureKey = System.getenv("azure_storage_key") String azureContainer = System.getenv("azure_storage_container") String azureBasePath = System.getenv("azure_storage_base_path") String azureSasToken = System.getenv("azure_storage_sas_token") +String azureTenantId = System.getenv("azure_storage_tenant_id") +String azureClientId = System.getenv("azure_storage_client_id") if (!azureAccount && !azureKey && !azureContainer && !azureBasePath && !azureSasToken) { azureAccount = 'azure_integration_test_account' @@ -187,6 +277,8 @@ if (!azureAccount && !azureKey && !azureContainer && !azureBasePath && !azureSas azureContainer = 'container' azureBasePath = '' azureSasToken = '' + azureTenantId = '' + azureClientId = '' useFixture = true } @@ -203,6 +295,7 @@ tasks.named("processYamlRestTestResources") { tasks.named("internalClusterTest") { // this is tested explicitly in a separate test task exclude '**/AzureStorageCleanupThirdPartyTests.class' + systemProperty "AZURE_POD_IDENTITY_AUTHORITY_HOST", "127.0.0.1:1" // ensure a fast failure } tasks.named("yamlRestTest") { @@ -213,6 +306,33 @@ tasks.named("yamlRestTest") { systemProperty 'test.azure.sas_token', azureSasToken } +tasks.register("managedIdentityYamlRestTest", RestIntegTestTask) { + testClassesDirs = sourceSets.yamlRestTest.output.classesDirs + classpath = 
sourceSets.yamlRestTest.runtimeClasspath + systemProperty 'test.azure.fixture', Boolean.toString(useFixture) + systemProperty 'test.azure.account', azureAccount + systemProperty 'test.azure.container', azureContainer + // omitting key and sas_token so that we use a bearer token from the metadata service +} + +tasks.register("workloadIdentityYamlRestTest", RestIntegTestTask) { + testClassesDirs = sourceSets.yamlRestTest.output.classesDirs + classpath = sourceSets.yamlRestTest.runtimeClasspath + systemProperty 'test.azure.fixture', Boolean.toString(useFixture) + systemProperty 'test.azure.account', azureAccount + systemProperty 'test.azure.container', azureContainer + // random uuids to satisfy format requirements -- the actual values are not important + systemProperty 'test.azure.tenant_id', azureTenantId ?: "583d4f71-148a-4163-bad5-2311e13c60dc" + systemProperty 'test.azure.client_id', azureClientId ?: "86dd1b33-96c1-4a2e-92ac-b844404fc691" + // omitting key and sas_token so that we use a bearer token from workload identity +} + +if (BuildParams.inFipsJvm) { + // Cannot override the trust store in FIPS mode, and these tasks require a HTTPS fixture + tasks.named("managedIdentityYamlRestTest").configure { enabled = false } + tasks.named("workloadIdentityYamlRestTest").configure { enabled = false } +} + tasks.register("azureThirdPartyUnitTest", Test) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet internalTestSourceSet = sourceSets.getByName(InternalClusterTestPlugin.SOURCE_SET_NAME) @@ -233,4 +353,6 @@ tasks.register('azureThirdPartyTest') { tasks.named("check") { dependsOn("azureThirdPartyUnitTest") + dependsOn("managedIdentityYamlRestTest") + dependsOn("workloadIdentityYamlRestTest") } diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt b/modules/repository-azure/licenses/accessors-smart-LICENSE.txt similarity index 100% rename from x-pack/plugin/security/licenses/nimbus-jose-jwt-LICENSE.txt rename to modules/repository-azure/licenses/accessors-smart-LICENSE.txt diff --git a/modules/repository-azure/licenses/accessors-smart-NOTICE.txt b/modules/repository-azure/licenses/accessors-smart-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/asm-LICENSE.txt b/modules/repository-azure/licenses/asm-LICENSE.txt new file mode 100644 index 0000000000000..22649e87adc9b --- /dev/null +++ b/modules/repository-azure/licenses/asm-LICENSE.txt @@ -0,0 +1,80 @@ + + + + + + ASM - License + + + + +

+ License
+
+ ASM is released under the following 3-Clause BSD License:
+
ASM: a very small and fast Java bytecode manipulation framework
+Copyright (c) 2000-2011 INRIA, France Telecom
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+3. Neither the name of the copyright holders nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ + diff --git a/modules/repository-azure/licenses/asm-NOTICE.txt b/modules/repository-azure/licenses/asm-NOTICE.txt new file mode 100644 index 0000000000000..8d1c8b69c3fce --- /dev/null +++ b/modules/repository-azure/licenses/asm-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/modules/repository-azure/licenses/content-type-LICENSE.txt b/modules/repository-azure/licenses/content-type-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/repository-azure/licenses/content-type-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/repository-azure/licenses/content-type-NOTICE.txt b/modules/repository-azure/licenses/content-type-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/gson-LICENSE.txt b/modules/repository-azure/licenses/gson-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/repository-azure/licenses/gson-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/repository-azure/licenses/gson-NOTICE.txt b/modules/repository-azure/licenses/gson-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/jcip-annotations-LICENSE.txt b/modules/repository-azure/licenses/jcip-annotations-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/repository-azure/licenses/jcip-annotations-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/modules/repository-azure/licenses/jcip-annotations-NOTICE.txt b/modules/repository-azure/licenses/jcip-annotations-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/jna-LICENSE.txt b/modules/repository-azure/licenses/jna-LICENSE.txt new file mode 100644 index 0000000000000..f433b1a53f5b8 --- /dev/null +++ b/modules/repository-azure/licenses/jna-LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/modules/repository-azure/licenses/jna-NOTICE.txt b/modules/repository-azure/licenses/jna-NOTICE.txt new file mode 100644 index 0000000000000..8d1c8b69c3fce --- /dev/null +++ b/modules/repository-azure/licenses/jna-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/modules/repository-azure/licenses/jna-platform-LICENSE.txt b/modules/repository-azure/licenses/jna-platform-LICENSE.txt new file mode 100644 index 0000000000000..f433b1a53f5b8 --- /dev/null +++ b/modules/repository-azure/licenses/jna-platform-LICENSE.txt @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/modules/repository-azure/licenses/jna-platform-NOTICE.txt b/modules/repository-azure/licenses/jna-platform-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/json-smart-LICENSE.txt b/modules/repository-azure/licenses/json-smart-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/repository-azure/licenses/json-smart-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/repository-azure/licenses/json-smart-NOTICE.txt b/modules/repository-azure/licenses/json-smart-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/lang-tag-LICENSE.txt b/modules/repository-azure/licenses/lang-tag-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/repository-azure/licenses/lang-tag-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/repository-azure/licenses/lang-tag-NOTICE.txt b/modules/repository-azure/licenses/lang-tag-NOTICE.txt new file mode 100644 index 0000000000000..37a85f6850d57 --- /dev/null +++ b/modules/repository-azure/licenses/lang-tag-NOTICE.txt @@ -0,0 +1,14 @@ +Nimbus Language Tags + +Copyright 2012-2016, Connect2id Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
diff --git a/modules/repository-azure/licenses/msal4j-LICENSE.txt b/modules/repository-azure/licenses/msal4j-LICENSE.txt new file mode 100644 index 0000000000000..21071075c2459 --- /dev/null +++ b/modules/repository-azure/licenses/msal4j-LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/modules/repository-azure/licenses/msal4j-NOTICE.txt b/modules/repository-azure/licenses/msal4j-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/msal4j-persistence-extension-LICENSE.txt b/modules/repository-azure/licenses/msal4j-persistence-extension-LICENSE.txt new file mode 100644 index 0000000000000..4b1ad51b2f0ef --- /dev/null +++ b/modules/repository-azure/licenses/msal4j-persistence-extension-LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/modules/repository-azure/licenses/msal4j-persistence-extension-NOTICE.txt b/modules/repository-azure/licenses/msal4j-persistence-extension-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/nimbus-jose-jwt-LICENSE.txt b/modules/repository-azure/licenses/nimbus-jose-jwt-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/repository-azure/licenses/nimbus-jose-jwt-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt b/modules/repository-azure/licenses/nimbus-jose-jwt-NOTICE.txt similarity index 100% rename from x-pack/plugin/security/licenses/nimbus-jose-jwt-NOTICE.txt rename to modules/repository-azure/licenses/nimbus-jose-jwt-NOTICE.txt diff --git a/modules/repository-azure/licenses/oauth2-oidc-sdk-LICENSE.txt b/modules/repository-azure/licenses/oauth2-oidc-sdk-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/repository-azure/licenses/oauth2-oidc-sdk-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/repository-azure/licenses/oauth2-oidc-sdk-NOTICE.txt b/modules/repository-azure/licenses/oauth2-oidc-sdk-NOTICE.txt new file mode 100644 index 0000000000000..5e111b04cfc45 --- /dev/null +++ b/modules/repository-azure/licenses/oauth2-oidc-sdk-NOTICE.txt @@ -0,0 +1,14 @@ +Nimbus OAuth 2.0 SDK with OpenID Connect extensions + +Copyright 2012-2018, Connect2id Ltd and contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
diff --git a/modules/repository-azure/licenses/protobuf-java-LICENSE.txt b/modules/repository-azure/licenses/protobuf-java-LICENSE.txt new file mode 100644 index 0000000000000..19b305b00060a --- /dev/null +++ b/modules/repository-azure/licenses/protobuf-java-LICENSE.txt @@ -0,0 +1,32 @@ +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. diff --git a/modules/repository-azure/licenses/protobuf-java-NOTICE.txt b/modules/repository-azure/licenses/protobuf-java-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/siv-mode-LICENSE.txt b/modules/repository-azure/licenses/siv-mode-LICENSE.txt new file mode 100644 index 0000000000000..edfa1ebd93281 --- /dev/null +++ b/modules/repository-azure/licenses/siv-mode-LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2015-2017 Cryptomator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/modules/repository-azure/licenses/siv-mode-NOTICE.txt b/modules/repository-azure/licenses/siv-mode-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/licenses/tink-LICENSE.txt b/modules/repository-azure/licenses/tink-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/repository-azure/licenses/tink-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/repository-azure/licenses/tink-NOTICE.txt b/modules/repository-azure/licenses/tink-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index e916b02e62b8e..15d47f6bec800 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -153,9 +153,8 @@ long getUploadBlockSize() { @SuppressForbidden(reason = "this test uses a HttpHandler to emulate an Azure endpoint") private static class AzureBlobStoreHttpHandler extends AzureHttpHandler implements BlobStoreHttpHandler { - AzureBlobStoreHttpHandler(final String account, final String container) { - super(account, container); + super(account, container, null /* no auth header validation - sometimes it's omitted in these tests (TODO why?) */); } } diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureRepositoryMissingCredentialsIT.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureRepositoryMissingCredentialsIT.java new file mode 100644 index 0000000000000..0ce8b7a3e8ea4 --- /dev/null +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureRepositoryMissingCredentialsIT.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoryVerificationException; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; + +public class AzureRepositoryMissingCredentialsIT extends ESIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), AzureRepositoryPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(AzureStorageSettings.ACCOUNT_SETTING.getConcreteSettingForNamespace("default").getKey(), "test-account"); + return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).setSecureSettings(secureSettings).build(); + } + + public void testMissingCredentialsException() { + assertThat( + asInstanceOf( + RepositoryVerificationException.class, + ExceptionsHelper.unwrapCause( + safeAwaitFailure( + AcknowledgedResponse.class, + l -> client().execute( + TransportPutRepositoryAction.TYPE, + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").type("azure"), + l + ) + ) + ) + ).getCause().getMessage(), + allOf( + containsString("EnvironmentCredential authentication unavailable"), + containsString("WorkloadIdentityCredential authentication unavailable"), + containsString("Managed Identity authentication is not available"), + containsString("SharedTokenCacheCredential authentication unavailable") + ) + ); + } +} diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 5436802bff71a..80bd788da9a9f 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -10,10 +10,12 @@ import fixture.azure.AzureHttpFixture; +import com.azure.core.exception.HttpResponseException; import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.blob.BlobServiceClient; import com.azure.storage.blob.models.BlobStorageException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -44,11 +46,16 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true")); + private static final String 
AZURE_ACCOUNT = System.getProperty("test.azure.account"); + @ClassRule public static AzureHttpFixture fixture = new AzureHttpFixture( - USE_FIXTURE, - System.getProperty("test.azure.account"), - System.getProperty("test.azure.container") + USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE, + AZURE_ACCOUNT, + System.getProperty("test.azure.container"), + System.getProperty("test.azure.tenant_id"), + System.getProperty("test.azure.client_id"), + AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_ACCOUNT) ); @Override @@ -183,8 +190,8 @@ public void testMultiBlockUpload() throws Exception { public void testReadFromPositionLargerThanBlobLength() { testReadFromPositionLargerThanBlobLength( - e -> asInstanceOf(BlobStorageException.class, e.getCause()).getStatusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED - .getStatus() + e -> asInstanceOf(BlobStorageException.class, ExceptionsHelper.unwrap(e, HttpResponseException.class)) + .getStatusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus() ); } } diff --git a/modules/repository-azure/src/main/java/module-info.java b/modules/repository-azure/src/main/java/module-info.java index cc34996ee2d1f..bef2a175a112f 100644 --- a/modules/repository-azure/src/main/java/module-info.java +++ b/modules/repository-azure/src/main/java/module-info.java @@ -21,6 +21,7 @@ requires com.azure.http.netty; requires com.azure.storage.blob; requires com.azure.storage.common; + requires com.azure.identity; requires io.netty.buffer; requires io.netty.transport; diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index d271c8d1e99a1..eb643b3c1460d 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -8,11 +8,11 @@ package org.elasticsearch.repositories.azure; -import com.azure.storage.blob.models.BlobStorageException; +import com.azure.core.exception.HttpResponseException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.core.util.Throwables; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; @@ -69,13 +69,13 @@ private InputStream openInputStream(OperationPurpose purpose, String blobName, l try { return blobStore.getInputStream(blobKey, position, length); } catch (Exception e) { - Throwable rootCause = Throwables.getRootCause(e); - if (rootCause instanceof BlobStorageException blobStorageException) { - if (blobStorageException.getStatusCode() == RestStatus.NOT_FOUND.getStatus()) { + if (ExceptionsHelper.unwrap(e, HttpResponseException.class) instanceof HttpResponseException httpResponseException) { + final var httpStatusCode = httpResponseException.getResponse().getStatusCode(); + if (httpStatusCode == RestStatus.NOT_FOUND.getStatus()) { throw new NoSuchFileException("Blob [" + blobKey + "] not found"); } - if (blobStorageException.getStatusCode() == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()) { - throw new RequestedRangeNotSatisfiedException(blobKey, position, length == null ? 
-1 : length, blobStorageException); + if (httpStatusCode == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()) { + throw new RequestedRangeNotSatisfiedException(blobKey, position, length == null ? -1 : length, e); } } throw new IOException("Unable to get input stream for blob [" + blobKey + "]", e); diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java index cdfd83b79b370..911d1c167db7f 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java @@ -28,6 +28,7 @@ import com.azure.core.http.ProxyOptions; import com.azure.core.http.netty.NettyAsyncHttpClientBuilder; import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.identity.DefaultAzureCredentialBuilder; import com.azure.storage.blob.BlobServiceAsyncClient; import com.azure.storage.blob.BlobServiceClient; import com.azure.storage.blob.BlobServiceClientBuilder; @@ -62,6 +63,17 @@ class AzureClientProvider extends AbstractLifecycleComponent { private static final int DEFAULT_EVENT_LOOP_THREAD_COUNT = 1; private static final int PENDING_CONNECTION_QUEUE_SIZE = -1; // see ConnectionProvider.ConnectionPoolSpec.pendingAcquireMaxCount + /** + * Test-only system property to disable instance discovery for workload identity authentication in the Azure SDK. + * This is necessary since otherwise the SDK will attempt to verify identities via a real host + * (e.g. https://login.microsoft.com/) for + * workload identity authentication. This is incompatible with our test environment. 
+ */ + private static final boolean DISABLE_INSTANCE_DISCOVERY = System.getProperty( + "tests.azure.credentials.disable_instance_discovery", + "false" + ).equals("true"); + static final Setting EVENT_LOOP_THREAD_COUNT = Setting.intSetting( "repository.azure.http_client.event_loop_executor_thread_count", DEFAULT_EVENT_LOOP_THREAD_COUNT, @@ -168,6 +180,14 @@ AzureBlobServiceClient createClient( .httpClient(httpClient) .retryOptions(retryOptions); + if (settings.hasCredentials() == false) { + final DefaultAzureCredentialBuilder credentialBuilder = new DefaultAzureCredentialBuilder().executorService(eventLoopGroup); + if (DISABLE_INSTANCE_DISCOVERY) { + credentialBuilder.disableInstanceDiscovery(); + } + builder.credential(credentialBuilder.build()); + } + if (successfulRequestConsumer != null) { builder.addPolicy(new SuccessfulRequestTracker(successfulRequestConsumer)); } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 388474acc75ea..c8c0b15db5ebe 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -26,6 +26,7 @@ import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.core.Strings.format; @@ -175,4 +176,9 @@ protected ByteSizeValue chunkSize() { public boolean isReadOnly() { return readonly; } + + @Override + protected Set getExtraUsageFeatures() { + return storageService.getExtraUsageFeatures(Repository.CLIENT_NAME.get(getMetadata().settings())); + } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 73d969ee31b19..f6b1c2775926c 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -27,6 +27,8 @@ import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.xcontent.NamedXContentRegistry; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -42,9 +44,8 @@ public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, R public static final String NETTY_EVENT_LOOP_THREAD_POOL_NAME = "azure_event_loop"; static { - // Trigger static initialization with the plugin class loader - // so we have access to the proper xml parser - JacksonAdapter.createDefaultSerializerAdapter(); + // Trigger static initialization with the plugin class loader so we have access to the proper xml parser + AccessController.doPrivileged((PrivilegedAction) JacksonAdapter::createDefaultSerializerAdapter); } // protected for testing diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 0d6cd7bf3d246..09088004759a8 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ 
b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -24,6 +24,7 @@ import java.net.Proxy; import java.net.URL; import java.util.Map; +import java.util.Set; import java.util.function.BiConsumer; import static java.util.Collections.emptyMap; @@ -165,4 +166,15 @@ public void refreshSettings(Map<String, AzureStorageSettings> clientsSettings) { this.storageSettings = Map.copyOf(clientsSettings); // clients are built lazily by {@link client(String, LocationMode)} } + + /** + * For Azure repositories, we report the different kinds of credentials in use in the telemetry. + */ + public Set<String> getExtraUsageFeatures(String clientName) { + try { + return getClientSettings(clientName).credentialsUsageFeatures(); + } catch (Exception e) { + return Set.of(); + } + } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 93a388dea98ae..2333a1fdb9e93 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -29,6 +29,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Set; final class AzureStorageSettings { @@ -124,15 +125,13 @@ final class AzureStorageSettings { ); private final String account; - - @Nullable - private final String sasToken; - private final String connectString; private final String endpointSuffix; private final TimeValue timeout; private final int maxRetries; private final Proxy proxy; + private final boolean hasCredentials; + private final Set<String> credentialsUsageFeatures; private AzureStorageSettings( String account, @@ -148,11 +147,17 @@ private AzureStorageSettings( String secondaryEndpoint ) { this.account = account; - this.sasToken = sasToken; this.connectString = buildConnectString(account, key, sasToken, endpointSuffix, endpoint, secondaryEndpoint); + this.hasCredentials = Strings.hasText(key) || Strings.hasText(sasToken); this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; + this.credentialsUsageFeatures = Strings.hasText(key) ? Set.of("uses_key_credentials") + : Strings.hasText(sasToken) ? Set.of("uses_sas_token") + : SocketAccess.doPrivilegedException(() -> System.getenv("AZURE_FEDERATED_TOKEN_FILE")) == null + ? 
Set.of("uses_default_credentials", "uses_managed_identity") + : Set.of("uses_default_credentials", "uses_workload_identity"); + // Register the proxy if we have any // Validate proxy settings if (proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) { @@ -203,33 +208,32 @@ private static String buildConnectString( ) { final boolean hasSasToken = Strings.hasText(sasToken); final boolean hasKey = Strings.hasText(key); - if (hasSasToken == false && hasKey == false) { - throw new SettingsException("Neither a secret key nor a shared access token was set."); - } if (hasSasToken && hasKey) { - throw new SettingsException("Both a secret as well as a shared access token were set."); + throw new SettingsException("Both a secret as well as a shared access token were set for account [" + account + "]"); } final StringBuilder connectionStringBuilder = new StringBuilder(); connectionStringBuilder.append("DefaultEndpointsProtocol=https").append(";AccountName=").append(account); if (hasKey) { connectionStringBuilder.append(";AccountKey=").append(key); - } else { + } else if (hasSasToken) { connectionStringBuilder.append(";SharedAccessSignature=").append(sasToken); + } else { + connectionStringBuilder.append(";AccountKey=none"); // required for validation, but ignored } final boolean hasEndpointSuffix = Strings.hasText(endpointSuffix); final boolean hasEndpoint = Strings.hasText(endpoint); final boolean hasSecondaryEndpoint = Strings.hasText(secondaryEndpoint); if (hasEndpointSuffix && hasEndpoint) { - throw new SettingsException("Both an endpoint suffix as well as a primary endpoint were set"); + throw new SettingsException("Both an endpoint suffix as well as a primary endpoint were set for account [" + account + "]"); } if (hasEndpointSuffix && hasSecondaryEndpoint) { - throw new SettingsException("Both an endpoint suffix as well as a secondary endpoint were set"); + throw new SettingsException("Both an endpoint suffix as well as a secondary endpoint were set for account [" + account + "]"); } if (hasEndpoint == false && hasSecondaryEndpoint) { - throw new SettingsException("A primary endpoint is required when setting a secondary endpoint"); + throw new SettingsException("A primary endpoint is required when setting a secondary endpoint for account [" + account + "]"); } if (hasEndpointSuffix) { @@ -318,6 +322,10 @@ private static T getValue(Settings settings, String groupName, Setting se private static final String BLOB_ENDPOINT_NAME = "BlobEndpoint"; private static final String BLOB_SECONDARY_ENDPOINT_NAME = "BlobSecondaryEndpoint"; + public boolean hasCredentials() { + return hasCredentials; + } + record StorageEndpoint(String primaryURI, @Nullable String secondaryURI) {} StorageEndpoint getStorageEndpoint() { @@ -366,4 +374,8 @@ private String deriveURIFromSettings(boolean isPrimary) { throw new IllegalArgumentException(e); } } + + public Set credentialsUsageFeatures() { + return credentialsUsageFeatures; + } } diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureClientProviderTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureClientProviderTests.java index e9b438339f361..79fa2afe5d49e 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureClientProviderTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureClientProviderTests.java @@ -92,9 +92,10 @@ public void 
testCanNotCreateAClientWithSecondaryLocationWithoutAProperEndpoint() LocationMode locationMode = LocationMode.SECONDARY_ONLY; RequestRetryOptions requestRetryOptions = new RequestRetryOptions(); - expectThrows(IllegalArgumentException.class, () -> { - azureClientProvider.createClient(storageSettings, locationMode, requestRetryOptions, null, EMPTY_CONSUMER); - }); + expectThrows( + IllegalArgumentException.class, + () -> azureClientProvider.createClient(storageSettings, locationMode, requestRetryOptions, null, EMPTY_CONSUMER) + ); } private static String encodeKey(final String value) { diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 6f43f253db4c8..3f9fba35d9f4a 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -186,23 +186,20 @@ public void testReinitClientWrongSettings() throws IOException { secureSettings1.setString("azure.client.azure1.account", "myaccount1"); secureSettings1.setString("azure.client.azure1.key", encodeKey("mykey11")); final Settings settings1 = Settings.builder().setSecureSettings(secureSettings1).build(); + final MockSecureSettings secureSettings2 = new MockSecureSettings(); - secureSettings2.setString("azure.client.azure1.account", "myaccount1"); - // missing key + secureSettings2.setString("azure.client.azure1.account", "myaccount3"); + secureSettings2.setString("azure.client.azure1.key", encodeKey("mykey33")); + secureSettings2.setString("azure.client.azure1.sas_token", encodeKey("mysasToken33")); final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); - final MockSecureSettings secureSettings3 = new MockSecureSettings(); - secureSettings3.setString("azure.client.azure1.account", "myaccount3"); - secureSettings3.setString("azure.client.azure1.key", encodeKey("mykey33")); - secureSettings3.setString("azure.client.azure1.sas_token", encodeKey("mysasToken33")); - final Settings settings3 = Settings.builder().setSecureSettings(secureSettings3).build(); try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { final AzureStorageService azureStorageService = plugin.azureStoreService.get(); AzureBlobServiceClient client11 = azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); - final SettingsException e1 = expectThrows(SettingsException.class, () -> plugin.reload(settings2)); - assertThat(e1.getMessage(), is("Neither a secret key nor a shared access token was set.")); - final SettingsException e2 = expectThrows(SettingsException.class, () -> plugin.reload(settings3)); - assertThat(e2.getMessage(), is("Both a secret as well as a shared access token were set.")); + assertThat( + expectThrows(SettingsException.class, () -> plugin.reload(settings2)).getMessage(), + is("Both a secret as well as a shared access token were set for account [myaccount3]") + ); // existing client untouched assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); } @@ -499,7 +496,7 @@ public void testEndpointSettingValidation() { .build() ) ); - assertEquals("A primary endpoint is required when setting a secondary 
endpoint", e.getMessage()); + assertEquals("A primary endpoint is required when setting a secondary endpoint for account [myaccount1]", e.getMessage()); } { @@ -513,7 +510,7 @@ public void testEndpointSettingValidation() { .build() ) ); - assertEquals("Both an endpoint suffix as well as a secondary endpoint were set", e.getMessage()); + assertEquals("Both an endpoint suffix as well as a secondary endpoint were set for account [myaccount1]", e.getMessage()); } } diff --git a/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java b/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java index b9d5705aff91e..515b1aa2b6d6a 100644 --- a/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java +++ b/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java @@ -13,22 +13,56 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Booleans; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TestTrustStore; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.junit.ClassRule; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import java.util.Map; +import java.util.function.Predicate; + public class RepositoryAzureClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true")); + private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false; + // TODO when https://github.com/elastic/elasticsearch/issues/111532 addressed, use a HTTPS fixture in FIPS mode too + private static final String AZURE_TEST_ACCOUNT = System.getProperty("test.azure.account"); private static final String AZURE_TEST_CONTAINER = System.getProperty("test.azure.container"); private static final String AZURE_TEST_KEY = System.getProperty("test.azure.key"); private static final String AZURE_TEST_SASTOKEN = System.getProperty("test.azure.sas_token"); + private static final String AZURE_TEST_TENANT_ID = System.getProperty("test.azure.tenant_id"); + private static final String AZURE_TEST_CLIENT_ID = System.getProperty("test.azure.client_id"); - private static AzureHttpFixture fixture = new AzureHttpFixture(USE_FIXTURE, AZURE_TEST_ACCOUNT, AZURE_TEST_CONTAINER); + private static final AzureHttpFixture fixture = new AzureHttpFixture( + USE_HTTPS_FIXTURE ? AzureHttpFixture.Protocol.HTTPS : USE_FIXTURE ? 
AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE, + AZURE_TEST_ACCOUNT, + AZURE_TEST_CONTAINER, + AZURE_TEST_TENANT_ID, + AZURE_TEST_CLIENT_ID, + decideAuthHeaderPredicate() + ); + + private static Predicate<String> decideAuthHeaderPredicate() { + if (Strings.hasText(AZURE_TEST_KEY) || Strings.hasText(AZURE_TEST_SASTOKEN)) { + return AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT); + } else if (Strings.hasText(AZURE_TEST_TENANT_ID) && Strings.hasText(AZURE_TEST_CLIENT_ID)) { + return AzureHttpFixture.WORK_IDENTITY_BEARER_TOKEN_PREDICATE; + } else if (Strings.hasText(AZURE_TEST_TENANT_ID) || Strings.hasText(AZURE_TEST_CLIENT_ID)) { + fail(null, "Both [test.azure.tenant_id] and [test.azure.client_id] must be set if either is set"); + } + return AzureHttpFixture.MANAGED_IDENTITY_BEARER_TOKEN_PREDICATE; + } + + private static TestTrustStore trustStore = new TestTrustStore( + () -> AzureHttpFixture.class.getResourceAsStream("azure-http-fixture.pem") + ); private static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-azure") @@ -45,14 +79,31 @@ public class RepositoryAzureClientYamlTestSuiteIT extends ESClientYamlSuiteTestC ) .setting( "azure.client.integration_test.endpoint_suffix", - () -> "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + fixture.getAddress(), + () -> "ignored;DefaultEndpointsProtocol=https;BlobEndpoint=" + fixture.getAddress(), s -> USE_FIXTURE ) + .systemProperty( + "tests.azure.credentials.disable_instance_discovery", + () -> "true", + s -> USE_HTTPS_FIXTURE && Strings.hasText(AZURE_TEST_CLIENT_ID) && Strings.hasText(AZURE_TEST_TENANT_ID) + ) + .systemProperty("AZURE_POD_IDENTITY_AUTHORITY_HOST", fixture::getMetadataAddress, s -> USE_FIXTURE) + .systemProperty("AZURE_AUTHORITY_HOST", fixture::getOAuthTokenServiceAddress, s -> USE_HTTPS_FIXTURE) + .systemProperty("AZURE_CLIENT_ID", () -> AZURE_TEST_CLIENT_ID, s -> Strings.hasText(AZURE_TEST_CLIENT_ID)) + .systemProperty("AZURE_TENANT_ID", () -> AZURE_TEST_TENANT_ID, s -> Strings.hasText(AZURE_TEST_TENANT_ID)) + .configFile("storage-azure/azure-federated-token", Resource.fromString(fixture.getFederatedToken())) + .environment( + nodeSpec -> USE_HTTPS_FIXTURE && Strings.hasText(AZURE_TEST_CLIENT_ID) && Strings.hasText(AZURE_TEST_TENANT_ID) + ? 
Map.of("AZURE_FEDERATED_TOKEN_FILE", "${ES_PATH_CONF}/storage-azure/azure-federated-token") + : Map.of() + ) .setting("thread_pool.repository_azure.max", () -> String.valueOf(randomIntBetween(1, 10)), s -> USE_FIXTURE) + .systemProperty("javax.net.ssl.trustStore", () -> trustStore.getTrustStorePath().toString(), s -> USE_HTTPS_FIXTURE) + .systemProperty("javax.net.ssl.trustStoreType", () -> "jks", s -> USE_HTTPS_FIXTURE) .build(); - @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(fixture).around(cluster); + @ClassRule(order = 1) + public static TestRule ruleChain = RuleChain.outerRule(fixture).around(trustStore).around(cluster); public RepositoryAzureClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index 299183f26d9dc..a4a7d0b22a0ed 100644 --- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -235,6 +235,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.azure.count: 1 } + - gte: { repositories.azure.read_write: 1 } + --- teardown: diff --git a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml index 68d61be4983c5..e8c34a4b6a20b 100644 --- a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml +++ b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml @@ -232,6 +232,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.gcs.count: 1 } + - gte: { repositories.gcs.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index 640293ecb80b0..31fa47fb7b196 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -36,8 +36,9 @@ import java.util.Map; import java.util.Queue; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; -import static org.elasticsearch.repositories.RepositoriesMetrics.HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesMetrics.HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_HISTOGRAM; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL; import 
static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_TOTAL; @@ -52,6 +53,7 @@ import static org.elasticsearch.rest.RestStatus.TOO_MANY_REQUESTS; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") // Need to set up a new cluster for each test because cluster settings use randomized authentication settings @@ -98,6 +100,35 @@ private static BlobContainer getBlobContainer(String dataNodeName, String reposi return blobStoreRepository.blobStore().blobContainer(BlobPath.EMPTY.add(randomIdentifier())); } + public void testHttpRequestTimeCaptureInMilliseconds() throws IOException { + final String repository = createRepository(randomRepositoryName()); + final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData); + final TestTelemetryPlugin plugin = getPlugin(dataNodeName); + final OperationPurpose purpose = randomFrom(OperationPurpose.values()); + final BlobContainer blobContainer = getBlobContainer(dataNodeName, repository); + final String blobName = randomIdentifier(); + + long before = System.nanoTime(); + blobContainer.writeBlob(purpose, blobName, new BytesArray(randomBytes(between(10, 1000))), false); + long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - before); + assertThat(getLongHistogramValue(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.PUT_OBJECT), lessThanOrEqualTo(elapsed)); + + plugin.resetMeter(); + before = System.nanoTime(); + blobContainer.readBlob(purpose, blobName).close(); + elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - before); + assertThat(getLongHistogramValue(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.GET_OBJECT), lessThanOrEqualTo(elapsed)); + + plugin.resetMeter(); + before = System.nanoTime(); + blobContainer.deleteBlobsIgnoringIfNotExists(purpose, Iterators.single(blobName)); + elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - before); + assertThat( + getLongHistogramValue(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.DELETE_OBJECTS), + lessThanOrEqualTo(elapsed) + ); + } + public void testMetricsWithErrors() throws IOException { final String repository = createRepository(randomRepositoryName()); @@ -121,7 +152,7 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.PUT_OBJECT), equalTo(2L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.PUT_OBJECT), equalTo(2L * batch)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.PUT_OBJECT), equalTo(batch)); } // Get not found @@ -141,7 +172,7 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.GET_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.GET_OBJECT), 
equalTo(batch)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); // Make sure we don't hit the request range not satisfied counters assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_REQUEST_RANGE_NOT_SATISFIED_TOTAL, Operation.GET_OBJECT), equalTo(0L)); @@ -164,7 +195,7 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.LIST_OBJECTS), equalTo(5L * batch)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(5L * batch)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.LIST_OBJECTS), equalTo(batch)); } // Delete to clean up @@ -176,7 +207,7 @@ public void testMetricsWithErrors() throws IOException { assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.DELETE_OBJECTS), equalTo(0L)); assertThat(getLongHistogramValue(plugin, METRIC_EXCEPTIONS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(0L)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(0L)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(1L)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.DELETE_OBJECTS), equalTo(1L)); } public void testMetricsForRequestRangeNotSatisfied() { @@ -208,7 +239,7 @@ public void testMetricsForRequestRangeNotSatisfied() { ); assertThat(getLongCounterValue(plugin, METRIC_THROTTLES_TOTAL, Operation.GET_OBJECT), equalTo(2 * batch)); assertThat(getLongHistogramValue(plugin, METRIC_THROTTLES_HISTOGRAM, Operation.GET_OBJECT), equalTo(2 * batch)); - assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); + assertThat(getNumberOfMeasurements(plugin, HTTP_REQUEST_TIME_IN_MILLIS_HISTOGRAM, Operation.GET_OBJECT), equalTo(batch)); } } diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 1132111826563..1ab370ad203fc 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -10,13 +10,20 @@ import fixture.s3.S3HttpHandler; import com.amazonaws.http.AmazonHttpClient; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.MultipartUpload; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.LogEvent; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.RepositoryMetadata; 
import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -54,6 +61,7 @@ import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -70,6 +78,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -81,6 +90,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasEntry; @@ -451,6 +461,106 @@ private Map getServerMetrics() { return Collections.emptyMap(); } + public void testMultipartUploadCleanup() { + final String repoName = randomRepositoryName(); + createRepository(repoName, repositorySettings(repoName), true); + + createIndex("test-idx-1"); + for (int i = 0; i < 100; i++) { + prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + client().admin().indices().prepareRefresh().get(); + + final String snapshotName = randomIdentifier(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + final var repository = asInstanceOf( + S3Repository.class, + internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName) + ); + final var blobStore = asInstanceOf(S3BlobStore.class, asInstanceOf(BlobStoreWrapper.class, repository.blobStore()).delegate()); + + try (var clientRef = blobStore.clientReference()) { + final var danglingBlobName = randomIdentifier(); + final var initiateMultipartUploadRequest = new InitiateMultipartUploadRequest( + blobStore.bucket(), + blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName + ); + initiateMultipartUploadRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + final var multipartUploadResult = clientRef.client().initiateMultipartUpload(initiateMultipartUploadRequest); + + final var listMultipartUploadsRequest = new ListMultipartUploadsRequest(blobStore.bucket()).withPrefix( + repository.basePath().buildAsString() + ); + listMultipartUploadsRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + assertEquals( + List.of(multipartUploadResult.getUploadId()), + clientRef.client() + .listMultipartUploads(listMultipartUploadsRequest) + 
.getMultipartUploads() + .stream() + .map(MultipartUpload::getUploadId) + .toList() + ); + + final var seenCleanupLogLatch = new CountDownLatch(1); + MockLog.assertThatLogger(() -> { + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)); + safeAwait(seenCleanupLogLatch); + }, + S3BlobContainer.class, + new MockLog.SeenEventExpectation( + "found-dangling", + S3BlobContainer.class.getCanonicalName(), + Level.INFO, + "found [1] possibly-dangling multipart uploads; will clean them up after finalizing the current snapshot deletions" + ), + new MockLog.SeenEventExpectation( + "cleaned-dangling", + S3BlobContainer.class.getCanonicalName(), + Level.INFO, + Strings.format( + "cleaned up dangling multipart upload [%s] of blob [%s]*test-multipart-upload/%s]", + multipartUploadResult.getUploadId(), + repoName, + danglingBlobName + ) + ) { + @Override + public void match(LogEvent event) { + super.match(event); + if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) { + seenCleanupLogLatch.countDown(); + } + } + } + ); + + assertThat( + clientRef.client() + .listMultipartUploads(listMultipartUploadsRequest) + .getMultipartUploads() + .stream() + .map(MultipartUpload::getUploadId) + .toList(), + empty() + ); + } + } + /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ @@ -592,6 +702,9 @@ public void maybeTrack(final String rawRequest, Headers requestHeaders) { trackRequest("ListObjects"); metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.LIST_OBJECTS, purpose), k -> new AtomicLong()) .incrementAndGet(); + } else if (Regex.simpleMatch("GET /*/?uploads&*", request)) { + // TODO track ListMultipartUploads requests + logger.info("--> ListMultipartUploads not tracked [{}] with parsed purpose [{}]", request, purpose.getKey()); } else if (Regex.simpleMatch("GET /*/*", request)) { trackRequest("GetObject"); metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.GET_OBJECT, purpose), k -> new AtomicLong()) diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index b5fc37e859b9b..cf3e73df2aee2 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -28,13 +28,17 @@ import com.amazonaws.services.s3.model.UploadPartResult; import com.amazonaws.util.ValidationUtils; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.ThreadedActionListener; +import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; @@ -54,6 +58,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import 
org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.ChunkedBlobOutputStream; import org.elasticsearch.repositories.s3.S3BlobStore.Operation; import org.elasticsearch.threadpool.ThreadPool; @@ -644,7 +649,7 @@ void run(BytesReference expected, BytesReference updated, ActionListenerandThen((l, ignored) -> getRegister(purpose, rawKey, l)) + .andThen(l -> getRegister(purpose, rawKey, l)) // Step 5: Perform the compare-and-swap by completing our upload iff the witnessed value matches the expected value. @@ -912,4 +917,94 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCountingRunnable refs) { + try (var clientReference = blobStore.clientReference()) { + final var bucket = blobStore.bucket(); + final var request = new ListMultipartUploadsRequest(bucket).withPrefix(keyPath).withMaxUploads(maxUploads); + request.putCustomQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, OperationPurpose.SNAPSHOT_DATA.getKey()); + final var multipartUploadListing = SocketAccess.doPrivileged(() -> clientReference.client().listMultipartUploads(request)); + final var multipartUploads = multipartUploadListing.getMultipartUploads(); + if (multipartUploads.isEmpty()) { + logger.debug("found no multipart uploads to clean up"); + return ActionListener.noop(); + } else { + // the uploads are only _possibly_ dangling because it's also possible we're no longer the master and the new master has + // started some more shard snapshots + if (multipartUploadListing.isTruncated()) { + logger.info(""" + found at least [{}] possibly-dangling multipart uploads; will clean up the first [{}] after finalizing \ + the current snapshot deletions, and will check for further possibly-dangling multipart uploads in future \ + snapshot deletions""", multipartUploads.size(), multipartUploads.size()); + } else { + logger.info(""" + found [{}] possibly-dangling multipart uploads; \ + will clean them up after finalizing the current snapshot deletions""", multipartUploads.size()); + } + return newMultipartUploadCleanupListener( + refs, + multipartUploads.stream().map(u -> new AbortMultipartUploadRequest(bucket, u.getKey(), u.getUploadId())).toList() + ); + } + } catch (Exception e) { + // Cleanup is a best-effort thing, we can't do anything better than log and carry on here.
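For readers skimming the diff, the cleanup above reduces to two S3 calls: a prefix-scoped ListMultipartUploads to find candidates, then AbortMultipartUpload for each entry, with truncated listings deferred to a later snapshot deletion. A minimal sketch of that shape against a bare AWS SDK v1 client follows; the class and method names are illustrative, and it deliberately omits the SocketAccess, OperationPurpose and logging plumbing used in the real change.

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
    import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
    import com.amazonaws.services.s3.model.MultipartUpload;
    import com.amazonaws.services.s3.model.MultipartUploadListing;

    class DanglingMultipartUploadCleanupSketch {
        /** Best-effort: list multipart uploads under the repository prefix and abort each one. */
        static void abortDanglingUploads(AmazonS3 client, String bucket, String prefix, int maxUploads) {
            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket).withPrefix(prefix)
                .withMaxUploads(maxUploads);
            MultipartUploadListing listing = client.listMultipartUploads(listRequest);
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                try {
                    client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, upload.getKey(), upload.getUploadId()));
                } catch (Exception e) {
                    // best-effort: the real implementation only logs here and never fails the snapshot deletion
                }
            }
            // listing.isTruncated() == true means more candidates remain; they are handled by a later snapshot deletion
        }
    }

In the change itself the listing happens eagerly, but the aborts are deferred behind a listener that only fires after the snapshot deletion has successfully written the updated index-N blob, which is what guards against racing with shard snapshots started by a newly elected master.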
+ logger.warn("failure while checking for possibly-dangling multipart uploads", e); + return ActionListener.noop(); + } + } + + private ActionListener newMultipartUploadCleanupListener( + RefCountingRunnable refs, + List abortMultipartUploadRequests + ) { + return new ThreadedActionListener<>(blobStore.getSnapshotExecutor(), ActionListener.releaseAfter(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + try (var clientReference = blobStore.clientReference()) { + for (final var abortMultipartUploadRequest : abortMultipartUploadRequests) { + abortMultipartUploadRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + try { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortMultipartUploadRequest)); + logger.info( + "cleaned up dangling multipart upload [{}] of blob [{}][{}][{}]", + abortMultipartUploadRequest.getUploadId(), + blobStore.getRepositoryMetadata().name(), + abortMultipartUploadRequest.getBucketName(), + abortMultipartUploadRequest.getKey() + ); + } catch (Exception e) { + // Cleanup is a best-effort thing, we can't do anything better than log and carry on here. Note that any failure + // is surprising, even a 404 means that something else aborted/completed the upload at a point where there + // should be no other processes interacting with the repository. + logger.warn( + Strings.format( + "failed to clean up multipart upload [{}] of blob [{}][{}][{}]", + abortMultipartUploadRequest.getUploadId(), + blobStore.getRepositoryMetadata().name(), + abortMultipartUploadRequest.getBucketName(), + abortMultipartUploadRequest.getKey() + ), + e + ); + } + } + } + } + + @Override + public void onFailure(Exception e) { + logger.log( + MasterService.isPublishFailureException(e) + || (e instanceof RepositoryException repositoryException + && repositoryException.getCause() instanceof Exception cause + && MasterService.isPublishFailureException(cause)) ? 
Level.DEBUG : Level.WARN, + "failed to start cleanup of dangling multipart uploads", + e + ); + } + }, refs.acquire())); + } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 5af53364fb765..03605d50750f0 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -224,11 +224,13 @@ private void maybeRecordHttpRequestTime(Request request) { return; } - final long totalTimeInMicros = getTotalTimeInMicros(requestTimesIncludingRetries); - if (totalTimeInMicros == 0) { + final long totalTimeInNanos = getTotalTimeInNanos(requestTimesIncludingRetries); + if (totalTimeInNanos == 0) { logger.warn("Expected HttpRequestTime to be tracked for request [{}] but found no count.", request); } else { - s3RepositoriesMetrics.common().httpRequestTimeInMicroHistogram().record(totalTimeInMicros, attributes); + s3RepositoriesMetrics.common() + .httpRequestTimeInMillisHistogram() + .record(TimeUnit.NANOSECONDS.toMillis(totalTimeInNanos), attributes); } } @@ -271,18 +273,20 @@ private static long getCountForMetric(TimingInfo info, AWSRequestMetrics.Field f } } - private static long getTotalTimeInMicros(List<TimingInfo> requestTimesIncludingRetries) { - // Here we calculate the timing in Microseconds for the sum of the individual subMeasurements with the goal of deriving the TTFB - // (time to first byte). We calculate the time in micros for later use with an APM style counter (exposed as a long), rather than - // using the default double exposed by getTimeTakenMillisIfKnown(). - long totalTimeInMicros = 0; + private static long getTotalTimeInNanos(List<TimingInfo> requestTimesIncludingRetries) { + // Here we calculate the timing in Nanoseconds for the sum of the individual subMeasurements with the goal of deriving the TTFB + // (time to first byte). We use high-precision time here to distinguish a real measurement from a missing request time metric (0). + // The time is converted to milliseconds for later use with an APM style counter (exposed as a long), rather than using the + // default double exposed by getTimeTakenMillisIfKnown(). + // We don't need sub-millisecond precision, so there is no need to perform any data type casts.
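The net effect of the two hunks above, folded into one standalone helper purely for illustration (names are placeholders; the real change keeps the nanosecond summation and the millisecond conversion in the two separate places shown): the sum stays in nanoseconds so that zero can only mean "nothing was recorded", and a single conversion happens when the value is handed to the long-valued millisecond histogram. A 400-microsecond request, for example, still yields a non-zero nanosecond total even though it records as 0 ms.

    import com.amazonaws.util.TimingInfo;

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    class RequestTimingSketch {
        /** Sum sub-measurement durations in nanoseconds; a total of 0 then means no timing was recorded at all. */
        static long totalRequestTimeInNanos(List<TimingInfo> requestTimesIncludingRetries) {
            long totalNanos = 0L;
            for (TimingInfo timingInfo : requestTimesIncludingRetries) {
                Long endNanos = timingInfo.getEndTimeNanoIfKnown();
                if (endNanos != null) {
                    totalNanos += endNanos - timingInfo.getStartTimeNano();
                }
            }
            return totalNanos;
        }

        /** Convert once, at the recording boundary, since the histogram above is exposed in milliseconds. */
        static long toMillisForHistogram(long totalNanos) {
            return TimeUnit.NANOSECONDS.toMillis(totalNanos);
        }
    }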
+ long totalTimeInNanos = 0; for (TimingInfo timingInfo : requestTimesIncludingRetries) { var endTimeInNanos = timingInfo.getEndTimeNanoIfKnown(); if (endTimeInNanos != null) { - totalTimeInMicros += TimeUnit.NANOSECONDS.toMicros(endTimeInNanos - timingInfo.getStartTimeNano()); + totalTimeInNanos += endTimeInNanos - timingInfo.getStartTimeNano(); } } - return totalTimeInMicros; + return totalTimeInNanos; } @Override diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 72b48c5903629..d75a3e8ad433e 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ReferenceDocs; @@ -28,6 +29,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -35,15 +37,17 @@ import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.snapshots.SnapshotDeleteListener; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import java.util.Collection; import java.util.Map; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -183,6 +187,16 @@ class S3Repository extends MeteredBlobStoreRepository { S3BlobStore.MAX_BULK_DELETES ); + /** + * Maximum number of uploads to request for cleanup when doing a snapshot delete. 
+ */ + static final Setting MAX_MULTIPART_UPLOAD_CLEANUP_SIZE = Setting.intSetting( + "max_multipart_upload_cleanup_size", + 1000, + 0, + Setting.Property.Dynamic + ); + private final S3Service service; private final String bucket; @@ -305,7 +319,7 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte finalizeSnapshotContext.clusterMetadata(), finalizeSnapshotContext.snapshotInfo(), finalizeSnapshotContext.repositoryMetaVersion(), - delayedListener(ActionListener.runAfter(finalizeSnapshotContext, () -> metadataDone.onResponse(null))), + wrapWithWeakConsistencyProtection(ActionListener.runAfter(finalizeSnapshotContext, () -> metadataDone.onResponse(null))), info -> metadataDone.addListener(new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -324,50 +338,19 @@ public void onFailure(Exception e) { super.finalizeSnapshot(wrappedFinalizeContext); } - @Override - protected SnapshotDeleteListener wrapWithWeakConsistencyProtection(SnapshotDeleteListener listener) { - return new SnapshotDeleteListener() { - @Override - public void onDone() { - listener.onDone(); - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet(threadPool.schedule(() -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - listener.onRepositoryDataWritten(repositoryData); - }, coolDown, snapshotExecutor)); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - - @Override - public void onFailure(Exception e) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet(threadPool.schedule(() -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - listener.onFailure(e); - }, coolDown, snapshotExecutor)); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - }; - } - /** * Wraps given listener such that it is executed with a delay of {@link #coolDown} on the snapshot thread-pool after being invoked. * See {@link #COOLDOWN_PERIOD} for details. 
*/ - private ActionListener delayedListener(ActionListener listener) { - final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { + @Override + protected ActionListener wrapWithWeakConsistencyProtection(ActionListener listener) { + final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); assert cancellable != null; }); return new ActionListener<>() { @Override - public void onResponse(T response) { + public void onResponse(RepositoryData response) { logCooldownInfo(); final Scheduler.Cancellable existing = finalizationFuture.getAndSet( threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), coolDown, snapshotExecutor) @@ -459,4 +442,75 @@ public String getAnalysisFailureExtraDetail() { ReferenceDocs.S3_COMPATIBLE_REPOSITORIES ); } + + // only one multipart cleanup process running at once + private final AtomicBoolean multipartCleanupInProgress = new AtomicBoolean(); + + @Override + public void deleteSnapshots( + Collection snapshotIds, + long repositoryDataGeneration, + IndexVersion minimumNodeVersion, + ActionListener repositoryDataUpdateListener, + Runnable onCompletion + ) { + getMultipartUploadCleanupListener( + isReadOnly() ? 0 : MAX_MULTIPART_UPLOAD_CLEANUP_SIZE.get(getMetadata().settings()), + new ActionListener<>() { + @Override + public void onResponse(ActionListener multipartUploadCleanupListener) { + S3Repository.super.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, new ActionListener<>() { + @Override + public void onResponse(RepositoryData repositoryData) { + multipartUploadCleanupListener.onResponse(null); + repositoryDataUpdateListener.onResponse(repositoryData); + } + + @Override + public void onFailure(Exception e) { + multipartUploadCleanupListener.onFailure(e); + repositoryDataUpdateListener.onFailure(e); + } + }, onCompletion); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to get multipart uploads for cleanup during snapshot delete", e); + assert false : e; // getMultipartUploadCleanupListener doesn't throw and snapshotExecutor doesn't reject anything + repositoryDataUpdateListener.onFailure(e); + } + } + ); + } + + /** + * Capture the current list of multipart uploads, and (asynchronously) return a listener which, if completed successfully, aborts those + * uploads. Called at the start of a snapshot delete operation, at which point there should be no ongoing uploads (except in the case of + * a master failover). We protect against the master failover case by waiting until the delete operation successfully updates the root + * index-N blob before aborting any uploads. + */ + void getMultipartUploadCleanupListener(int maxUploads, ActionListener> listener) { + if (maxUploads == 0) { + listener.onResponse(ActionListener.noop()); + return; + } + + if (multipartCleanupInProgress.compareAndSet(false, true) == false) { + logger.info("multipart upload cleanup already in progress"); + listener.onResponse(ActionListener.noop()); + return; + } + + try (var refs = new RefCountingRunnable(() -> multipartCleanupInProgress.set(false))) { + snapshotExecutor.execute( + ActionRunnable.supply( + ActionListener.releaseAfter(listener, refs.acquire()), + () -> blobContainer() instanceof S3BlobContainer s3BlobContainer + ? 
s3BlobContainer.getMultipartUploadCleanupListener(maxUploads, refs) + : ActionListener.noop() + ) + ); + } + } } diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 77870697f93ae..e88a0861ec01c 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -345,6 +345,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index 4a62d6183470d..501af980e17e3 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml index e24ff1ad0e559..129f0ba5d7588 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml index 9c332cc7d9301..de334b4b3df96 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { 
repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml index 24c2b2b1741d6..09a8526017960 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml @@ -257,6 +257,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/TransportMainAction.java b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/TransportMainAction.java index 6b4b0a52b643a..fa48fc8d2907c 100644 --- a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/TransportMainAction.java +++ b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/TransportMainAction.java @@ -14,9 +14,10 @@ import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -33,7 +34,7 @@ public TransportMainAction( ActionFilters actionFilters, ClusterService clusterService ) { - super(MainRestPlugin.MAIN_ACTION.name(), actionFilters, transportService.getTaskManager()); + super(MainRestPlugin.MAIN_ACTION.name(), actionFilters, transportService.getTaskManager(), EsExecutors.DIRECT_EXECUTOR_SERVICE); this.nodeName = Node.NODE_NAME_SETTING.get(settings); this.clusterService = clusterService; } diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java index 7f310caf4ea05..16d22407c25fa 100644 --- a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java +++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java @@ -73,7 +73,7 @@ public GrokHelper(TimeValue interval, TimeValue maxExecutionTime) { return MatcherWatchdog.newInstance( interval.millis(), maxExecutionTime.millis(), - threadPool::relativeTimeInMillis, + threadPool.relativeTimeInMillisSupplier(), (delay, command) -> threadPool.schedule(command, TimeValue.timeValueMillis(delay), threadPool.generic()) ); })::getOrCompute; diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4ChunkedContinuationsIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4ChunkedContinuationsIT.java index c4c35b410af78..7432a510763e0 100644 --- 
a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4ChunkedContinuationsIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4ChunkedContinuationsIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.CountDownActionListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.TransportAction; @@ -34,7 +33,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; @@ -57,6 +55,7 @@ import org.elasticsearch.http.HttpRouteStats; import org.elasticsearch.http.HttpRouteStatsTracker; import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.BaseRestHandler; @@ -71,6 +70,7 @@ import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; @@ -435,13 +435,22 @@ public static class TransportYieldsContinuationsAction extends TransportAction listener) { - executor.execute(ActionRunnable.supply(listener, () -> new Response(request.failIndex, executor))); + var response = new Response(request.failIndex, executor); + try { + listener.onResponse(response); + } catch (Exception e) { + ESTestCase.fail(e); + } } } @@ -585,18 +594,22 @@ public static class TransportInfiniteContinuationsAction extends TransportAction @Inject public TransportInfiniteContinuationsAction(ActionFilters actionFilters, TransportService transportService) { - super(TYPE.name(), actionFilters, transportService.getTaskManager()); - this.executor = transportService.getThreadPool().executor(ThreadPool.Names.GENERIC); + this(actionFilters, transportService, transportService.getThreadPool().executor(ThreadPool.Names.GENERIC)); + } + + TransportInfiniteContinuationsAction(ActionFilters actionFilters, TransportService transportService, ExecutorService executor) { + super(TYPE.name(), actionFilters, transportService.getTaskManager(), executor); + this.executor = executor; } @Override protected void doExecute(Task task, Request request, ActionListener listener) { - executor.execute( - ActionRunnable.supply( - ActionTestUtils.assertNoFailureListener(listener::onResponse), - () -> new Response(randomFrom(executor, EsExecutors.DIRECT_EXECUTOR_SERVICE)) - ) - ); + var response = new Response(randomFrom(executor, EsExecutors.DIRECT_EXECUTOR_SERVICE)); + try { + listener.onResponse(response); + } catch (Exception e) { + ESTestCase.fail(e); + } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java 
b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index f48a3143fd016..a309877e9aa83 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -53,6 +53,7 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.netty4.internal.HttpHeadersAuthenticatorUtils; import org.elasticsearch.http.netty4.internal.HttpValidator; +import org.elasticsearch.rest.ChunkedZipResponse; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty4.AcceptChannelHandler; @@ -382,7 +383,16 @@ protected boolean isContentAlwaysEmpty(HttpResponse msg) { }) .addLast("aggregator", aggregator); if (handlingSettings.compression()) { - ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.compressionLevel())); + ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.compressionLevel()) { + @Override + protected Result beginEncode(HttpResponse httpResponse, String acceptEncoding) throws Exception { + if (ChunkedZipResponse.ZIP_CONTENT_TYPE.equals(httpResponse.headers().get("content-type"))) { + return null; + } else { + return super.beginEncode(httpResponse, acceptEncoding); + } + } + }); } ch.pipeline() .addLast( diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index d85bf32da263f..8080170bf1ee7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -372,7 +372,7 @@ private void setupPipeline(Channel ch, boolean isRemoteClusterServerChannel) { protected InboundPipeline getInboundPipeline(Channel ch, boolean isRemoteClusterServerChannel) { return new InboundPipeline( getStatsTracker(), - threadPool::relativeTimeInMillis, + threadPool.relativeTimeInMillisSupplier(), new InboundDecoder(recycler), new InboundAggregator(getInflightBreaker(), getRequestHandlers()::getHandler, ignoreDeserializationErrors()), this::inboundMessage diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 3e74a74dbd49c..ce7704e6e040c 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.mocksocket.MockSocket; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.DefaultBuiltInExecutorBuilders; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportSettings; import org.junit.After; @@ -52,7 +53,7 @@ public class Netty4SizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { - threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP, new 
DefaultBuiltInExecutorBuilders()); NetworkService networkService = new NetworkService(Collections.emptyList()); PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); nettyTransport = new Netty4Transport( diff --git a/muted-tests.yml b/muted-tests.yml index ed7556039d0ab..57cc7abb899e7 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -38,29 +38,18 @@ tests: - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT" issue: "https://github.com/elastic/elasticsearch/issues/108628" method: "testDeprecatedSettingsReturnWarnings" -- class: "org.elasticsearch.xpack.inference.InferenceCrudIT" - issue: "https://github.com/elastic/elasticsearch/issues/109391" - method: "testDeleteEndpointWhileReferencedByPipeline" - class: "org.elasticsearch.xpack.test.rest.XPackRestIT" issue: "https://github.com/elastic/elasticsearch/issues/109687" method: "test {p0=sql/translate/Translate SQL}" -- class: "org.elasticsearch.action.admin.indices.rollover.RolloverIT" - issue: "https://github.com/elastic/elasticsearch/issues/110034" - method: "testRolloverWithClosedWriteIndex" - class: org.elasticsearch.index.store.FsDirectoryFactoryTests method: testStoreDirectory issue: https://github.com/elastic/elasticsearch/issues/110210 - class: org.elasticsearch.index.store.FsDirectoryFactoryTests method: testPreload issue: https://github.com/elastic/elasticsearch/issues/110211 -- class: "org.elasticsearch.rest.RestControllerIT" - issue: "https://github.com/elastic/elasticsearch/issues/110225" - class: org.elasticsearch.upgrades.SecurityIndexRolesMetadataMigrationIT method: testMetadataMigratedAfterUpgrade issue: https://github.com/elastic/elasticsearch/issues/110232 -- class: org.elasticsearch.compute.lucene.ValueSourceReaderTypeConversionTests - method: testLoadAll - issue: https://github.com/elastic/elasticsearch/issues/110244 - class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT method: testMinVersionAsNewVersion issue: https://github.com/elastic/elasticsearch/issues/95384 @@ -73,74 +62,118 @@ tests: - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" -- class: org.elasticsearch.xpack.security.LicenseDLSFLSRoleIT - method: testQueryDLSFLSRolesShowAsDisabled - issue: https://github.com/elastic/elasticsearch/issues/110729 - class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests method: testPopulationOfCacheWhenLoadingPrivilegesForAllApplications issue: https://github.com/elastic/elasticsearch/issues/110789 -- class: org.elasticsearch.xpack.security.ScrollHelperIntegTests - method: testFetchAllEntities - issue: https://github.com/elastic/elasticsearch/issues/110786 - class: org.elasticsearch.xpack.searchablesnapshots.cache.common.CacheFileTests method: testCacheFileCreatedAsSparseFile issue: https://github.com/elastic/elasticsearch/issues/110801 -- class: org.elasticsearch.nativeaccess.PreallocateTests - method: testPreallocate - issue: https://github.com/elastic/elasticsearch/issues/110948 - class: org.elasticsearch.nativeaccess.VectorSystemPropertyTests method: testSystemPropertyDisabled issue: https://github.com/elastic/elasticsearch/issues/110949 -- class: org.elasticsearch.compute.lucene.ValueSourceReaderTypeConversionTests - method: testLoadAllStatusAllInOnePage - issue: https://github.com/elastic/elasticsearch/issues/111048 -- class: org.elasticsearch.action.search.KnnSearchSingleNodeTests 
- method: testKnnSearchAction - issue: https://github.com/elastic/elasticsearch/issues/111072 -- class: org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapperTests - method: testKnnQuantizedFlatVectorsFormat - issue: https://github.com/elastic/elasticsearch/issues/111044 - class: org.elasticsearch.xpack.esql.spatial.SpatialPushDownGeoPointIT method: testPushedDownQueriesSingleValue issue: https://github.com/elastic/elasticsearch/issues/111084 -- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT - method: test {stats.MaxOfIpGrouping} - issue: https://github.com/elastic/elasticsearch/issues/111107 -- class: org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloaderIT - method: testEnterpriseDownloaderTask - issue: https://github.com/elastic/elasticsearch/issues/111002 -- class: org.elasticsearch.xpack.esql.spatial.SpatialPushDownCartesianPointIT - method: testPushedDownQueriesSingleValue - issue: https://github.com/elastic/elasticsearch/issues/110982 - class: org.elasticsearch.multi_node.GlobalCheckpointSyncActionIT issue: https://github.com/elastic/elasticsearch/issues/111124 -- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT - method: test {stats.MinOfIpGrouping} - issue: https://github.com/elastic/elasticsearch/issues/111125 -- class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests - method: testGetPrivilegesUsesCache - issue: https://github.com/elastic/elasticsearch/issues/110788 -- class: org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankTests - method: testRerankInferenceResultMismatch - issue: https://github.com/elastic/elasticsearch/issues/111133 - class: org.elasticsearch.cluster.PrevalidateShardPathIT method: testCheckShards issue: https://github.com/elastic/elasticsearch/issues/111134 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {stats.MaxOfIpGrouping SYNC} - issue: https://github.com/elastic/elasticsearch/issues/111136 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {stats.MinOfIpGrouping SYNC} - issue: https://github.com/elastic/elasticsearch/issues/111137 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {stats.MinOfIpGrouping ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/111138 - class: org.elasticsearch.packaging.test.DockerTests method: test021InstallPlugin issue: https://github.com/elastic/elasticsearch/issues/110343 +- class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT + method: testAuthenticateWithImplicitFlow + issue: https://github.com/elastic/elasticsearch/issues/111191 +- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT + issue: https://github.com/elastic/elasticsearch/issues/111319 +- class: org.elasticsearch.xpack.ml.integration.InferenceIngestInputConfigIT + method: testIngestWithInputFields + issue: https://github.com/elastic/elasticsearch/issues/111383 +- class: org.elasticsearch.xpack.ml.integration.InferenceIngestInputConfigIT + method: testIngestWithMultipleInputFields + issue: https://github.com/elastic/elasticsearch/issues/111384 +- class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT + method: testAuthenticateWithCodeFlowAndClientPost + issue: https://github.com/elastic/elasticsearch/issues/111396 - class: org.elasticsearch.xpack.restart.FullClusterRestartIT - method: testDisableFieldNameField {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/111141 + method: testSingleDoc {cluster=UPGRADED} + 
issue: https://github.com/elastic/elasticsearch/issues/111434 +- class: org.elasticsearch.xpack.restart.FullClusterRestartIT + method: testDataStreams {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/111448 +- class: org.elasticsearch.search.SearchServiceTests + issue: https://github.com/elastic/elasticsearch/issues/111529 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=rollup/security_tests/Index-based access} + issue: https://github.com/elastic/elasticsearch/issues/111631 +- class: org.elasticsearch.tdigest.ComparisonTests + method: testSparseGaussianDistribution + issue: https://github.com/elastic/elasticsearch/issues/111721 +- class: org.elasticsearch.upgrades.FullClusterRestartIT + method: testSnapshotRestore {cluster=OLD} + issue: https://github.com/elastic/elasticsearch/issues/111777 +- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT + method: testSnapshotRestore {cluster=OLD} + issue: https://github.com/elastic/elasticsearch/issues/111775 +- class: org.elasticsearch.upgrades.FullClusterRestartIT + method: testSnapshotRestore {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/111798 +- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT + method: testSnapshotRestore {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/111799 +- class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT + method: testScaledFloat + issue: https://github.com/elastic/elasticsearch/issues/112003 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} + issue: https://github.com/elastic/elasticsearch/issues/111999 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testDeleteJobAfterMissingIndex + issue: https://github.com/elastic/elasticsearch/issues/112088 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByTwoCalculatedSecondOverwrites SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112117 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112118 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/preview_transforms/Test preview transform latest} + issue: https://github.com/elastic/elasticsearch/issues/112144 +- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/112147 +- class: org.elasticsearch.smoketest.WatcherYamlRestIT + method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} + issue: https://github.com/elastic/elasticsearch/issues/112189 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} + issue: https://github.com/elastic/elasticsearch/issues/112191 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testDeleteJobAsync + issue: https://github.com/elastic/elasticsearch/issues/112212 +- class: org.elasticsearch.search.retriever.rankdoc.RankDocsSortBuilderTests + method: testEqualsAndHashcode + issue: https://github.com/elastic/elasticsearch/issues/112312 +- class: org.elasticsearch.blobcache.shared.SharedBlobCacheServiceTests + method: testGetMultiThreaded + issue: 
https://github.com/elastic/elasticsearch/issues/112314 +- class: org.elasticsearch.search.retriever.RankDocRetrieverBuilderIT + method: testRankDocsRetrieverWithCollapse + issue: https://github.com/elastic/elasticsearch/issues/112254 +- class: org.elasticsearch.search.ccs.CCSUsageTelemetryIT + issue: https://github.com/elastic/elasticsearch/issues/112324 +- class: org.elasticsearch.datastreams.logsdb.qa.StandardVersusLogsIndexModeRandomDataChallengeRestIT + method: testMatchAllQuery + issue: https://github.com/elastic/elasticsearch/issues/112374 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/rest-api/watcher/put-watch/line_120} + issue: https://github.com/elastic/elasticsearch/issues/99517 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testMultiIndexDelete + issue: https://github.com/elastic/elasticsearch/issues/112381 +- class: org.elasticsearch.xpack.searchablesnapshots.cache.shared.NodesCachesStatsIntegTests + method: testNodesCachesStats + issue: https://github.com/elastic/elasticsearch/issues/112384 # Examples: # @@ -171,4 +204,12 @@ tests: # method: "test {union_types.MultiIndexIpStringStatsInline}" # issue: "https://github.com/elastic/elasticsearch/..." # Note that this mutes for the unit-test-like CsvTests only. -# Muting for the integration tests needs to be done for each IT class individually. +# Muting all the integration tests can be done using the class "org.elasticsearch.xpack.esql.**". +# Consider however, that some tests are named as "test {file.test SYNC}" and "ASYNC" in the integration tests. +# To mute all 3 tests safely everywhere use: +# - class: "org.elasticsearch.xpack.esql.**" +# method: "test {union_types.MultiIndexIpStringStatsInline}" +# issue: "https://github.com/elastic/elasticsearch/..." +# - class: "org.elasticsearch.xpack.esql.**" +# method: "test {union_types.MultiIndexIpStringStatsInline *}" +# issue: "https://github.com/elastic/elasticsearch/..." 
diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties index efe2ff3449216..9036682bf0f0c 100644 --- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties +++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip +distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java index 894f4ebe4bc54..dc429538fec3b 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java @@ -35,10 +35,7 @@ public class ExpertScriptPlugin extends Plugin implements ScriptPlugin { @Override - public ScriptEngine getScriptEngine( - Settings settings, - Collection> contexts - ) { + public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { return new MyExpertScriptEngine(); } @@ -143,6 +140,9 @@ public ScoreScript newInstance(DocReader docReader) public double execute( ExplanationHolder explanation ) { + if(explanation != null) { + explanation.set("An example optional custom description to explain details for this script's execution; we'll provide a default one if you leave this out."); + } return 0.0d; } }; @@ -166,6 +166,9 @@ public void setDocument(int docid) { } @Override public double execute(ExplanationHolder explanation) { + if(explanation != null) { + explanation.set("An example optional custom description to explain details for this script's execution; we'll provide a default one if you leave this out."); + } if (postings.docID() != currentDocid) { /* * advance moved past the current doc, so this diff --git a/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml b/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml index 89194d162872d..7436768416e00 100644 --- a/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml +++ b/plugins/examples/script-expert-scoring/src/yamlRestTest/resources/rest-api-spec/test/script_expert_scoring/20_score.yml @@ -4,26 +4,27 @@ setup: - do: indices.create: - index: test + index: test - do: index: - index: test - id: "1" - body: { "important_field": "foo" } + index: test + id: "1" + body: { "important_field": "foo" } - do: - index: - index: test - id: "2" - body: { "important_field": "foo foo foo" } + index: + index: test + id: "2" + body: { "important_field": "foo foo foo" } - do: - index: - index: test - id: "3" - body: { "important_field": "foo foo" } + index: + index: test + id: "3" + body: { "important_field": "foo foo" } - do: - indices.refresh: {} + indices.refresh: { } + --- "document scoring": - do: @@ -46,6 
+47,39 @@ setup: term: "foo" - length: { hits.hits: 3 } - - match: {hits.hits.0._id: "2" } - - match: {hits.hits.1._id: "3" } - - match: {hits.hits.2._id: "1" } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "1" } + +--- +"document scoring with custom explanation": + + - requires: + cluster_features: [ "gte_v8.15.1" ] + reason: "bug fixed where explanations were throwing npe prior to 8.16" + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + explain: true + query: + function_score: + query: + match: + important_field: "foo" + functions: + - script_score: + script: + source: "pure_df" + lang: "expert_scripts" + params: + field: "important_field" + term: "foo" + + - length: { hits.hits: 3 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.0._explanation.details.1.details.0.description: "An example optional custom description to explain details for this script's execution; we'll provide a default one if you leave this out." } diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index dac8e051f25f8..8d50a9f7e29a9 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -584,7 +584,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { ); } if (fieldType.stored()) { - return new StringStoredFieldFieldLoader(fullPath(), leafName(), null) { + return new StringStoredFieldFieldLoader(fullPath(), leafName()) { @Override protected void write(XContentBuilder b, Object value) throws IOException { b.value((String) value); diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index 49db5e3a1cd99..12c15a91be0e3 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -310,8 +310,11 @@ protected ClientYamlTestExecutionContext createRestTestExecutionContext( getClusterStateFeatures(adminSearchClient), semanticNodeVersions ); - final TestFeatureService combinedTestFeatureService = featureId -> testFeatureService.clusterHasFeature(featureId) - && searchTestFeatureService.clusterHasFeature(featureId); + final TestFeatureService combinedTestFeatureService = (featureId, any) -> { + boolean adminFeature = testFeatureService.clusterHasFeature(featureId, any); + boolean searchFeature = searchTestFeatureService.clusterHasFeature(featureId, any); + return any ? 
adminFeature || searchFeature : adminFeature && searchFeature; + }; final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) .collect(Collectors.toSet()); diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index e3639ffabf664..d3c8e64b85066 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -294,8 +294,11 @@ protected ClientYamlTestExecutionContext createRestTestExecutionContext( getClusterStateFeatures(adminSearchClient), semanticNodeVersions ); - final TestFeatureService combinedTestFeatureService = featureId -> testFeatureService.clusterHasFeature(featureId) - && searchTestFeatureService.clusterHasFeature(featureId); + final TestFeatureService combinedTestFeatureService = (featureId, any) -> { + boolean adminFeature = testFeatureService.clusterHasFeature(featureId, any); + boolean searchFeature = searchTestFeatureService.clusterHasFeature(featureId, any); + return any ? adminFeature || searchFeature : adminFeature && searchFeature; + }; final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 1a86947acab95..20c13ca92f5c1 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -920,9 +920,7 @@ public void testEmptyShard() throws IOException { final String indexName = "test_empty_shard"; if (isRunningAgainstOldCluster()) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + Settings.Builder settings = indexSettings(1, 1) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -1522,14 +1520,7 @@ public void testOperationBasedRecovery() throws Exception { */ public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { if (isRunningAgainstOldCluster()) { - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .build() - ); + createIndex(index, indexSettings(1, 1).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build()); ensureGreen(index); int numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { @@ -1549,9 +1540,7 @@ public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { public void testResize() throws Exception { int numDocs; if (isRunningAgainstOldCluster()) { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); + final Settings.Builder settings = indexSettings(3, 1); if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeFullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeFullClusterRestartIT.java new file mode 100644 index 0000000000000..739b4e302bb54 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeFullClusterRestartIT.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.test.MapMatcher; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import org.junit.ClassRule; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; + +public class LogsIndexModeFullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { + + @ClassRule + public static final ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .module("constant-keyword") + .module("data-streams") + .module("mapper-extras") + .module("x-pack-aggregate-metric") + .module("x-pack-stack") + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .build(); + + public LogsIndexModeFullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + private static final String BULK_INDEX_REQUEST = """ + { "create": {} } + { "@timestamp": "%s", "host.name": "%s", "method": "%s", "ip.address": "%s", "message": "%s" } + """; + + private static final String STANDARD_TEMPLATE = """ + { + "index_patterns": [ "logs-*-*" ], + "data_stream": {}, + "priority": 500, + "template": { + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "host.name": { + "type": "keyword" + }, + "method": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "ip.address": { + "type": "ip" + } + } + } + } + }"""; + + private static final String LOGS_TEMPLATE = """ + { + "index_patterns": [ "logs-*-*" ], + "data_stream": {}, + "priority": 500, + "template": { + "settings": { + "index": { + "mode": "logsdb" + } + }, + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "host.name": { + "type": "keyword" + }, + "method": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "ip.address": { + "type": "ip" + } + } + } + } + }"""; + + public void testLogsIndexing() throws IOException { + assumeTrue("Test uses data streams", oldClusterHasFeature(RestTestLegacyFeatures.DATA_STREAMS_SUPPORTED)); + + if (isRunningAgainstOldCluster()) { + assertOK(client().performRequest(putTemplate(client(), "logs-template", STANDARD_TEMPLATE))); + assertOK(client().performRequest(createDataStream("logs-apache-production"))); + final Response bulkIndexResponse = client().performRequest(bulkIndex("logs-apache-production", () -> { + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < randomIntBetween(10, 20); i++) { + sb.append( + String.format( + BULK_INDEX_REQUEST, + DateFormatter.forPattern(FormatNames.DATE_TIME.getName()).format(Instant.now()), + randomFrom("foo", "bar"), + 
randomFrom("PUT", "POST", "GET"), + InetAddresses.toAddrString(randomIp(randomBoolean())), + randomAlphaOfLengthBetween(100, 200) + ) + ); + sb.append("\n"); + } + return sb.toString(); + })); + assertOK(bulkIndexResponse); + assertThat(entityAsMap(bulkIndexResponse).get("errors"), Matchers.is(false)); + } else { + assertOK(client().performRequest(putTemplate(client(), "logs-template", LOGS_TEMPLATE))); + assertOK(client().performRequest(rolloverDataStream(client(), "logs-apache-production"))); + final Response bulkIndexResponse = client().performRequest(bulkIndex("logs-apache-production", () -> { + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < randomIntBetween(10, 20); i++) { + sb.append( + String.format( + BULK_INDEX_REQUEST, + DateFormatter.forPattern(FormatNames.DATE_TIME.getName()).format(Instant.now()), + randomFrom("foo", "bar"), + randomFrom("PUT", "POST", "GET"), + InetAddresses.toAddrString(randomIp(randomBoolean())), + randomAlphaOfLengthBetween(100, 200) + ) + ); + sb.append("\n"); + } + return sb.toString(); + })); + assertOK(bulkIndexResponse); + assertThat(entityAsMap(bulkIndexResponse).get("errors"), Matchers.is(false)); + + assertIndexMappingsAndSettings(0, Matchers.nullValue(), matchesMap().extraOk()); + assertIndexMappingsAndSettings( + 1, + Matchers.equalTo("logsdb"), + matchesMap().extraOk().entry("_source", Map.of("mode", "synthetic")) + ); + } + } + + private void assertIndexMappingsAndSettings(int backingIndex, final Matcher indexModeMatcher, final MapMatcher mappingsMatcher) + throws IOException { + assertThat( + getSettings(client(), getWriteBackingIndex(client(), "logs-apache-production", backingIndex)).get("index.mode"), + indexModeMatcher + ); + assertMap(getIndexMappingAsMap(getWriteBackingIndex(client(), "logs-apache-production", backingIndex)), mappingsMatcher); + } + + private static Request createDataStream(final String dataStreamName) { + return new Request("PUT", "/_data_stream/" + dataStreamName); + } + + private static Request bulkIndex(final String dataStreamName, final Supplier bulkIndexRequestSupplier) { + final Request request = new Request("POST", dataStreamName + "/_bulk"); + request.setJsonEntity(bulkIndexRequestSupplier.get()); + request.addParameter("refresh", "true"); + return request; + } + + private static Request putTemplate(final RestClient client, final String templateName, final String mappings) throws IOException { + final Request request = new Request("PUT", "/_index_template/" + templateName); + request.setJsonEntity(mappings); + return request; + } + + private static Request rolloverDataStream(final RestClient client, final String dataStreamName) throws IOException { + return new Request("POST", "/" + dataStreamName + "/_rollover"); + } + + @SuppressWarnings("unchecked") + private static String getWriteBackingIndex(final RestClient client, final String dataStreamName, int backingIndex) throws IOException { + final Request request = new Request("GET", "_data_stream/" + dataStreamName); + final List dataStreams = (List) entityAsMap(client.performRequest(request)).get("data_streams"); + final Map dataStream = (Map) dataStreams.get(0); + final List> backingIndices = (List>) dataStream.get("indices"); + return backingIndices.get(backingIndex).get("index_name"); + } + + @SuppressWarnings("unchecked") + private static Map getSettings(final RestClient client, final String indexName) throws IOException { + final Request request = new Request("GET", "/" + indexName + "/_settings?flat_settings"); + return ((Map>) 
entityAsMap(client.performRequest(request)).get(indexName)).get("settings"); + } +} diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 6c924fe8e429a..825a866cdf2f8 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -75,10 +74,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("cluster discovered: {}", nodes.toString()); final List bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList()); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) - .put("index.routing.allocation.include._name", bwcNames); + Settings.Builder settings = indexSettings(1, 2).put("index.routing.allocation.include._name", bwcNames); final String index = "indexversionprop"; final int minUpdates = 5; final int maxUpdates = 10; @@ -165,10 +161,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("cluster discovered: {}", nodes.toString()); final List bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList()); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) - .put("index.routing.allocation.include._name", bwcNames); + Settings.Builder settings = indexSettings(1, 2).put("index.routing.allocation.include._name", bwcNames); final String index = "test"; createIndex(index, settings.build()); @@ -251,10 +244,7 @@ public void testUpdateSnapshotStatus() throws Exception { String bwcNames = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.joining(",")); // Allocating shards on the BWC nodes to makes sure that taking snapshot happens on those nodes. 
- Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(5, 10)) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put("index.routing.allocation.include._name", bwcNames); + Settings.Builder settings = indexSettings(between(5, 10), 1).put("index.routing.allocation.include._name", bwcNames); final String index = "test-snapshot-index"; createIndex(index, settings.build()); @@ -315,14 +305,7 @@ public void testSyncedFlushTransition() throws Exception { int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); int totalShards = numShards * (numOfReplicas + 1); final String index = "test_synced_flush"; - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) - .put("index.routing.allocation.include._name", newNodes) - .build() - ); + createIndex(index, indexSettings(numShards, numOfReplicas).put("index.routing.allocation.include._name", newNodes).build()); ensureGreen(index); indexDocs(index, randomIntBetween(0, 100), between(1, 100)); try ( @@ -394,14 +377,7 @@ public void testFlushTransition() throws Exception { int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); int totalShards = numShards * (numOfReplicas + 1); final String index = "test_flush"; - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) - .put("index.routing.allocation.include._name", newNodes) - .build() - ); + createIndex(index, indexSettings(numShards, numOfReplicas).put("index.routing.allocation.include._name", newNodes).build()); ensureGreen(index); indexDocs(index, randomIntBetween(0, 100), between(1, 100)); try ( diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java index a33fc01d8446a..f2ca41e5ef8bc 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; @@ -43,9 +42,7 @@ private int indexDocs(int numDocs, int id) throws Exception { } public void testSingleValuedString() throws Exception { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + final Settings.Builder settings = indexSettings(2, 0); createIndex(index, settings.build()); // We want to trigger the usage oif cuckoo filters that happen only when there are // more than 10k distinct values in one shard. 
diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java index 461b731e518fb..808ebb764768f 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java @@ -12,8 +12,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; @@ -50,13 +48,7 @@ public void prepareTestData() throws IOException { allNodes.addAll(nodes.getNewNodes()); if (client().performRequest(new Request("HEAD", "/" + index)).getStatusLine().getStatusCode() == 404) { - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .build() - ); + createIndex(index, indexSettings(numShards, numReplicas).build()); for (int i = 0; i < numDocs; i++) { Request request = new Request("PUT", index + "/_doc/" + i); request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java index 1925b1e8f36ab..2ce9eef29d903 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/ConfigurationTests.java @@ -20,7 +20,6 @@ import static java.nio.file.attribute.PosixFilePermissions.fromString; import static org.elasticsearch.packaging.util.FileUtils.append; -import static org.hamcrest.Matchers.equalTo; import static org.junit.Assume.assumeFalse; public class ConfigurationTests extends PackagingTestCase { @@ -50,13 +49,15 @@ public void test20HostnameSubstitution() throws Exception { // security auto-config requires that the archive owner and the node process user be the same Platforms.onWindows(() -> sh.chown(confPath, installation.getOwner())); assertWhileRunning(() -> { - final String nameResponse = ServerUtils.makeRequest( - Request.Get("https://localhost:9200/_cat/nodes?h=name"), - "test_superuser", - "test_superuser_password", - ServerUtils.getCaCert(confPath) - ).strip(); - assertThat(nameResponse, equalTo("mytesthost")); + assertBusy(() -> { + final String nameResponse = ServerUtils.makeRequest( + Request.Get("https://localhost:9200/_cat/nodes?h=name"), + "test_superuser", + "test_superuser_password", + ServerUtils.getCaCert(confPath) + ).strip(); + assertEquals("mytesthost", nameResponse); + }); }); Platforms.onWindows(() -> sh.chown(confPath)); }); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index f9723f30cc371..18668b842b2d3 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -1231,8 +1231,7 @@ public void test500Readiness() throws Exception { 
assertBusy(() -> assertTrue(readinessProbe(9399))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99508") - public void test600Interrupt() { + public void test600Interrupt() throws Exception { waitForElasticsearch(installation, "elastic", PASSWORD); final Result containerLogs = getContainerLogs(); @@ -1242,10 +1241,12 @@ public void test600Interrupt() { final int maxPid = infos.stream().map(i -> i.pid()).max(Integer::compareTo).get(); sh.run("bash -c 'kill -int " + maxPid + "'"); // send ctrl+c to all java processes - final Result containerLogsAfter = getContainerLogs(); - assertThat("Container logs should contain stopping ...", containerLogsAfter.stdout(), containsString("stopping ...")); - assertThat("No errors stdout", containerLogsAfter.stdout(), not(containsString("java.security.AccessControlException:"))); - assertThat("No errors stderr", containerLogsAfter.stderr(), not(containsString("java.security.AccessControlException:"))); + assertBusy(() -> { + final Result containerLogsAfter = getContainerLogs(); + assertThat("Container logs should contain stopping ...", containerLogsAfter.stdout(), containsString("stopping ...")); + assertThat("No errors stdout", containerLogsAfter.stdout(), not(containsString("java.security.AccessControlException:"))); + assertThat("No errors stderr", containerLogsAfter.stderr(), not(containsString("java.security.AccessControlException:"))); + }); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/MemoryLockingTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/MemoryLockingTests.java new file mode 100644 index 0000000000000..82a17c54b6d69 --- /dev/null +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/MemoryLockingTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.ServerUtils; +import org.elasticsearch.packaging.util.docker.DockerRun; + +import java.util.Map; + +import static org.elasticsearch.packaging.util.docker.Docker.runContainer; +import static org.elasticsearch.packaging.util.docker.DockerRun.builder; + +public class MemoryLockingTests extends PackagingTestCase { + + public void test10Install() throws Exception { + install(); + } + + public void test20MemoryLockingEnabled() throws Exception { + configureAndRun( + Map.of( + "bootstrap.memory_lock", + "true", + "xpack.security.enabled", + "false", + "xpack.security.http.ssl.enabled", + "false", + "xpack.security.enrollment.enabled", + "false", + "discovery.type", + "single-node" + ) + ); + // TODO: verify locking worked. logs? check memory of process?
at least we know the process started successfully + stopElasticsearch(); + } + + public void configureAndRun(Map settings) throws Exception { + if (distribution().isDocker()) { + DockerRun builder = builder(); + settings.forEach(builder::envVar); + runContainer(distribution(), builder); + } else { + + for (var setting : settings.entrySet()) { + ServerUtils.addSettingToExistingConfiguration(installation.config, setting.getKey(), setting.getValue()); + } + ServerUtils.removeSettingFromExistingConfiguration(installation.config, "cluster.initial_master_nodes"); + } + + startElasticsearch(); + } +} diff --git a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 43d5ea842f9ef..fe2236adc4904 100644 --- a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -60,9 +60,7 @@ public class RecoveryIT extends AbstractRollingTestCase { public void testHistoryUUIDIsGenerated() throws Exception { final String index = "index_history_uuid"; if (CLUSTER_TYPE == ClusterType.OLD) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + Settings.Builder settings = indexSettings(1, 1) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -128,9 +126,7 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { switch (CLUSTER_TYPE) { case OLD -> { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + Settings.Builder settings = indexSettings(1, 2) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -217,9 +213,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { final String index = "relocation_with_concurrent_indexing"; switch (CLUSTER_TYPE) { case OLD -> { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + Settings.Builder settings = indexSettings(1, 2) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -296,9 +290,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { public void testRecovery() throws Exception { final String index = "test_recovery"; if (CLUSTER_TYPE == ClusterType.OLD) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + Settings.Builder settings = indexSettings(1, 1) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -413,9 +405,7 @@ public void testRecoveryClosedIndex() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { createIndex( indexName, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + indexSettings(1, 1) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -453,13 +443,7 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(id)).toLowerCase(Locale.ROOT); if (indexExists(indexName) == false) { - createIndex( - indexName, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build() - ); + createIndex(indexName, indexSettings(1, 0).build()); ensureGreen(indexName); closeIndex(indexName); } @@ -482,10 +466,7 @@ public void testClosedIndexNoopRecovery() throws Exception { if (CLUSTER_TYPE == ClusterType.OLD) { createIndex( indexName, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + indexSettings(1, 0).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h") .put("index.routing.allocation.include._name", CLUSTER_NAME + "-0") .build() @@ -578,9 +559,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab public void testUpdateDoc() throws Exception { final String index = "test_update_doc"; if (CLUSTER_TYPE == ClusterType.OLD) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + Settings.Builder settings = indexSettings(1, 2); createIndex(index, settings.build()); indexDocs(index, 0, 100); } @@ -648,9 +627,7 @@ private void assertNoopRecoveries(String indexName, Predicate targetNode public void testOperationBasedRecovery() throws Exception { final String index = "test_operation_based_recovery"; if (CLUSTER_TYPE == ClusterType.OLD) { - final Settings.Builder settings = Settings.builder() - 
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + final Settings.Builder settings = indexSettings(1, 2); if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java new file mode 100644 index 0000000000000..1549789bcc44a --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java @@ -0,0 +1,248 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.test.MapMatcher; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import org.junit.ClassRule; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; + +public class LogsIndexModeRollingUpgradeIT extends AbstractRollingUpgradeTestCase { + + @ClassRule() + public static final ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .module("constant-keyword") + .module("data-streams") + .module("mapper-extras") + .module("x-pack-aggregate-metric") + .module("x-pack-stack") + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("cluster.logsdb.enabled", "true") + .setting("stack.templates.enabled", "false") + .build(); + + public LogsIndexModeRollingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + private static final String BULK_INDEX_REQUEST = """ + { "create": {} } + { "@timestamp": "%s", "host.name": "%s", "method": "%s", "ip.address": "%s", "message": "%s" } + """; + + private static final String STANDARD_TEMPLATE = """ + { + "index_patterns": [ "logs-*-*" ], + "data_stream": {}, + "priority": 500, + "template": { + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "host.name": { + "type": "keyword" + }, + "method": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "ip.address": { + "type": "ip" + } + } + } + } + }"""; + + private static final String LOGS_TEMPLATE = """ + { + "index_patterns": [ "logs-*-*" ], + "data_stream": {}, + 
"priority": 500, + "template": { + "settings": { + "index": { + "mode": "logsdb" + } + }, + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "host.name": { + "type": "keyword" + }, + "method": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "ip.address": { + "type": "ip" + } + } + } + } + }"""; + + public void testLogsIndexing() throws IOException { + if (isOldCluster()) { + assertOK(client().performRequest(putTemplate(client(), "logs-template", STANDARD_TEMPLATE))); + assertOK(client().performRequest(createDataStream("logs-apache-production"))); + final Response bulkIndexResponse = client().performRequest(bulkIndex("logs-apache-production", () -> { + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < randomIntBetween(10, 20); i++) { + sb.append( + String.format( + BULK_INDEX_REQUEST, + DateFormatter.forPattern(FormatNames.DATE_TIME.getName()).format(Instant.now()), + randomFrom("foo", "bar"), + randomFrom("PUT", "POST", "GET"), + InetAddresses.toAddrString(randomIp(randomBoolean())), + randomIntBetween(20, 50) + ) + ); + sb.append("\n"); + } + return sb.toString(); + })); + assertOK(bulkIndexResponse); + assertThat(entityAsMap(bulkIndexResponse).get("errors"), Matchers.is(false)); + } else if (isMixedCluster()) { + assertOK(client().performRequest(rolloverDataStream(client(), "logs-apache-production"))); + final Response bulkIndexResponse = client().performRequest(bulkIndex("logs-apache-production", () -> { + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < randomIntBetween(10, 20); i++) { + sb.append( + String.format( + BULK_INDEX_REQUEST, + DateFormatter.forPattern(FormatNames.DATE_TIME.getName()).format(Instant.now()), + randomFrom("foo", "bar"), + randomFrom("PUT", "POST", "GET"), + InetAddresses.toAddrString(randomIp(randomBoolean())), + randomIntBetween(20, 50) + ) + ); + sb.append("\n"); + } + return sb.toString(); + })); + assertOK(bulkIndexResponse); + assertThat(entityAsMap(bulkIndexResponse).get("errors"), Matchers.is(false)); + } else if (isUpgradedCluster()) { + assertOK(client().performRequest(putTemplate(client(), "logs-template", LOGS_TEMPLATE))); + assertOK(client().performRequest(rolloverDataStream(client(), "logs-apache-production"))); + final Response bulkIndexResponse = client().performRequest(bulkIndex("logs-apache-production", () -> { + final StringBuilder sb = new StringBuilder(); + for (int i = 0; i < randomIntBetween(10, 20); i++) { + sb.append( + String.format( + BULK_INDEX_REQUEST, + DateFormatter.forPattern(FormatNames.DATE_TIME.getName()).format(Instant.now()), + randomFrom("foo", "bar"), + randomFrom("PUT", "POST", "GET"), + InetAddresses.toAddrString(randomIp(randomBoolean())), + randomIntBetween(20, 50) + ) + ); + sb.append("\n"); + } + return sb.toString(); + })); + assertOK(bulkIndexResponse); + assertThat(entityAsMap(bulkIndexResponse).get("errors"), Matchers.is(false)); + + assertIndexMappingsAndSettings(0, Matchers.nullValue(), matchesMap().extraOk()); + assertIndexMappingsAndSettings(1, Matchers.nullValue(), matchesMap().extraOk()); + assertIndexMappingsAndSettings(2, Matchers.nullValue(), matchesMap().extraOk()); + assertIndexMappingsAndSettings( + 3, + Matchers.equalTo("logsdb"), + matchesMap().extraOk().entry("_source", Map.of("mode", "synthetic")) + ); + } + } + + private void assertIndexMappingsAndSettings(int backingIndex, final Matcher indexModeMatcher, final MapMatcher mappingsMatcher) + throws IOException { + assertThat( + getSettings(client(), 
getWriteBackingIndex(client(), "logs-apache-production", backingIndex)).get("index.mode"), + indexModeMatcher + ); + assertMap(getIndexMappingAsMap(getWriteBackingIndex(client(), "logs-apache-production", backingIndex)), mappingsMatcher); + } + + private static Request createDataStream(final String dataStreamName) { + return new Request("PUT", "/_data_stream/" + dataStreamName); + } + + private static Request bulkIndex(final String dataStreamName, final Supplier bulkIndexRequestSupplier) { + final Request request = new Request("POST", dataStreamName + "/_bulk"); + request.setJsonEntity(bulkIndexRequestSupplier.get()); + request.addParameter("refresh", "true"); + return request; + } + + private static Request putTemplate(final RestClient client, final String templateName, final String mappings) throws IOException { + final Request request = new Request("PUT", "/_index_template/" + templateName); + request.setJsonEntity(mappings); + return request; + } + + private static Request rolloverDataStream(final RestClient client, final String dataStreamName) throws IOException { + return new Request("POST", "/" + dataStreamName + "/_rollover"); + } + + @SuppressWarnings("unchecked") + private static String getWriteBackingIndex(final RestClient client, final String dataStreamName, int backingIndex) throws IOException { + final Request request = new Request("GET", "_data_stream/" + dataStreamName); + final List dataStreams = (List) entityAsMap(client.performRequest(request)).get("data_streams"); + final Map dataStream = (Map) dataStreams.get(0); + final List> backingIndices = (List>) dataStream.get("indices"); + return backingIndices.get(backingIndex).get("index_name"); + } + + @SuppressWarnings("unchecked") + private static Map getSettings(final RestClient client, final String indexName) throws IOException { + final Request request = new Request("GET", "/" + indexName + "/_settings?flat_settings"); + return ((Map>) entityAsMap(client.performRequest(request)).get(indexName)).get("settings"); + } +} diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java index 593630546845d..2ee668c0e3fe1 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -59,10 +59,7 @@ public void testSnapshotBasedRecovery() throws Exception { final String repositoryName = "snapshot_based_recovery_repo"; final int numDocs = 200; if (isOldCluster()) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") + Settings.Builder settings = indexSettings(1, 0).put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster createIndex(indexName, settings.build()); ensureGreen(indexName); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json b/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json index a96be0d63834e..f0537eee575d2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/capabilities.json @@ -41,6 +41,11 @@ "capabilities": { "type": 
"string", "description": "Comma-separated list of arbitrary API capabilities to check" + }, + "local_only": { + "type": "boolean", + "description": "True if only the node being called should be considered", + "visibility": "private" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create_data_stream.json index f8f3e238661c7..3df9232c3a5c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create_data_stream.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create_data_stream.json @@ -26,6 +26,14 @@ ] }, "params":{ + "timeout":{ + "type":"time", + "description":"Specify timeout for acknowledging the cluster state update" + }, + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json index 26f015f6028c6..dc9da50e0417b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json @@ -37,6 +37,10 @@ ], "default":"open", "description":"Whether wildcard expressions should get expanded to open or closed indices (default: open)" + }, + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json index e8aa0f00afa01..6f05af1485f98 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json @@ -41,6 +41,10 @@ "include_defaults":{ "type":"boolean", "description":"Return all relevant default configurations for the data stream (default: false)" + }, + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json index ec878f1fc9f62..59cd8521f275e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json @@ -47,6 +47,10 @@ "include_defaults":{ "type":"boolean", "description":"Return all relevant default configurations for the data stream (default: false)" + }, + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.migrate_to_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.migrate_to_data_stream.json index 4254ae79a23e8..879f086cc0675 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.migrate_to_data_stream.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.migrate_to_data_stream.json @@ -26,6 +26,14 @@ ] }, "params":{ + "timeout":{ + "type":"time", + "description":"Specify timeout for acknowledging the cluster state update" + }, + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master" + } } } } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.promote_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.promote_data_stream.json index 5b51a900235ca..8c4c747fa8c16 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.promote_data_stream.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.promote_data_stream.json @@ -26,6 +26,10 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Specify timeout for connection to master" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json index f9afdbfb85f9c..8e80cd15d9ea2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json @@ -65,6 +65,10 @@ ], "default":"open", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." + }, + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" } } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc index 4b9399d052ce6..0ddac662e73ef 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc @@ -107,6 +107,10 @@ A `skip` section, on the other hand, defines certain conditions that, if met, wi - `features`: Only kept for a transition period, please use <> in the `requires` section instead. +Note that `skip` with `capabilities` or `cluster_features` will skip the test if *any* node in the cluster +has the feature or capability. `requires` will only run the test if *all* of the nodes in the cluster +have the feature or capability. + `requires` and `skip` sections must specify at least one of the options mentioned above. Unless only `test_runner_features` or legacy test runner `features` are specified, a `reason` must be given. @@ -134,7 +138,7 @@ other test runners to skip tests if they do not support the capabilities API yet path: /_api parameters: [param1, param2] capabilities: [cap1, cap2] - test_runner_feature: [capabilities] + test_runner_features: [capabilities] reason: Capability required to run test - do: ... test definitions ... @@ -142,13 +146,6 @@ other test runners to skip tests if they do not support the capabilities API yet The `capabilities` field is an array containing one or several capabilities checks. -*NOTE: If planning to `skip` on capabilities, keep in mind this might lead to unexpected results in _mixed cluster_ -tests!* A test is only skipped if *all* nodes support the requested capabilities, in _mixed clusters_ this might not be -the case: such a cluster can consist of a mix of nodes where some support respective capabilities and others don't, -additionally there might even be nodes that do not support the capabilities API at all. -In such cases the capabilities check will *not* succeed, hence the test is *not* skipped and might randomly hit one -of the nodes that actually support what you intended to skip on. This might then break your assumptions and fail the test. - Capabilities are declared as part of an implementation of `RestHandler`. 
Override the `supportedQueryParameters` and/or the `supportedCapabilities` methods: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml index f4f6245603aab..a2dfe3784d5ae 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/10_basic.yml @@ -229,3 +229,23 @@ - match: { items.0.index.error.type: illegal_argument_exception } - match: { items.0.index.error.reason: "no write index is defined for alias [test_index]. The write index may be explicitly disabled using is_write_index=false or the alias points to multiple indices without one being designated as a write index" } +--- +"Took is not orders of magnitude off": + - requires: + cluster_features: ["gte_v8.15.1"] + reason: "Bug reporting wrong took time introduced in 8.15.0, fixed in 8.15.1" + - do: + bulk: + body: + - index: + _index: took_test + - f: 1 + - index: + _index: took_test + - f: 2 + - index: + _index: took_test + - f: 3 + - match: { errors: false } + - gte: { took: 0 } + - lte: { took: 60000 } # Making sure we have a reasonable upper bound and that we're not for example returning nanoseconds diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml index 94c19a4d69e17..5881ec83ebe85 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml @@ -1,25 +1,28 @@ --- "Metrics object indexing": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.3.0"] reason: added in 8.3.0 - do: - indices.put_template: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -65,20 +68,23 @@ --- "Root without subobjects": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.3.0"] reason: added in 8.3.0 - do: - indices.put_template: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -124,27 +130,30 @@ --- "Metrics object indexing with synthetic source": - requires: - test_runner_features: 
allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.4.0"] reason: added in 8.4.0 - do: - indices.put_template: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -191,22 +200,25 @@ --- "Root without subobjects with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.4.0"] reason: added in 8.4.0 - do: - indices.put_template: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml new file mode 100644 index 0000000000000..414c24cfffd7d --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml @@ -0,0 +1,262 @@ +--- +"Metrics object indexing": + - requires: + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] + cluster_features: ["mapper.subobjects_auto"] + reason: requires supporting subobjects auto setting + + - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" + indices.put_index_template: + name: test + body: + index_patterns: test-* + template: + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { metrics.host.name: localhost, metrics.host.id: 1, metrics.time: 10, metrics.time.max: 100, metrics.time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: metrics* + - match: {fields.metrics\.host\.id.long.searchable: true} + - match: {fields.metrics\.host\.id.long.aggregatable: true} + - match: {fields.metrics\.host\.name.keyword.searchable: true} + - match: {fields.metrics\.host\.name.keyword.aggregatable: true} + - match: 
{fields.metrics\.time.long.searchable: true} + - match: {fields.metrics\.time.long.aggregatable: true} + - match: {fields.metrics\.time\.max.long.searchable: true} + - match: {fields.metrics\.time\.max.long.aggregatable: true} + - match: {fields.metrics\.time\.min.long.searchable: true} + - match: {fields.metrics\.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + metrics.host.name: localhost + metrics.host.id: 1 + metrics.time: 10 + metrics.time.max: 100 + metrics.time.min: 1 + +--- +"Root with metrics": + - requires: + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] + cluster_features: ["mapper.subobjects_auto"] + reason: requires supporting subobjects auto setting + + - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" + indices.put_index_template: + name: test + body: + index_patterns: test-* + template: + mappings: + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { host.name: localhost, host.id: 1, time: 10, time.max: 100, time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: [host*, time*] + - match: {fields.host\.name.keyword.searchable: true} + - match: {fields.host\.name.keyword.aggregatable: true} + - match: {fields.host\.id.long.searchable: true} + - match: {fields.host\.id.long.aggregatable: true} + - match: {fields.time.long.searchable: true} + - match: {fields.time.long.aggregatable: true} + - match: {fields.time\.max.long.searchable: true} + - match: {fields.time\.max.long.aggregatable: true} + - match: {fields.time\.min.long.searchable: true} + - match: {fields.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 + +--- +"Metrics object indexing with synthetic source": + - requires: + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] + cluster_features: ["mapper.subobjects_auto"] + reason: added in 8.4.0 + + - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" + indices.put_index_template: + name: test + body: + index_patterns: test-* + template: + mappings: + _source: + mode: synthetic + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { metrics.host.name: localhost, metrics.host.id: 1, metrics.time: 10, metrics.time.max: 100, metrics.time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: metrics* + - match: 
{fields.metrics\.host\.id.long.searchable: true} + - match: {fields.metrics\.host\.id.long.aggregatable: true} + - match: {fields.metrics\.host\.name.keyword.searchable: true} + - match: {fields.metrics\.host\.name.keyword.aggregatable: true} + - match: {fields.metrics\.time.long.searchable: true} + - match: {fields.metrics\.time.long.aggregatable: true} + - match: {fields.metrics\.time\.max.long.searchable: true} + - match: {fields.metrics\.time\.max.long.aggregatable: true} + - match: {fields.metrics\.time\.min.long.searchable: true} + - match: {fields.metrics\.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + metrics: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 + +--- +"Root without subobjects with synthetic source": + - requires: + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] + cluster_features: ["mapper.subobjects_auto"] + reason: added in 8.4.0 + + - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" + indices.put_index_template: + name: test + body: + index_patterns: test-* + template: + mappings: + _source: + mode: synthetic + subobjects: auto + properties: + host.name: + type: keyword + + - do: + allowed_warnings_regex: + - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template" + index: + index: test-1 + id: 1 + refresh: true + body: + { host.name: localhost, host.id: 1, time: 10, time.max: 100, time.min: 1 } + + - do: + field_caps: + index: test-1 + fields: [host*, time*] + - match: {fields.host\.name.keyword.searchable: true} + - match: {fields.host\.name.keyword.aggregatable: true} + - match: {fields.host\.id.long.searchable: true} + - match: {fields.host\.id.long.aggregatable: true} + - match: {fields.time.long.searchable: true} + - match: {fields.time.long.aggregatable: true} + - match: {fields.time\.max.long.searchable: true} + - match: {fields.time\.max.long.aggregatable: true} + - match: {fields.time\.min.long.searchable: true} + - match: {fields.time\.min.long.aggregatable: true} + + - do: + get: + index: test-1 + id: 1 + - match: {_index: "test-1"} + - match: {_id: "1"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + host.name: localhost + host.id: 1 + time: 10 + time.max: 100 + time.min: 1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 22deb7012c4ed..fa08efe402b43 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -1204,3 +1204,187 @@ nested object with stored array: - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } - match: { hits.hits.1._source.nested_array_stored.1.b.0.c: 20 } - match: { hits.hits.1._source.nested_array_stored.1.b.1.c: 200 } + +--- +empty nested object sorted as a first document: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + 
indices.create: + index: test + body: + settings: + index: + sort.field: "name" + sort.order: "asc" + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + nested: + type: nested + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "B", "nested": { "a": "b" } }' + - '{ "create": { } }' + - '{ "name": "A" }' + + - match: { errors: false } + + - do: + search: + index: test + sort: name + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.nested.a: "b" } + +--- +subobjects auto: + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: requires tracking ignored source and supporting subobjects auto setting + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + subobjects: auto + properties: + id: + type: integer + regular: + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + stored: + store_array_source: true + properties: + span: + properties: + id: + type: keyword + trace: + properties: + id: + type: keyword + nested: + type: nested + auto_obj: + type: object + subobjects: auto + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "id": 1, "foo": 10, "foo.bar": 100, "regular": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 2, "foo": 20, "foo.bar": 200, "stored": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }' + - '{ "create": { } }' + - '{ "id": 3, "foo": 30, "foo.bar": 300, "nested": [ { "a": 10, "b": 20 }, { "a": 100, "b": 200 } ] }' + - '{ "create": { } }' + - '{ "id": 4, "auto_obj": { "foo": 40, "foo.bar": 400 } }' + + - match: { errors: false } + + - do: + search: + index: test + sort: id + + - match: { hits.hits.0._source.id: 1 } + - match: { hits.hits.0._source.foo: 10 } + - match: { hits.hits.0._source.foo\.bar: 100 } + - match: { hits.hits.0._source.regular.span.id: "1" } + - match: { hits.hits.0._source.regular.trace.id: [ "a", "b" ] } + - match: { hits.hits.1._source.id: 2 } + - match: { hits.hits.1._source.foo: 20 } + - match: { hits.hits.1._source.foo\.bar: 200 } + - match: { hits.hits.1._source.stored.0.trace.id: a } + - match: { hits.hits.1._source.stored.0.span.id: "1" } + - match: { hits.hits.1._source.stored.1.trace.id: b } + - match: { hits.hits.1._source.stored.1.span.id: "1" } + - match: { hits.hits.2._source.id: 3 } + - match: { hits.hits.2._source.foo: 30 } + - match: { hits.hits.2._source.foo\.bar: 300 } + - match: { hits.hits.2._source.nested.0.a: 10 } + - match: { hits.hits.2._source.nested.0.b: 20 } + - match: { hits.hits.2._source.nested.1.a: 100 } + - match: { hits.hits.2._source.nested.1.b: 200 } + - match: { hits.hits.3._source.id: 4 } + - match: { hits.hits.3._source.auto_obj.foo: 40 } + - match: { hits.hits.3._source.auto_obj.foo\.bar: 400 } + +--- +# 112156 +stored field under object with store_array_source: + - requires: + cluster_features: ["mapper.source.synthetic_source_stored_fields_advance_fix"] + reason: requires bug fix to be implemented + + - do: + indices.create: + index: test + body: + settings: + index: + sort.field: "name" + sort.order: "asc" + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + obj: + store_array_source: true + properties: + foo: + type: keyword 
+ store: true + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "B", "obj": null }' + - '{ "create": { } }' + - '{ "name": "A", "obj": [ { "foo": "hello_from_the_other_side" } ] }' + + - match: { errors: false } + + - do: + search: + index: test + sort: name + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.obj: [ { "foo": "hello_from_the_other_side" } ] } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.obj: null } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml index 45bcf64f98945..3d82539944a97 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml @@ -449,6 +449,115 @@ index: test-generic - match: { test-generic.mappings.properties.parent.properties.child\.grandchild.type: "keyword" } + +--- +"Composable index templates that include subobjects: auto at root": + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" + test_runner_features: "allowed_warnings" + + - do: + cluster.put_component_template: + name: test-subobjects + body: + template: + mappings: + subobjects: auto + properties: + message: + enabled: false + + - do: + cluster.put_component_template: + name: test-field + body: + template: + mappings: + properties: + parent.subfield: + type: keyword + + - do: + allowed_warnings: + - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation" + indices.put_index_template: + name: test-composable-template + body: + index_patterns: + - test-* + composed_of: + - test-subobjects + - test-field + - is_true: acknowledged + + - do: + indices.create: + index: test-generic + + - do: + indices.get_mapping: + index: test-generic + - match: { test-generic.mappings.properties.parent\.subfield.type: "keyword" } + - match: { test-generic.mappings.properties.message.type: "object" } + +--- +"Composable index templates that include subobjects: auto on arbitrary field": + - requires: + cluster_features: ["mapper.subobjects_auto"] + reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" + test_runner_features: "allowed_warnings" + + - do: + cluster.put_component_template: + name: test-subobjects + body: + template: + mappings: + properties: + parent: + type: object + subobjects: auto + properties: + message: + enabled: false + + - do: + cluster.put_component_template: + name: test-subfield + body: + template: + mappings: + properties: + parent: + properties: + child.grandchild: + type: keyword + + - do: + allowed_warnings: + - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation" + indices.put_index_template: + name: test-composable-template + body: + index_patterns: + - test-* + composed_of: + - test-subobjects + - test-subfield + - 
is_true: acknowledged + + - do: + indices.create: + index: test-generic + + - do: + indices.get_mapping: + index: test-generic + - match: { test-generic.mappings.properties.parent.properties.child\.grandchild.type: "keyword" } + - match: { test-generic.mappings.properties.parent.properties.message.type: "object" } + + --- "Composition of component templates with different legal field mappings": - skip: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml index 80a8ccf0d1063..11ffbe1d8464d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml @@ -16,22 +16,22 @@ more than 1: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -66,8 +66,8 @@ more than 1: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -76,8 +76,8 @@ more than 1: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -86,8 +86,8 @@ more than 1: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -117,22 +117,22 @@ exactly 1: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -167,8 +167,8 @@ exactly 1: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -177,8 +177,8 @@ exactly 1: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -187,8 +187,8 @@ exactly 1: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -221,22 +221,22 @@ nested: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -271,8 +271,8 @@ nested: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -281,8 +281,8 @@ nested: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -291,8 +291,8 @@ nested: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml index 38bf9d72ef8ff..4c8d7736631c9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml @@ -15,22 +15,22 @@ routing required: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -65,8 +65,8 @@ routing required: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -75,8 +75,8 @@ routing required: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -85,8 +85,8 @@ routing required: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -122,22 +122,22 @@ nested: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -172,8 +172,8 @@ nested: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -182,8 +182,8 @@ nested: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -192,8 +192,8 @@ nested: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_mapping.yml deleted file mode 100644 index af4b1a1999669..0000000000000 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_mapping.yml +++ /dev/null @@ -1,94 +0,0 @@ ---- -stored _source mode is not supported: - - requires: - test_runner_features: [capabilities] - capabilities: - - method: PUT - path: /{index} - capabilities: [logsdb_index_mode] - reason: "Support for 'logsdb' index mode capability required" - - - skip: - known_issues: - - cluster_feature: "gte_v8.15.0" - fixed_by: "gte_v8.16.0" - reason: "Development of logs index mode spans 8.15 and 8.16" - - - do: - catch: bad_request - indices.create: - index: test-stored-source - body: - settings: - index: - mode: logsdb - mappings: - _source: - mode: stored - properties: - "@timestamp": - type: date - host.name: - type: keyword - - - match: { error.type: "mapper_parsing_exception" } - - match: { error.root_cause.0.type: "mapper_parsing_exception" } - - match: { error.reason: "Failed to parse mapping: Indices with with index mode [logs] only support synthetic source" } - ---- -disabled _source is not supported: - - requires: - test_runner_features: [capabilities] - capabilities: - - method: PUT - path: /{index} - capabilities: [logsdb_index_mode] - reason: "Support for 'logsdb' index mode capability required" - - - skip: - 
known_issues: - - cluster_feature: "gte_v8.15.0" - fixed_by: "gte_v8.16.0" - reason: "Development of logs index mode spans 8.15 and 8.16" - - - do: - catch: bad_request - indices.create: - index: test-disabled-source - body: - settings: - index: - mode: logsdb - mappings: - _source: - enabled: false - properties: - "@timestamp": - type: date - host.name: - type: keyword - - - match: { error.type: "mapper_parsing_exception" } - - match: { error.root_cause.0.type: "mapper_parsing_exception" } - - match: { error.reason: "Failed to parse mapping: Indices with with index mode [logsdb] only support synthetic source" } - - - do: - catch: bad_request - indices.create: - index: test-disabled-source - body: - settings: - index: - mode: logsdb - mappings: - _source: - mode: disabled - properties: - "@timestamp": - type: date - host.name: - type: keyword - - - match: { error.type: "mapper_parsing_exception" } - - match: { error.root_cause.0.type: "mapper_parsing_exception" } - - match: { error.reason: "Failed to parse mapping: Indices with with index mode [logs] only support synthetic source" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml new file mode 100644 index 0000000000000..d209c839d904b --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml @@ -0,0 +1,94 @@ +--- +stored _source mode is not supported: + - requires: + test_runner_features: [capabilities] + capabilities: + - method: PUT + path: /{index} + capabilities: [logsdb_index_mode] + reason: "Support for 'logsdb' index mode capability required" + + - skip: + known_issues: + - cluster_feature: "gte_v8.15.0" + fixed_by: "gte_v8.16.0" + reason: "Development of logs index mode spans 8.15 and 8.16" + + - do: + catch: bad_request + indices.create: + index: test-stored-source + body: + settings: + index: + mode: logsdb + mappings: + _source: + mode: stored + properties: + "@timestamp": + type: date + host.name: + type: keyword + + - match: { error.type: "mapper_parsing_exception" } + - match: { error.root_cause.0.type: "mapper_parsing_exception" } + - match: { error.reason: "Failed to parse mapping: Indices with with index mode [logsdb] only support synthetic source" } + +--- +disabled _source is not supported: + - requires: + test_runner_features: [capabilities] + capabilities: + - method: PUT + path: /{index} + capabilities: [logsdb_index_mode] + reason: "Support for 'logsdb' index mode capability required" + + - skip: + known_issues: + - cluster_feature: "gte_v8.15.0" + fixed_by: "gte_v8.16.0" + reason: "Development of logs index mode spans 8.15 and 8.16" + + - do: + catch: bad_request + indices.create: + index: test-disabled-source + body: + settings: + index: + mode: logsdb + mappings: + _source: + enabled: false + properties: + "@timestamp": + type: date + host.name: + type: keyword + + - match: { error.type: "mapper_parsing_exception" } + - match: { error.root_cause.0.type: "mapper_parsing_exception" } + - match: { error.reason: "Failed to parse mapping: Indices with with index mode [logsdb] only support synthetic source" } + + - do: + catch: bad_request + indices.create: + index: test-disabled-source + body: + settings: + index: + mode: logsdb + mappings: + _source: + mode: disabled + properties: + "@timestamp": + type: date + host.name: + type: keyword + + - match: { error.type: "mapper_parsing_exception" } + - match: { 
error.root_cause.0.type: "mapper_parsing_exception" } + - match: { error.reason: "Failed to parse mapping: Indices with with index mode [logsdb] only support synthetic source" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml index 2935c0c1c41b5..ff17a92ed0fcc 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml @@ -46,6 +46,94 @@ keyword: docs.1._source: kwd: bar +--- +keyword with normalizer: + - requires: + cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] + reason: support for normalizer on keyword fields + - do: + indices.create: + index: test-keyword-with-normalizer + body: + settings: + analysis: + normalizer: + lowercase: + type: custom + filter: + - lowercase + mappings: + _source: + mode: synthetic + properties: + keyword: + type: keyword + normalizer: lowercase + keyword_with_ignore_above: + type: keyword + normalizer: lowercase + ignore_above: 10 + keyword_without_doc_values: + type: keyword + normalizer: lowercase + doc_values: false + + - do: + index: + index: test-keyword-with-normalizer + id: 1 + body: + keyword: "the Quick Brown Fox jumps over the lazy Dog" + keyword_with_ignore_above: "the Quick Brown Fox jumps over the lazy Dog" + keyword_without_doc_values: "the Quick Brown Fox jumps over the lazy Dog" + + - do: + index: + index: test-keyword-with-normalizer + id: 2 + body: + keyword: "The five BOXING wizards jump Quickly" + keyword_with_ignore_above: "The five BOXING wizards jump Quickly" + keyword_without_doc_values: "The five BOXING wizards jump Quickly" + + - do: + index: + index: test-keyword-with-normalizer + id: 3 + body: + keyword: [ "May the FORCE be with You!", "Do or Do Not, There is no Try" ] + keyword_with_ignore_above: [ "May the FORCE be with You!", "Do or Do Not, There is no Try" ] + keyword_without_doc_values: [ "May the FORCE be with You!", "Do or Do Not, There is no Try" ] + + - do: + mget: + index: test-keyword-with-normalizer + body: + ids: [ 1, 2, 3 ] + - match: { docs.0._index: "test-keyword-with-normalizer" } + - match: { docs.0._id: "1" } + - match: + docs.0._source: + keyword: "the Quick Brown Fox jumps over the lazy Dog" + keyword_with_ignore_above: "the Quick Brown Fox jumps over the lazy Dog" + keyword_without_doc_values: "the Quick Brown Fox jumps over the lazy Dog" + + - match: { docs.1._index: "test-keyword-with-normalizer" } + - match: { docs.1._id: "2" } + - match: + docs.1._source: + keyword: "The five BOXING wizards jump Quickly" + keyword_with_ignore_above: "The five BOXING wizards jump Quickly" + keyword_without_doc_values: "The five BOXING wizards jump Quickly" + + - match: { docs.2._index: "test-keyword-with-normalizer" } + - match: { docs.2._id: "3" } + - match: + docs.2._source: + keyword: [ "May the FORCE be with You!", "Do or Do Not, There is no Try" ] + keyword_with_ignore_above: [ "May the FORCE be with You!", "Do or Do Not, There is no Try" ] + keyword_without_doc_values: [ "May the FORCE be with You!", "Do or Do Not, There is no Try" ] + --- stored text: - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 35089cc4c85a7..9d6e8da8c1e1e 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -509,6 +509,102 @@ - match: { nodes.$node_id.indices.indices.index1.mappings.total_estimated_overhead_in_bytes: 28672 } --- +"Lucene segment level fields stats": + + - requires: + cluster_features: ["mapper.segment_level_fields_stats"] + reason: "segment level fields stats" + + - do: + indices.create: + index: index1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + routing.rebalance.enable: none + mappings: + runtime: + a_source_field: + type: keyword + properties: + "@timestamp": + type: date + authors: + properties: + age: + type: long + company: + type: text + fields: + keyword: + type: keyword + ignore_above: 256 + name: + properties: + first_name: + type: keyword + full_name: + type: text + last_name: + type: keyword + link: + type: alias + path: url + title: + type: text + url: + type: keyword + - do: + cluster.state: {} + + - set: + routing_table.indices.index1.shards.0.0.node: node_id + + - do: + nodes.stats: { metric: _all, level: "indices", human: true } + + - do: + index: + index: index1 + body: { "title": "foo", "@timestamp": "2023-10-15T14:12:12" } + - do: + indices.flush: + index: index1 + - do: + nodes.stats: { metric: _all, level: "indices", human: true } + + - gte: { nodes.$node_id.indices.mappings.total_count: 28 } + - lte: { nodes.$node_id.indices.mappings.total_count: 29 } + - gte: { nodes.$node_id.indices.mappings.total_estimated_overhead_in_bytes: 28672 } + - lte: { nodes.$node_id.indices.mappings.total_estimated_overhead_in_bytes: 29696 } + - match: { nodes.$node_id.indices.mappings.total_segments: 1 } + - gte: { nodes.$node_id.indices.mappings.total_segment_fields: 28 } + - lte: { nodes.$node_id.indices.mappings.total_segment_fields: 29 } + - gte: { nodes.$node_id.indices.mappings.average_fields_per_segment: 28 } + - lte: { nodes.$node_id.indices.mappings.average_fields_per_segment: 29 } + + - do: + index: + index: index1 + body: { "title": "bar", "@timestamp": "2023-11-15T14:12:12" } + - do: + indices.flush: + index: index1 + - do: + nodes.stats: { metric: _all, level: "indices", human: true } + + - gte: { nodes.$node_id.indices.mappings.total_count: 28 } + - lte: { nodes.$node_id.indices.mappings.total_count: 29 } + - gte: { nodes.$node_id.indices.mappings.total_estimated_overhead_in_bytes: 28672 } + - lte: { nodes.$node_id.indices.mappings.total_estimated_overhead_in_bytes: 29696 } + - match: { nodes.$node_id.indices.mappings.total_segments: 2 } + - gte: { nodes.$node_id.indices.mappings.total_segment_fields: 56 } + - lte: { nodes.$node_id.indices.mappings.total_segment_fields: 58 } + - gte: { nodes.$node_id.indices.mappings.average_fields_per_segment: 28 } + - lte: { nodes.$node_id.indices.mappings.average_fields_per_segment: 29 } +--- + "indices mappings does not exist in shards level": - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml index 72c6abab22600..d627be2fb15c3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml @@ -411,3 +411,53 @@ setup: - match: {hits.total.value: 1} - match: 
{hits.hits.0._id: "2"} +--- +"nested Knn search with required similarity appropriately filters inner_hits": + - requires: + cluster_features: "gte_v8.16.0" + reason: 'bugfix for 8.16' + + - do: + search: + index: test + body: + query: + nested: + path: nested + inner_hits: + size: 3 + _source: false + fields: + - nested.paragraph_id + query: + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + num_candidates: 3 + similarity: 10.5 + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "2"} + - length: {hits.hits.0.inner_hits.nested.hits.hits: 1} + - match: {hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} + + - do: + search: + index: test + body: + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + num_candidates: 3 + k: 3 + similarity: 10.5 + inner_hits: + size: 3 + _source: false + fields: + - nested.paragraph_id + + - match: {hits.total.value: 1} + - match: {hits.hits.0._id: "2"} + - length: {hits.hits.0.inner_hits.nested.hits.hits: 1} + - match: {hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index df5d451e3a2e1..b3d86a066550e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -28,15 +28,14 @@ setup: type: hnsw m: 16 ef_construction: 200 - - do: index: index: test id: "1" body: name: cow.jpg - vector: [230.0, 300.33, -34.8988, 15.555, -200.0] - another_vector: [130.0, 115.0, -1.02, 15.555, -100.0] + vector: [ 230.0, 300.33, -34.8988, 15.555, -200.0 ] + another_vector: [ 130.0, 115.0, -1.02, 15.555, -100.0 ] - do: index: @@ -44,8 +43,8 @@ setup: id: "2" body: name: moose.jpg - vector: [-0.5, 100.0, -13, 14.8, -156.0] - another_vector: [-0.5, 50.0, -1, 1, 120] + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + another_vector: [ -0.5, 50.0, -1, 1, 120 ] - do: index: @@ -53,11 +52,11 @@ setup: id: "3" body: name: rabbit.jpg - vector: [0.5, 111.3, -13.0, 14.8, -156.0] - another_vector: [-0.5, 11.0, 0, 12, 111.0] + vector: [ 0.5, 111.3, -13.0, 14.8, -156.0 ] + another_vector: [ -0.5, 11.0, 0, 12, 111.0 ] - do: - indices.refresh: {} + indices.refresh: { } --- "kNN search only": @@ -71,15 +70,15 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 - - match: {hits.hits.0._id: "2"} - - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.0.fields.name.0: "moose.jpg" } - - match: {hits.hits.1._id: "3"} - - match: {hits.hits.1.fields.name.0: "rabbit.jpg"} + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1.fields.name.0: "rabbit.jpg" } --- "kNN multi-field search only": - requires: @@ -91,14 +90,14 @@ setup: body: fields: [ "name" ] knn: - - {field: vector, query_vector: [-0.5, 90.0, -10, 14.8, -156.0], k: 2, num_candidates: 3} - - {field: another_vector, query_vector: [-0.5, 11.0, 0, 12, 111.0], k: 2, num_candidates: 3} + - { field: vector, query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ], k: 2, num_candidates: 3 } + - { field: another_vector, query_vector: [ -0.5, 11.0, 0, 12, 111.0 ], k: 2, num_candidates: 3 } - - match: {hits.hits.0._id: "3"} - - 
match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0.fields.name.0: "rabbit.jpg" } - - match: {hits.hits.1._id: "2"} - - match: {hits.hits.1.fields.name.0: "moose.jpg"} + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1.fields.name.0: "moose.jpg" } --- "kNN search plus query": - requires: @@ -111,21 +110,21 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 query: term: name: cow.jpg - - match: {hits.hits.0._id: "1"} - - match: {hits.hits.0.fields.name.0: "cow.jpg"} + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0.fields.name.0: "cow.jpg" } - - match: {hits.hits.1._id: "2"} - - match: {hits.hits.1.fields.name.0: "moose.jpg"} + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1.fields.name.0: "moose.jpg" } - - match: {hits.hits.2._id: "3"} - - match: {hits.hits.2.fields.name.0: "rabbit.jpg"} + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.2.fields.name.0: "rabbit.jpg" } --- "kNN multi-field search with query": - requires: @@ -137,20 +136,20 @@ setup: body: fields: [ "name" ] knn: - - {field: vector, query_vector: [-0.5, 90.0, -10, 14.8, -156.0], k: 2, num_candidates: 3} - - {field: another_vector, query_vector: [-0.5, 11.0, 0, 12, 111.0], k: 2, num_candidates: 3} + - { field: vector, query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ], k: 2, num_candidates: 3 } + - { field: another_vector, query_vector: [ -0.5, 11.0, 0, 12, 111.0 ], k: 2, num_candidates: 3 } query: term: name: cow.jpg - - match: {hits.hits.0._id: "3"} - - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0.fields.name.0: "rabbit.jpg" } - - match: {hits.hits.1._id: "1"} - - match: {hits.hits.1.fields.name.0: "cow.jpg"} + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1.fields.name.0: "cow.jpg" } - - match: {hits.hits.2._id: "2"} - - match: {hits.hits.2.fields.name.0: "moose.jpg"} + - match: { hits.hits.2._id: "2" } + - match: { hits.hits.2.fields.name.0: "moose.jpg" } --- "kNN search with filter": - requires: @@ -163,16 +162,16 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 filter: term: name: "rabbit.jpg" - - match: {hits.total.value: 1} - - match: {hits.hits.0._id: "3"} - - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0.fields.name.0: "rabbit.jpg" } - do: search: @@ -181,7 +180,7 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 filter: @@ -190,7 +189,7 @@ setup: - term: _id: 2 - - match: {hits.total.value: 0} + - match: { hits.total.value: 0 } --- "kNN search with explicit search_type": @@ -206,7 +205,7 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 @@ -216,7 +215,7 @@ setup: --- "kNN search in _knn_search endpoint": - skip: - features: ["allowed_warnings"] + features: [ "allowed_warnings" ] - do: allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." 
@@ -226,22 +225,22 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 - - match: {hits.hits.0._id: "2"} - - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.0.fields.name.0: "moose.jpg" } - - match: {hits.hits.1._id: "3"} - - match: {hits.hits.1.fields.name.0: "rabbit.jpg"} + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1.fields.name.0: "rabbit.jpg" } --- "kNN search with filter in _knn_search endpoint": - requires: cluster_features: "gte_v8.2.0" reason: 'kNN with filtering added in 8.2' - test_runner_features: ["allowed_warnings"] + test_runner_features: [ "allowed_warnings" ] - do: allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." @@ -251,16 +250,16 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 filter: term: name: "rabbit.jpg" - - match: {hits.total.value: 1} - - match: {hits.hits.0._id: "3"} - - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0.fields.name.0: "rabbit.jpg" } - do: allowed_warnings: @@ -271,7 +270,7 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 2 num_candidates: 3 filter: @@ -280,7 +279,7 @@ setup: - term: _id: 2 - - match: {hits.total.value: 0} + - match: { hits.total.value: 0 } --- "Test nonexistent field is match none": @@ -298,7 +297,7 @@ setup: k: 2 num_candidates: 3 - - length: {hits.hits: 0} + - length: { hits.hits: 0 } - do: indices.create: @@ -347,12 +346,12 @@ setup: k: 3 field: vector similarity: 11 - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] - - length: {hits.hits: 1} + - length: { hits.hits: 1 } - - match: {hits.hits.0._id: "2"} - - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.0.fields.name.0: "moose.jpg" } --- "Vector similarity with filter only": - requires: @@ -368,13 +367,13 @@ setup: k: 3 field: vector similarity: 11 - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] - filter: {"term": {"name": "moose.jpg"}} + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + filter: { "term": { "name": "moose.jpg" } } - - length: {hits.hits: 1} + - length: { hits.hits: 1 } - - match: {hits.hits.0._id: "2"} - - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.0.fields.name.0: "moose.jpg" } - do: search: @@ -386,10 +385,10 @@ setup: k: 3 field: vector similarity: 110 - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] - filter: {"term": {"name": "cow.jpg"}} + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + filter: { "term": { "name": "cow.jpg" } } - - length: {hits.hits: 0} + - length: { hits.hits: 0 } --- "Knn search with mip": - requires: @@ -421,7 +420,7 @@ setup: id: "1" body: name: cow.jpg - vector: [230.0, 300.33, -34.8988, 15.555, -200.0] + vector: [ 230.0, 300.33, -34.8988, 15.555, -200.0 ] - do: index: @@ -429,7 +428,7 @@ setup: id: "2" body: name: moose.jpg - vector: [-0.5, 100.0, -13, 14.8, -156.0] + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] - do: index: @@ -437,10 +436,10 @@ setup: id: "3" body: name: rabbit.jpg - vector: [0.5, 111.3, -13.0, 14.8, 
-156.0] + vector: [ 0.5, 111.3, -13.0, 14.8, -156.0 ] - do: - indices.refresh: {} + indices.refresh: { } - do: search: @@ -451,16 +450,16 @@ setup: num_candidates: 3 k: 3 field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] - - length: {hits.hits: 3} - - match: {hits.hits.0._id: "1"} - - close_to: {hits.hits.0._score: {value: 58694.902, error: 0.01}} - - match: {hits.hits.1._id: "3"} - - close_to: {hits.hits.1._score: {value: 34702.79, error: 0.01}} - - match: {hits.hits.2._id: "2"} - - close_to: {hits.hits.2._score: {value: 33686.29, error: 0.01}} + - length: { hits.hits: 3 } + - match: { hits.hits.0._id: "1" } + - close_to: { hits.hits.0._score: { value: 58694.902, error: 0.01 } } + - match: { hits.hits.1._id: "3" } + - close_to: { hits.hits.1._score: { value: 34702.79, error: 0.01 } } + - match: { hits.hits.2._id: "2" } + - close_to: { hits.hits.2._score: { value: 33686.29, error: 0.01 } } - do: search: @@ -471,14 +470,14 @@ setup: num_candidates: 3 k: 3 field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] filter: { "term": { "name": "moose.jpg" } } - - length: {hits.hits: 1} - - match: {hits.hits.0._id: "2"} - - close_to: {hits.hits.0._score: {value: 33686.29, error: 0.01}} + - length: { hits.hits: 1 } + - match: { hits.hits.0._id: "2" } + - close_to: { hits.hits.0._score: { value: 33686.29, error: 0.01 } } --- "Knn search with _name": - requires: @@ -493,7 +492,7 @@ setup: fields: [ "name" ] knn: field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] k: 3 num_candidates: 3 _name: "my_knn_query" @@ -504,15 +503,41 @@ setup: _name: "my_query" - - match: {hits.hits.0._id: "1"} - - match: {hits.hits.0.fields.name.0: "cow.jpg"} - - match: {hits.hits.0.matched_queries.0: "my_knn_query"} - - match: {hits.hits.0.matched_queries.1: "my_query"} + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0.fields.name.0: "cow.jpg" } + - match: { hits.hits.0.matched_queries.0: "my_knn_query" } + - match: { hits.hits.0.matched_queries.1: "my_query" } + + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1.fields.name.0: "moose.jpg" } + - match: { hits.hits.1.matched_queries.0: "my_knn_query" } + + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.2.fields.name.0: "rabbit.jpg" } + - match: { hits.hits.2.matched_queries.0: "my_knn_query" } - - match: {hits.hits.1._id: "2"} - - match: {hits.hits.1.fields.name.0: "moose.jpg"} - - match: {hits.hits.1.matched_queries.0: "my_knn_query"} +--- +"kNN search on empty index should return 0 results and not an error": + - requires: + cluster_features: "gte_v8.15.1" + reason: 'Error fixed in 8.15.1' + - do: + indices.create: + index: test_empty + body: + mappings: + properties: + vector: + type: dense_vector + - do: + search: + index: test_empty + body: + fields: [ "name" ] + knn: + field: vector + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 2 + num_candidates: 3 - - match: {hits.hits.2._id: "3"} - - match: {hits.hits.2.fields.name.0: "rabbit.jpg"} - - match: {hits.hits.2.matched_queries.0: "my_knn_query"} + - match: { hits.total.value: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/230_interval_query.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/230_interval_query.yml index 82fb18a879346..99bd001bd95e2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/230_interval_query.yml 
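The close_to assertions in the "Knn search with mip" test above follow directly from the indexed vectors and the query vector: each expected score equals the dot product plus one, which is consistent with how a non-negative maximum inner product is mapped to a positive score. The following standalone check recomputes them; the "dot product + 1" formula is inferred from the asserted values themselves, not taken from the Elasticsearch source.

// Recomputes the expected scores in the "Knn search with mip" test from the raw vectors.
public class MipScoreCheck {
    static double dot(double[] a, double[] b) {
        double sum = 0;
        for (int i = 0; i < a.length; i++) {
            sum += a[i] * b[i];
        }
        return sum;
    }

    public static void main(String[] args) {
        double[] query = { -0.5, 90.0, -10, 14.8, -156.0 };
        double[][] docs = {
            { 230.0, 300.33, -34.8988, 15.555, -200.0 },  // doc 1, cow.jpg
            { -0.5, 100.0, -13, 14.8, -156.0 },           // doc 2, moose.jpg
            { 0.5, 111.3, -13.0, 14.8, -156.0 },          // doc 3, rabbit.jpg
        };
        // Prints 58694.902, 33686.290, 34702.790 - matching the close_to
        // assertions for docs 1, 2 and 3 within their 0.01 error bounds.
        for (double[] vector : docs) {
            System.out.printf("%.3f%n", dot(query, vector) + 1);
        }
    }
}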
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/230_interval_query.yml @@ -21,6 +21,10 @@ setup: - '{"text" : "Baby its cold there outside"}' - '{"index": {"_index": "test", "_id": "4"}}' - '{"text" : "Outside it is cold and wet"}' + - '{"index": {"_index": "test", "_id": "5"}}' + - '{"text" : "the big bad wolf"}' + - '{"index": {"_index": "test", "_id": "6"}}' + - '{"text" : "the big wolf"}' --- "Test ordered matching": @@ -444,4 +448,31 @@ setup: prefix: out - match: { hits.total.value: 3 } +--- +"Test rewrite disjunctions": + - do: + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - "match": + "query": "the" + - "any_of": + "intervals": + - "match": + "query": "big" + - "match": + "query": "big bad" + - "match": + "query": "wolf" + max_gaps: 0 + ordered: true + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "6" } + - match: { hits.hits.1._id: "5" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml index 703f2a0352fbd..c120bed2d369d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml @@ -1125,3 +1125,55 @@ fetch geo_point: - match: { hits.hits.0.fields.root\.keyword.0: 'parent' } - match: { hits.hits.0.fields.root\.subfield.0: 'child' } - match: { hits.hits.0.fields.root\.subfield\.keyword.0: 'child' } + +--- +"Test with subobjects: auto": + - requires: + cluster_features: "mapper.subobjects_auto" + reason: requires support for subobjects auto setting + + - do: + indices.create: + index: test + body: + mappings: + subobjects: auto + properties: + message: + type: object + subobjects: auto + enabled: false + + - do: + index: + index: test + refresh: true + body: > + { + "root": "parent", + "root.subfield": "child", + "message": { + "foo": 10, + "foo.bar": 20 + } + } + - match: {result: "created"} + + - do: + search: + index: test + body: + query: + term: + root.subfield: + value: 'child' + fields: + - field: 'root*' + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.root.0: 'parent' } + - match: { hits.hits.0.fields.root\.keyword.0: 'parent' } + - match: { hits.hits.0.fields.root\.subfield.0: 'child' } + - match: { hits.hits.0.fields.root\.subfield\.keyword.0: 'child' } + - is_false: hits.hits.0.fields.message + - match: { hits.hits.0._source.message.foo: 10 } + - match: { hits.hits.0._source.message.foo\.bar: 20 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml new file mode 100644 index 0000000000000..d3d0a3bb4df70 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml @@ -0,0 +1,309 @@ +setup: + - requires: + cluster_features: ["gte_v8.16.0"] + reason: Lenient handling of updateable synonyms by default is introduced in 8.16.0 +--- +"Load index with an invalid synonym rule": + - do: + synonyms.put_synonym: + id: set1 + body: + synonyms_set: + synonyms: "foo => bar, baz" + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + filter: + my_synonym_filter: + type: synonym + synonyms_set: set1 + updateable: true + my_stop_filter: + 
type: stop + stopwords: ["baz"] + analyzer: + my_analyzer: + type: custom + tokenizer: whitespace + filter: [ lowercase, my_stop_filter, my_synonym_filter ] + mappings: + properties: + my_field: + type: text + search_analyzer: my_analyzer + + - match: { acknowledged: true } + - match: { shards_acknowledged: true } + + - do: + indices.stats: { index: test_index } + + - match: { indices.test_index.health: "green" } + + - do: + indices.analyze: + index: test_index + body: + analyzer: my_analyzer + text: foo + + - length: { tokens: 1 } + - match: { tokens.0.token: bar } + +--- +"Reload index with invalid synonym rule": + - do: + synonyms.put_synonym: + id: set1 + body: + synonyms_set: + synonyms: "foo => bar" + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + filter: + my_synonym_filter: + type: synonym + synonyms_set: set1 + updateable: true + my_stop_filter: + type: stop + stopwords: ["baz"] + analyzer: + my_analyzer: + type: custom + tokenizer: whitespace + filter: [ lowercase, my_stop_filter, my_synonym_filter ] + mappings: + properties: + my_field: + type: text + search_analyzer: my_analyzer + + - match: { acknowledged: true } + - match: { shards_acknowledged: true } + + - do: + indices.stats: { index: test_index } + + - match: { indices.test_index.health: "green" } + + - do: + indices.analyze: + index: test_index + body: + analyzer: my_analyzer + text: foo + + - length: { tokens: 1 } + - match: { tokens.0.token: bar } + + - do: + synonyms.put_synonym: + id: set1 + body: + synonyms_set: + synonyms: "foo => bar, baz" + + - do: + indices.stats: { index: test_index } + + - match: { indices.test_index.health: "green" } + + - do: + indices.analyze: + index: test_index + body: + analyzer: my_analyzer + text: foo + + - length: { tokens: 1 } + - match: { tokens.0.token: bar } + + - do: + indices.close: + index: test_index + + - match: { acknowledged: true } + - length: { indices: 1 } + + - do: + indices.open: + index: test_index + wait_for_active_shards: all + + - match: { acknowledged: true } + - match: { shards_acknowledged: true } + + - do: + indices.stats: { index: test_index } + + - match: { indices.test_index.health: "green" } + + - do: + indices.analyze: + index: test_index + body: + analyzer: my_analyzer + text: foo + + - length: { tokens: 1 } + - match: { tokens.0.token: bar } + +--- +"Load index with an invalid synonym rule with lenient set to false": + - do: + synonyms.put_synonym: + id: set1 + body: + synonyms_set: + synonyms: "foo => bar, baz" + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + filter: + my_synonym_filter: + type: synonym + synonyms_set: set1 + updateable: true + lenient: false + my_stop_filter: + type: stop + stopwords: ["baz"] + analyzer: + my_analyzer: + type: custom + tokenizer: whitespace + filter: [ lowercase, my_stop_filter, my_synonym_filter ] + mappings: + properties: + my_field: + type: text + search_analyzer: my_analyzer + + - match: { acknowledged: true } + - match: { shards_acknowledged: false } + + - do: + indices.stats: { index: test_index } + + - length: { indices: 0 } + +--- +"Reload index with an invalid synonym rule with lenient set to false": + - do: + synonyms.put_synonym: + id: set1 + body: + synonyms_set: + synonyms: "foo => bar" + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + filter: + 
my_synonym_filter: + type: synonym + synonyms_set: set1 + updateable: true + lenient: false + my_stop_filter: + type: stop + stopwords: [ "baz" ] + analyzer: + my_analyzer: + type: custom + tokenizer: whitespace + filter: [ lowercase, my_stop_filter, my_synonym_filter ] + mappings: + properties: + my_field: + type: text + search_analyzer: my_analyzer + + - match: { acknowledged: true } + - match: { shards_acknowledged: true } + + - do: + indices.stats: { index: test_index } + + - match: { indices.test_index.health: "green" } + + - do: + indices.analyze: + index: test_index + body: + analyzer: my_analyzer + text: foo + + - length: { tokens: 1 } + - match: { tokens.0.token: bar } + + - do: + synonyms.put_synonym: + id: set1 + body: + synonyms_set: + synonyms: "foo => bar, baz" + + - do: + indices.stats: { index: test_index } + + - match: { indices.test_index.health: "green" } + + - do: + indices.analyze: + index: test_index + body: + analyzer: my_analyzer + text: foo + + - length: { tokens: 1 } + - match: { tokens.0.token: bar } + + - do: + indices.close: + index: test_index + + - match: { acknowledged: true } + - length: { indices: 1 } + + - do: + indices.open: + index: test_index + wait_for_active_shards: all + + - match: { acknowledged: true } + - match: { shards_acknowledged: false } + + - do: + indices.stats: { index: test_index } + + - length: { indices: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml index f6d09a4540eb7..89ad933370e1c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml @@ -187,3 +187,44 @@ setup: search_analyzer: my_analyzer - match: { acknowledged: true } - match: { shards_acknowledged: false } + +--- +"Load empty synonyms set from index for an analyzer": + - do: + synonyms.put_synonym: + id: empty_set + body: + synonyms_set: [] + + - do: + indices.create: + index: empty_set_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + filter: + my_synonym_filter: + type: synonym + synonyms_set: empty_set + updateable : true + analyzer: + my_analyzer: + type: custom + tokenizer: standard + filter: [ lowercase, my_synonym_filter ] + mappings: + properties: + my_field: + type: text + search_analyzer: my_analyzer + + - match: { acknowledged: true } + - match: { shards_acknowledged: true } + + - do: + indices.stats: { index: empty_set_index } + + - match: { indices.empty_set_index.health: "green" } diff --git a/server/build.gradle b/server/build.gradle index deadd0a330ef8..c3ac4d9704091 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -267,7 +267,7 @@ tasks.named("dependencyLicenses").configure { tasks.named("licenseHeaders").configure { // Ignore our vendored version of Google Guice - excludes << 'org/elasticsearch/common/inject/**/*' + excludes << 'org/elasticsearch/injection/guice/**/*' // Ignore temporary copies of impending 8.7 Lucene classes excludes << 'org/apache/lucene/search/RegExp87*' excludes << 'org/apache/lucene/search/RegexpQuery87*' diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 
e1804368c2cad..69768d6efe3bd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.CollectionUtils; @@ -33,6 +32,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.CancellableTask; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 1fda9c67a0beb..0c3dac0f99b6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -161,7 +161,7 @@ public void testIndicesShardStats() throws ExecutionException, InterruptedExcept ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); - prepareCreate("test1").setSettings(Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 1)).get(); + prepareCreate("test1").setSettings(indexSettings(2, 1)).get(); response = clusterAdmin().prepareClusterStats().get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW)); @@ -179,7 +179,7 @@ public void testIndicesShardStats() throws ExecutionException, InterruptedExcept assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L)); assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0); - prepareCreate("test2").setSettings(Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0)).get(); + prepareCreate("test2").setSettings(indexSettings(3, 0)).get(); ensureGreen(); response = clusterAdmin().prepareClusterStats().get(); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index 60462863dd09a..2acb38e9bda1e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -17,8 +17,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.plugins.ActionPlugin; import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 26a430123ccd9..27f0cd408e7fb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -306,10 +306,7 @@ public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { public void testFailureToCreateIndexCleansUpIndicesService() { final int numReplicas = internalCluster().numDataNodes(); - Settings settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas) - .build(); + Settings settings = indexSettings(1, numReplicas).build(); assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)).get()); ActionRequestBuilder builder = indicesAdmin().prepareCreate("test-idx-2") @@ -328,10 +325,7 @@ public void testFailureToCreateIndexCleansUpIndicesService() { */ public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception { final int numReplicas = internalCluster().numDataNodes(); - Settings settings = Settings.builder() - .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas)) - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas) + Settings settings = indexSettings(1, numReplicas).put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas)) .build(); assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).get()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 27fd54c39cc95..22549a1562dcd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -276,6 +276,7 @@ public void testSplitIndexPrimaryTerm() throws Exception { .put(indexSettings()) .put("number_of_shards", numberOfShards) .put("index.number_of_routing_shards", numberOfTargetShards) + .put("index.routing.rebalance.enable", EnableAllocationDecider.Rebalance.NONE) ).get(); ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java index 3560b74189d1d..415cfff459a67 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.ReferenceDocs; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -68,7 +69,9 @@ public void testClusterBlockMessageHasIndexName() { ClusterBlockException e = expectThrows(ClusterBlockException.class, prepareIndex("test").setId("1").setSource("foo", "bar")); assertEquals( "index [test] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, " - + "index has read-only-allow-delete block];", + + "index has read-only-allow-delete block; for more information, see " + + ReferenceDocs.FLOOD_STAGE_WATERMARK + + "];", e.getMessage() ); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 4d52383bfc4e1..16f8f51cb8aae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -211,9 +211,8 @@ public void testRolloverWithIndexSettingsWithoutPrefix() throws Exception { assertAcked(prepareCreate("test_index-2").addAlias(testAlias).get()); indexDoc("test_index-2", "1", "field", "value"); flush("test_index-2"); - final Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build(); final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") - .settings(settings) + .settings(indexSettings(1, 0).build()) .alias(new Alias("extra_alias")) .get(); assertThat(response.getOldIndex(), equalTo("test_index-2")); @@ -665,7 +664,7 @@ public void testRolloverWithClosedWriteIndex() throws Exception { assertAcked(prepareCreate(openNonwriteIndex).addAlias(new Alias(aliasName)).get()); assertAcked(prepareCreate(closedIndex).addAlias(new Alias(aliasName)).get()); assertAcked(prepareCreate(writeIndexPrefix + "000001").addAlias(new Alias(aliasName).writeIndex(true)).get()); - + ensureGreen(openNonwriteIndex, closedIndex, writeIndexPrefix + "000001"); index(closedIndex, null, "{\"foo\": \"bar\"}"); index(aliasName, null, "{\"foo\": \"bar\"}"); index(aliasName, null, "{\"foo\": \"bar\"}"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java index d531686bb5207..0ed585164750a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkAfterWriteFsyncFailureIT.java @@ -10,7 +10,6 @@ import org.apache.lucene.tests.mockfile.FilterFileChannel; import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtilsForTesting; @@ -59,13 +58,7 @@ public void testFsyncFailureDoesNotAdvanceLocalCheckpoints() { client().admin() .indices() .prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build() - ) + .setSettings(indexSettings(1, 0).put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)) .setMapping("key", "type=keyword", "val", "type=long") .get(); 
ensureGreen(indexName); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index a9a5bb074c9ac..da2dfc50d7fe9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -10,12 +10,16 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -54,11 +58,14 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; public class PointInTimeIT extends ESIntegTestCase { @@ -84,7 +91,7 @@ public void testBasic() { prepareIndex("test").setId(id).setSource("value", i).get(); } refresh("test"); - BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> { assertThat(resp1.pointInTimeId(), equalTo(pitId)); assertHitCount(resp1, numDocs); @@ -130,7 +137,7 @@ public void testMultipleIndices() { prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { int moreDocs = randomIntBetween(10, 50); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { @@ -212,7 +219,7 @@ public void testRelocation() throws Exception { prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp 
-> { assertHitCount(resp, numDocs); @@ -264,7 +271,7 @@ public void testPointInTimeNotFound() throws Exception { prepareIndex("index").setId(id).setSource("value", i).get(); } refresh(); - BytesReference pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); + BytesReference pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)).getPointInTimeId(); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), resp1 -> { assertHitCount(resp1, index1); if (rarely()) { @@ -305,7 +312,7 @@ public void testIndexNotFound() { prepareIndex("index-2").setId(id).setSource("value", i).get(); } refresh(); - BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse( prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), @@ -348,7 +355,7 @@ public void testCanMatch() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(""" {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")); ensureGreen("test"); - BytesReference pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { for (String node : internalCluster().nodesInclude("test")) { for (IndexService indexService : internalCluster().getInstance(IndicesService.class, node)) { @@ -415,7 +422,7 @@ public void testPartialResults() throws Exception { prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs1 + numDocs2); @@ -447,7 +454,7 @@ public void testPITTiebreak() throws Exception { } } refresh("index-*"); - BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)).getPointInTimeId(); try { for (int size = 1; size <= numIndex; size++) { SortOrder order = randomBoolean() ? 
SortOrder.ASC : SortOrder.DESC; @@ -532,6 +539,176 @@ public void testOpenPITConcurrentShardRequests() throws Exception { } } + public void testMissingShardsWithPointInTime() throws Exception { + final Settings nodeAttributes = Settings.builder().put("node.attr.foo", "bar").build(); + final String masterNode = internalCluster().startMasterOnlyNode(nodeAttributes); + List<String> dataNodes = internalCluster().startDataOnlyNodes(2, nodeAttributes); + + final String index = "my_test_index"; + // we tried randomIntBetween(3, 10) shards here, but more than 3 shards made the test very slow and caused timeouts + final int numShards = 3; + final int numReplicas = 0; + // create an index with numShards shards and 0 replicas + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put("index.routing.allocation.require.foo", "bar") + .build() + ); + + // index some documents + int numDocs = randomIntBetween(10, 50); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + prepareIndex(index).setId(id).setSource("value", i).get(); + } + refresh(index); + + // create a PIT when all shards are present + OpenPointInTimeResponse pointInTimeResponse = openPointInTime(new String[] { index }, TimeValue.timeValueMinutes(1)); + try { + // ensure that the PIT created has all the shards there + assertThat(numShards, equalTo(pointInTimeResponse.getTotalShards())); + assertThat(numShards, equalTo(pointInTimeResponse.getSuccessfulShards())); + assertThat(0, equalTo(pointInTimeResponse.getFailedShards())); + assertThat(0, equalTo(pointInTimeResponse.getSkippedShards())); + + // make a request using the above PIT + assertResponse( + prepareSearch().setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pointInTimeResponse.getPointInTimeId())), + resp -> { + // ensure that all docs are returned + assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponse.getPointInTimeId())); + assertHitCount(resp, numDocs); + } + ); + + // pick a random data node to shut down + final String randomDataNode = randomFrom(dataNodes); + + // find which shards to relocate + final String nodeId = admin().cluster().prepareNodesInfo(randomDataNode).get().getNodes().get(0).getNode().getId(); + Set<Integer> shardsToRelocate = new HashSet<>(); + for (ShardStats stats : admin().indices().prepareStats(index).get().getShards()) { + if (nodeId.equals(stats.getShardRouting().currentNodeId())) { + shardsToRelocate.add(stats.getShardRouting().shardId().id()); + } + } + + final int shardsRemoved = shardsToRelocate.size(); + + // shut down the random data node + internalCluster().stopNode(randomDataNode); + + // ensure that the index is red + ensureRed(index); + + // verify that not all documents can now be retrieved + assertResponse(prepareSearch().setQuery(new MatchAllQueryBuilder()), resp -> { + assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved)); + assertThat(resp.getFailedShards(), equalTo(shardsRemoved)); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs)); + }); + + // create a PIT when some shards are missing + OpenPointInTimeResponse pointInTimeResponseOneNodeDown = openPointInTime( + new String[] { index }, + TimeValue.timeValueMinutes(10), + true + ); + try { + // assert that some shards are indeed missing from the PIT + assertThat(pointInTimeResponseOneNodeDown.getTotalShards(), equalTo(numShards)); +
assertThat(pointInTimeResponseOneNodeDown.getSuccessfulShards(), equalTo(numShards - shardsRemoved)); + assertThat(pointInTimeResponseOneNodeDown.getFailedShards(), equalTo(shardsRemoved)); + assertThat(pointInTimeResponseOneNodeDown.getSkippedShards(), equalTo(0)); + + // ensure that the response now contains fewer documents than the total number of indexed documents + assertResponse( + prepareSearch().setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId())), + resp -> { + assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved)); + assertThat(resp.getFailedShards(), equalTo(shardsRemoved)); + assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponseOneNodeDown.getPointInTimeId())); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs)); + } + ); + + // add another node to the cluster and re-allocate the shards + final String newNodeName = internalCluster().startDataOnlyNode(nodeAttributes); + try { + for (int shardId : shardsToRelocate) { + ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand(index, shardId, newNodeName, true)); + } + ensureGreen(TimeValue.timeValueMinutes(2), index); + + // index some more documents + for (int i = numDocs; i < numDocs * 2; i++) { + String id = Integer.toString(i); + prepareIndex(index).setId(id).setSource("value", i).get(); + } + refresh(index); + + // ensure that we now see at least numDocs results from the updated index + assertResponse(prepareSearch().setQuery(new MatchAllQueryBuilder()), resp -> { + assertThat(resp.getSuccessfulShards(), equalTo(numShards)); + assertThat(resp.getFailedShards(), equalTo(0)); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, greaterThan((long) numDocs)); + }); + + // ensure that when using the previously created PIT, we'd see the same number of documents as before regardless of the + // newly indexed documents + assertResponse( + prepareSearch().setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId())), + resp -> { + assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponseOneNodeDown.getPointInTimeId())); + assertThat(resp.getTotalShards(), equalTo(numShards)); + assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved)); + assertThat(resp.getFailedShards(), equalTo(shardsRemoved)); + assertThat(resp.getShardFailures().length, equalTo(shardsRemoved)); + for (var failure : resp.getShardFailures()) { + assertTrue(shardsToRelocate.contains(failure.shardId())); + assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); + } + assertNotNull(resp.getHits().getTotalHits()); + // we expect fewer documents, as the newly indexed ones should not be part of the PIT + assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs)); + } + ); + + Exception exc = expectThrows( + Exception.class, + () -> prepareSearch().setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId())) + .setAllowPartialSearchResults(false) + .get() + ); + assertThat(exc.getCause().getMessage(), containsString("missing shards")); + + } finally { + internalCluster().stopNode(newNodeName); + } + } finally { + closePointInTime(pointInTimeResponseOneNodeDown.getPointInTimeId()); + } + + } finally { +
closePointInTime(pointInTimeResponse.getPointInTimeId()); + internalCluster().stopNode(masterNode); + for (String dataNode : dataNodes) { + internalCluster().stopNode(dataNode); + } + } + } + @SuppressWarnings({ "rawtypes", "unchecked" }) private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int size, SortBuilder... sorts) throws Exception { Set seen = new HashSet<>(); @@ -590,10 +767,14 @@ private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int s assertThat(seen.size(), equalTo(expectedNumDocs)); } - private BytesReference openPointInTime(String[] indices, TimeValue keepAlive) { - OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); - final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - return response.getPointInTimeId(); + private OpenPointInTimeResponse openPointInTime(String[] indices, TimeValue keepAlive) { + return openPointInTime(indices, keepAlive, false); + } + + private OpenPointInTimeResponse openPointInTime(String[] indices, TimeValue keepAlive, boolean allowPartialSearchResults) { + OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive) + .allowPartialSearchResults(allowPartialSearchResults); + return client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); } private void closePointInTime(BytesReference readerId) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java index e5dca62a97494..88d934973fc49 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java @@ -9,7 +9,9 @@ package org.elasticsearch.action.search; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.Strings; @@ -23,7 +25,6 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.junit.annotations.TestIssueLogging; import java.util.ArrayList; import java.util.Arrays; @@ -39,10 +40,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; -@TestIssueLogging( - issueUrl = "https://github.com/elastic/elasticsearch/issues/109830", - value = "org.elasticsearch.action.search:TRACE," + "org.elasticsearch.search.SearchService:TRACE" -) public class SearchProgressActionListenerIT extends ESSingleNodeTestCase { private List shards; @@ -197,7 +194,11 @@ private static List createRandomIndices(Client client) { client.prepareIndex(indexName).setSource("number", i, "foo", "bar").get(); } client.admin().indices().prepareRefresh("index-*").get(); - ClusterSearchShardsResponse resp = client.admin().cluster().prepareSearchShards("index-*").get(); + ClusterSearchShardsResponse resp = safeExecute( + client, + TransportClusterSearchShardsAction.TYPE, + new 
ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "index-*") + ); return Arrays.stream(resp.getGroups()).map(e -> new SearchShard(null, e.getShardId())).sorted().toList(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java index 7211585d766f4..e568b51e43b2e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java @@ -26,12 +26,12 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.Task; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionBypassCircuitBreakerOnReplicaIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionBypassCircuitBreakerOnReplicaIT.java new file mode 100644 index 0000000000000..f9f047232e8c6 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionBypassCircuitBreakerOnReplicaIT.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.function.BiFunction; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class TransportReplicationActionBypassCircuitBreakerOnReplicaIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(TestPlugin.class); + } + + public static class Request extends ReplicationRequest { + public Request(ShardId shardId) { + super(shardId); + } + + public Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public String toString() { + return "test-request"; + } + } + + public static class Response extends ReplicationResponse { + public Response() {} + + public Response(StreamInput in) throws IOException { + super(in); + } + } + + public static class TestAction extends TransportReplicationAction { + private static final String ACTION_NAME = "internal:test-replication-action"; + private static final ActionType TYPE = new ActionType<>(ACTION_NAME); + + @Inject + public TestAction( + Settings settings, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters + ) { + super( + settings, + ACTION_NAME, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + Request::new, + Request::new, + threadPool.executor(ThreadPool.Names.GENERIC), + SyncGlobalCheckpointAfterOperation.DoNotSync, + PrimaryActionExecution.RejectOnOverload, + ReplicaActionExecution.BypassCircuitBreaker + ); + } + + @Override + protected Response newResponseInstance(StreamInput in) throws IOException { + return new Response(in); + } + + @Override + protected void shardOperationOnPrimary( + Request shardRequest, + IndexShard primary, + ActionListener> listener + ) { + listener.onResponse(new PrimaryResult<>(shardRequest, new Response())); + } + + @Override + protected void shardOperationOnReplica(Request shardRequest, IndexShard replica, ActionListener listener) { + listener.onResponse(new ReplicaResult()); + } + } + + 
public static class TestPlugin extends Plugin implements ActionPlugin { + + public TestPlugin() {} + + @Override + public List> getActions() { + return List.of(new ActionHandler<>(TestAction.TYPE, TestAction.class)); + } + } + + private enum PrimaryOrReplica implements BiFunction { + PRIMARY { + @Override + public String apply(String primaryName, String replicaName) { + return primaryName; + } + }, + REPLICA { + @Override + public String apply(String primaryName, String replicaName) { + return replicaName; + } + } + } + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() { + maxOutCircuitBreakersAndExecuteAction(PrimaryOrReplica.REPLICA); + } + + public void testActionFailsWhenPrimaryCircuitBreakersAreAtCapacity() { + AssertionError assertionError = assertThrows( + AssertionError.class, + () -> maxOutCircuitBreakersAndExecuteAction(PrimaryOrReplica.PRIMARY) + ); + assertNotNull( + "Not caused by CircuitBreakingException " + ExceptionsHelper.stackTrace(assertionError), + ExceptionsHelper.unwrap(assertionError, CircuitBreakingException.class) + ); + } + + private void maxOutCircuitBreakersAndExecuteAction(PrimaryOrReplica nodeToMaxOutCircuitBreakers) { + internalCluster().startMasterOnlyNodes(2); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + String coordinator = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + ensureGreen("test"); + + try ( + var ignored = fullyAllocateCircuitBreakerOnNode( + nodeToMaxOutCircuitBreakers.apply(primary, replica), + CircuitBreaker.IN_FLIGHT_REQUESTS + ) + ) { + PlainActionFuture testActionResult = new PlainActionFuture<>(); + client(coordinator).execute(TestAction.TYPE, new Request(new ShardId(resolveIndex("test"), 0)), testActionResult); + safeGet(testActionResult); + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java index c4737468a766c..81f26243fb6fb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -24,6 +23,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; @@ -107,7 +107,8 @@ public TestAction( Request::new, threadPool.executor(ThreadPool.Names.GENERIC), 
SyncGlobalCheckpointAfterOperation.DoNotSync, - PrimaryActionExecution.RejectOnOverload + PrimaryActionExecution.RejectOnOverload, + ReplicaActionExecution.SubjectToCircuitBreaker ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index cf8decc5655ec..ca4bde8fd0f72 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -16,7 +16,9 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.common.Strings; @@ -1013,7 +1015,10 @@ public void testArtificialDocWithPreference() throws InterruptedException, IOExc indexRandom(true, prepareIndex("test").setId("1").setSource("field1", "random permutation")); // Get search shards - ClusterSearchShardsResponse searchShardsResponse = clusterAdmin().prepareSearchShards("test").get(); + ClusterSearchShardsResponse searchShardsResponse = safeExecute( + TransportClusterSearchShardsAction.TYPE, + new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "test") + ); List shardIds = Arrays.stream(searchShardsResponse.getGroups()).map(s -> s.getShardId().id()).toList(); // request termvectors of artificial document from each shard diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 7c13171ea76ad..dc93aaa814018 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -395,14 +394,7 @@ public void testMessageLogging() { assertThat(healthResponse.isTimedOut(), equalTo(false)); final String indexName = "test_index"; - indicesAdmin().prepareCreate(indexName) - .setWaitForActiveShards(ActiveShardCount.NONE) - .setSettings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - ) - .get(); + indicesAdmin().prepareCreate(indexName).setWaitForActiveShards(ActiveShardCount.NONE).setSettings(indexSettings(2, 1)).get(); try (var dryRunMockLog = MockLog.capture(TransportClusterRerouteAction.class)) { dryRunMockLog.addExpectation( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 8b551e00caeeb..85a04ee6f1851 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -13,6 +13,8 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeRequest; @@ -548,15 +550,15 @@ public void testSearchRouting() throws Exception { } // search-shards API for (int i = 0; i < 10; i++) { - final var search = clusterAdmin().prepareSearchShards(INDEX_NAME); + final var search = new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, INDEX_NAME); switch (randomIntBetween(0, 2)) { - case 0 -> search.setRouting(randomAlphaOfLength(10)); - case 1 -> search.setRouting(randomSearchPreference(routingTableWatcher.numShards, internalCluster().getNodeNames())); + case 0 -> search.routing(randomAlphaOfLength(10)); + case 1 -> search.routing(randomSearchPreference(routingTableWatcher.numShards, internalCluster().getNodeNames())); default -> { // do nothing } } - ClusterSearchShardsGroup[] groups = search.get().getGroups(); + ClusterSearchShardsGroup[] groups = safeExecute(client(), TransportClusterSearchShardsAction.TYPE, search).getGroups(); for (ClusterSearchShardsGroup group : groups) { for (ShardRouting shr : group.getShards()) { String profileKey = "[" + shr.currentNodeId() + "][" + INDEX_NAME + "][" + shr.id() + "]"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index fd5e54631fd7a..7464f83cb2814 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -106,7 +106,7 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { } updateClusterSettings(settings); // Create an index with 10 shards so we can check allocation for it - assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 10).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test").setSettings(indexSettings(10, 0))); ensureGreen("test"); assertBusy(() -> { @@ -184,7 +184,7 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception { updateClusterSettings(builder); // Create an index with 6 shards so we can check allocation for it - prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0)).get(); + prepareCreate("test").setSettings(indexSettings(6, 0)).get(); ensureGreen("test"); { @@ -269,7 +269,7 @@ public void testOnlyMovesEnoughShardsToDropBelowHighWatermark() throws Exception .map(RoutingNode::nodeId) .toList(); - 
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test").setSettings(indexSettings(6, 0))); ensureGreen("test"); @@ -355,10 +355,10 @@ public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { assertAcked( prepareCreate("test").setSettings( - Settings.builder() - .put("number_of_shards", 6) - .put("number_of_replicas", 0) - .put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(), nodeIds.get(2)) + indexSettings(6, 0).put( + IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(), + nodeIds.get(2) + ) ) ); ensureGreen("test"); @@ -422,7 +422,7 @@ public void testMovesShardsOffSpecificDataPathAboveWatermark() throws Exception .map(RoutingNode::nodeId) .toList(); - assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0))); + assertAcked(prepareCreate("test").setSettings(indexSettings(6, 0))); ensureGreen("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java index d52498043366a..895bd6932fdb9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java @@ -7,9 +7,13 @@ */ package org.elasticsearch.cluster.shards; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -38,10 +42,10 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { }; } - public void testSingleShardAllocation() throws Exception { + public void testSingleShardAllocation() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(1, 0).put("index.routing.allocation.include.tag", "A")).get(); ensureGreen(); - ClusterSearchShardsResponse response = clusterAdmin().prepareSearchShards("test").get(); + ClusterSearchShardsResponse response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "test")); assertThat(response.getGroups().length, equalTo(1)); assertThat(response.getGroups()[0].getShardId().getIndexName(), equalTo("test")); assertThat(response.getGroups()[0].getShardId().getId(), equalTo(0)); @@ -49,7 +53,7 @@ public void testSingleShardAllocation() throws Exception { assertThat(response.getNodes().length, equalTo(1)); assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId())); - response = clusterAdmin().prepareSearchShards("test").setRouting("A").get(); + response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "test").routing("A")); assertThat(response.getGroups().length, equalTo(1)); 
assertThat(response.getGroups()[0].getShardId().getIndexName(), equalTo("test")); assertThat(response.getGroups()[0].getShardId().getId(), equalTo(0)); @@ -59,25 +63,25 @@ public void testSingleShardAllocation() throws Exception { } - public void testMultipleShardsSingleNodeAllocation() throws Exception { + public void testMultipleShardsSingleNodeAllocation() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(4, 0).put("index.routing.allocation.include.tag", "A")).get(); ensureGreen(); - ClusterSearchShardsResponse response = clusterAdmin().prepareSearchShards("test").get(); + ClusterSearchShardsResponse response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "test")); assertThat(response.getGroups().length, equalTo(4)); assertThat(response.getGroups()[0].getShardId().getIndexName(), equalTo("test")); assertThat(response.getNodes().length, equalTo(1)); assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId())); - response = clusterAdmin().prepareSearchShards("test").setRouting("ABC").get(); + response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "test").routing("ABC")); assertThat(response.getGroups().length, equalTo(1)); - response = clusterAdmin().prepareSearchShards("test").setPreference("_shards:2").get(); + response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "test").preference("_shards:2")); assertThat(response.getGroups().length, equalTo(1)); assertThat(response.getGroups()[0].getShardId().getId(), equalTo(2)); } - public void testMultipleIndicesAllocation() throws Exception { + public void testMultipleIndicesAllocation() { createIndex("test1", 4, 1); createIndex("test2", 4, 1); indicesAdmin().prepareAliases() @@ -86,7 +90,7 @@ public void testMultipleIndicesAllocation() throws Exception { .get(); clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - ClusterSearchShardsResponse response = clusterAdmin().prepareSearchShards("routing_alias").get(); + ClusterSearchShardsResponse response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "routing_alias")); assertThat(response.getGroups().length, equalTo(2)); assertThat(response.getGroups()[0].getShards().length, equalTo(2)); assertThat(response.getGroups()[1].getShards().length, equalTo(2)); @@ -128,7 +132,7 @@ public void testClusterSearchShardsWithBlocks() { )) { try { enableIndexBlock("test-blocks", blockSetting); - ClusterSearchShardsResponse response = clusterAdmin().prepareSearchShards("test-blocks").get(); + ClusterSearchShardsResponse response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "test-blocks")); assertThat(response.getGroups().length, equalTo(numShards.numPrimaries)); } finally { disableIndexBlock("test-blocks", blockSetting); @@ -138,9 +142,28 @@ public void testClusterSearchShardsWithBlocks() { // Request is blocked try { enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); - assertBlocked(clusterAdmin().prepareSearchShards("test-blocks")); + assertBlocked( + null, + asInstanceOf( + ClusterBlockException.class, + ExceptionsHelper.unwrapCause( + safeAwaitFailure( + ClusterSearchShardsResponse.class, + l -> client().execute( + TransportClusterSearchShardsAction.TYPE, + new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "test-blocks"), + l + ) + ) + ) + ) + ); } finally { disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); } } + + private static ClusterSearchShardsResponse 
safeExecute(ClusterSearchShardsRequest request) { + return safeExecute(TransportClusterSearchShardsAction.TYPE, request); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 3202f5513e9ac..31dd002a6af7d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.index.IndexVersion; @@ -152,7 +153,9 @@ public void testIncreaseReplicasOverLimit() { + firstShardCount + "]/[" + dataNodes * shardsPerNode - + "] maximum normal shards open;"; + + "] maximum normal shards open; for more information, see " + + ReferenceDocs.MAX_SHARDS_PER_NODE + + ";"; assertEquals(expectedError, e.getMessage()); } Metadata clusterState = clusterAdmin().prepareState().get().getState().metadata(); @@ -211,7 +214,9 @@ public void testChangingMultipleIndicesOverLimit() { + totalShardsBefore + "]/[" + dataNodes * shardsPerNode - + "] maximum normal shards open;"; + + "] maximum normal shards open; for more information, see " + + ReferenceDocs.MAX_SHARDS_PER_NODE + + ";"; assertEquals(expectedError, e.getMessage()); } Metadata clusterState = clusterAdmin().prepareState().get().getState().metadata(); @@ -403,7 +408,9 @@ private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentE + currentShards + "]/[" + maxShards - + "] maximum normal shards open;"; + + "] maximum normal shards open; for more information, see " + + ReferenceDocs.MAX_SHARDS_PER_NODE + + ";"; assertEquals(expectedError, e.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 71c6ef956c4d4..cad5c8f524bc7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -17,9 +17,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; @@ -33,9 +30,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; -import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; -import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; - /** * Tests for discovery during disruptions. 
*/ @@ -138,13 +132,7 @@ public void testElectMasterWithLatestVersion() throws Exception { internalCluster().setDisruptionScheme(isolatePreferredMaster); isolatePreferredMaster.startDisrupting(); - client(randomFrom(nonPreferredNodes)).admin() - .indices() - .prepareCreate("test") - .setSettings( - Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - ) - .get(); + client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings(indexSettings(1, 0)).get(); internalCluster().clearDisruptionScheme(false); internalCluster().setDisruptionScheme(isolateAllNodes); @@ -259,46 +247,31 @@ public void testJoinWaitsForCircuitBreaker() throws InterruptedException { logger.info("--> master [{}], victim [{}]", masterName, victimName); // fill up the circuit breaker to breaking point - final var circuitBreaker = internalCluster().getInstance(CircuitBreakerService.class, victimName) - .getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); - long allocationSize = 1; - while (true) { - try { - circuitBreaker.addEstimateBytesAndMaybeBreak(allocationSize, "test"); - } catch (CircuitBreakingException e) { - circuitBreaker.addWithoutBreaking(allocationSize); - break; - } - allocationSize <<= 1; - assert 0 <= allocationSize; - } - - // drop the victim from the cluster with a network disruption - final var masterTransportService = MockTransportService.getInstance(masterName); - masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, victimName)); - logger.info("--> waiting for victim's departure"); - ensureStableCluster(2, masterName); - - // verify that the victim sends no joins while the circuit breaker is breaking - final var victimTransportService = MockTransportService.getInstance(victimName); - victimTransportService.addSendBehavior((connection, requestId, action, request, options) -> { - assertNotEquals(action, JoinHelper.JOIN_ACTION_NAME); - connection.sendRequest(requestId, action, request, options); - }); - - // fix the network disruption - logger.info("--> removing network disruption"); - masterTransportService.clearAllRules(); - ensureStableCluster(2, masterName); - - // permit joins again - victimTransportService.addSendBehavior(null); - - // release the breaker - logger.info("--> releasing allocations from circuit breaker"); - while (0 < allocationSize) { - circuitBreaker.addWithoutBreaking(-allocationSize); - allocationSize >>= 1; + try (var ignored = fullyAllocateCircuitBreakerOnNode(victimName, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + + // drop the victim from the cluster with a network disruption + final var masterTransportService = MockTransportService.getInstance(masterName); + masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, victimName)); + logger.info("--> waiting for victim's departure"); + ensureStableCluster(2, masterName); + + // verify that the victim sends no joins while the circuit breaker is breaking + final var victimTransportService = MockTransportService.getInstance(victimName); + victimTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + assertNotEquals(action, JoinHelper.JOIN_ACTION_NAME); + connection.sendRequest(requestId, action, request, options); + }); + + // fix the network disruption + logger.info("--> removing network disruption"); + masterTransportService.clearAllRules(); + ensureStableCluster(2, masterName); + + // permit joins again + 
victimTransportService.addSendBehavior(null); + + // release the breaker + logger.info("--> releasing allocations from circuit breaker"); } logger.info("--> waiting for cluster to heal"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 3baabe4cc888e..26573644790fa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -441,13 +441,9 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .indices() .prepareCreate("test") .setSettings( - Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 1) - + indexSettings(1, 1) // disable merges to keep segments the same .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) - // expire retention leases quickly .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index a96801b707808..76c501df1fd29 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -849,13 +848,7 @@ public void testAvoidWrappingSearcherInMultiGet() { SearcherWrapperPlugin.enabled = true; assertAcked( prepareCreate("test").setMapping("f", "type=keyword") - .setSettings( - Settings.builder() - .put("index.refresh_interval", "-1") - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.routing.rebalance.enable", "none") - ) + .setSettings(indexSettings(1, 0).put("index.refresh_interval", "-1").put("index.routing.rebalance.enable", "none")) ); // start tracking translog locations in the live version map { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java index 224f0dd4dc822..a77c01e199942 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetFromTranslogActionIT.java @@ -13,9 +13,7 @@ import org.elasticsearch.action.get.TransportGetFromTranslogAction; import org.elasticsearch.action.get.TransportGetFromTranslogAction.Response; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -34,11 +32,8 @@ public void testGetFromTranslog() throws Exception { assertAcked( prepareCreate(INDEX).setMapping("field1", "type=keyword,store=true") 
.setSettings( - Settings.builder() - .put("index.refresh_interval", -1) - // A GetFromTranslogAction runs only Stateless where there is only one active indexing shard. - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + // A GetFromTranslogAction runs only Stateless where there is only one active indexing shard. + indexSettings(1, 0).put("index.refresh_interval", -1) ) .addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null))) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java index b09a0284eba05..6da2802ce7cc8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/ShardMultiGetFomTranslogActionIT.java @@ -15,9 +15,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -42,11 +40,8 @@ public class ShardMultiGetFomTranslogActionIT extends ESIntegTestCase { public void testShardMultiGetFromTranslog() throws Exception { assertAcked( prepareCreate(INDEX).setSettings( - Settings.builder() - .put("index.refresh_interval", -1) - // A ShardMultiGetFromTranslogAction runs only Stateless where there is only one active indexing shard. - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + // A ShardMultiGetFromTranslogAction runs only Stateless where there is only one active indexing shard. + indexSettings(1, 0).put("index.refresh_interval", -1) ).addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null))) ); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java index de783a28bce1d..89107c54ee820 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java @@ -7,10 +7,10 @@ */ package org.elasticsearch.index; -import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.injection.guice.AbstractModule; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java new file mode 100644 index 0000000000000..0bab5be245ecf --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.stream.Stream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class BackgroundRetentionLeaseSyncActionIT extends ESIntegTestCase { + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() throws Exception { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (var ignored = fullyAllocateCircuitBreakerOnNode(replica, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + final ClusterState state = internalCluster().clusterService().state(); + final Index testIndex = resolveIndex("test"); + final ShardId testIndexShardZero = new ShardId(testIndex, 0); + final String testLeaseId = "test-lease/123"; + RetentionLeases newLeases = addTestLeaseToRetentionLeases(primary, testIndex, testLeaseId); + internalCluster().getInstance(RetentionLeaseSyncer.class, primary) + .backgroundSync( + testIndexShardZero, + state.routingTable().shardRoutingTable(testIndexShardZero).primaryShard().allocationId().getId(), + state.term(), + newLeases + ); + + // Wait for test lease to appear on replica + IndicesService replicaIndicesService = internalCluster().getInstance(IndicesService.class, replica); + assertBusy(() -> { + RetentionLeases retentionLeases = replicaIndicesService.indexService(testIndex).getShard(0).getRetentionLeases(); + assertTrue(retentionLeases.contains(testLeaseId)); + }); + } + } + + private static RetentionLeases addTestLeaseToRetentionLeases(String primaryNodeName, Index index, String leaseId) { + IndicesService primaryIndicesService = internalCluster().getInstance(IndicesService.class, primaryNodeName); + RetentionLeases currentLeases = primaryIndicesService.indexService(index).getShard(0).getRetentionLeases(); + RetentionLease newLease = new RetentionLease(leaseId, 0, System.currentTimeMillis(), "test source"); + return new RetentionLeases( + currentLeases.primaryTerm(), + currentLeases.version() + 1, + Stream.concat(currentLeases.leases().stream(), Stream.of(newLease)).toList() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java new file mode 100644 index 0000000000000..2d8f455792172 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java @@ -0,0 
+1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RetentionLeaseSyncActionIT extends ESIntegTestCase { + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (var ignored = fullyAllocateCircuitBreakerOnNode(replica, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + assertThatRetentionLeaseSyncCompletesSuccessfully(primary); + } + } + + public void testActionCompletesWhenPrimaryIndexingPressureIsAtCapacity() { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (Releasable ignored = fullyAllocatePrimaryIndexingCapacityOnNode(primary)) { + assertThatRetentionLeaseSyncCompletesSuccessfully(primary); + } + } + + private static void assertThatRetentionLeaseSyncCompletesSuccessfully(String primaryNodeName) { + RetentionLeaseSyncer instance = internalCluster().getInstance(RetentionLeaseSyncer.class, primaryNodeName); + PlainActionFuture retentionLeaseSyncResult = new PlainActionFuture<>(); + ClusterState state = internalCluster().clusterService().state(); + ShardId testIndexShardZero = new ShardId(resolveIndex("test"), 0); + ShardRouting primaryShard = state.routingTable().shardRoutingTable(testIndexShardZero).primaryShard(); + instance.sync( + testIndexShardZero, + primaryShard.allocationId().getId(), + state.term(), + RetentionLeases.EMPTY, + retentionLeaseSyncResult + ); + safeGet(retentionLeaseSyncResult); + } + + /** + * Fully allocate primary indexing capacity on a node + * + * @param targetNode The name of the node on which to allocate + * @return A {@link Releasable} which will release the capacity when closed + */ + private static Releasable fullyAllocatePrimaryIndexingCapacityOnNode(String targetNode) { + return 
internalCluster().getInstance(IndexingPressure.class, targetNode) + .markPrimaryOperationStarted( + 1, + IndexingPressure.MAX_INDEXING_BYTES.get(internalCluster().getInstance(Settings.class, targetNode)).getBytes() + 1, + true + ); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java new file mode 100644 index 0000000000000..ff2117ea93bb9 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices.memory.breaker; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matchers; +import org.junit.After; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Stream; + +import static org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker.CIRCUIT_BREAKER_TYPE_ATTRIBUTE; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING; +import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, supportsDedicatedMasters = true) +public class HierarchyCircuitBreakerTelemetryIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(TestTelemetryPlugin.class); + } + + public void testCircuitBreakerTripCountMetric() { + final Settings circuitBreakerSettings = Settings.builder() + .put(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 100, ByteSizeUnit.BYTES) + .put(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) + 
.put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 100, ByteSizeUnit.BYTES) + .put(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) + .put(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 100, ByteSizeUnit.BYTES) + .put(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) + .put(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), 150, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) + .build(); + String dataNodeName = null; + String masterNodeName = null; + try { + // NOTE: we start with empty circuitBreakerSettings to allow cluster formation + masterNodeName = internalCluster().startMasterOnlyNode(Settings.EMPTY); + dataNodeName = internalCluster().startDataOnlyNode(Settings.EMPTY); + assertTrue(clusterAdmin().prepareUpdateSettings().setPersistentSettings(circuitBreakerSettings).get().isAcknowledged()); + assertTrue( + client().admin() + .indices() + .prepareCreate("test") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .get() + .isAcknowledged() + ); + assertEquals( + RestStatus.OK.getStatus(), + client().prepareIndex("test").setWaitForActiveShards(1).setSource("field", "value").get().status().getStatus() + ); + } catch (CircuitBreakingException cbex) { + final List dataNodeMeasurements = getMeasurements(dataNodeName); + final List masterNodeMeasurements = getMeasurements(masterNodeName); + final List allMeasurements = Stream.concat(dataNodeMeasurements.stream(), masterNodeMeasurements.stream()) + .toList(); + assertThat(allMeasurements, Matchers.not(Matchers.empty())); + final Measurement measurement = allMeasurements.get(0); + assertThat(1L, Matchers.equalTo(measurement.getLong())); + assertThat(1L, Matchers.equalTo(measurement.value())); + assertThat(Map.of(CIRCUIT_BREAKER_TYPE_ATTRIBUTE, "inflight_requests"), Matchers.equalTo(measurement.attributes())); + assertThat(true, Matchers.equalTo(measurement.isLong())); + return; + } + fail("Expected exception not thrown"); + } + + @After + public void resetClusterSetting() { + final var circuitBreakerSettings = Settings.builder() + .putNull(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey()); + updateClusterSettings(circuitBreakerSettings); + } + + private List getMeasurements(String nodeName) { + final TestTelemetryPlugin telemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) + .toList() + .get(0); + return Measurement.combine( + Stream.of(telemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL).stream()) + .flatMap(Function.identity()) + .toList() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index d56e4a372c17c..fbbeec4b4e9ba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java 
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -914,10 +914,7 @@ private IndicesStatsResponse createAndPopulateIndex(String name, int nodeCount, prepareCreate( name, nodeCount, - Settings.builder() - .put("number_of_shards", shardCount) - .put("number_of_replicas", replicaCount) - .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) + indexSettings(shardCount, replicaCount).put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) ) ); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index 5201e4ab3d812..a7a2af57ef810 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -227,10 +227,7 @@ public void testCloseOpenAliasMultipleIndices() { public void testOpenWaitingForActiveShardsFailed() throws Exception { Client client = client(); - Settings settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) - .build(); + Settings settings = indexSettings(1, 0).build(); assertAcked(client.admin().indices().prepareCreate("test").setSettings(settings).get()); assertAcked(client.admin().indices().prepareClose("test").get()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java index 542a4cd2c4c92..db26d630fefea 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java @@ -350,7 +350,7 @@ public void beforeIndexFoldersDeleted(Index index, IndexSettings indexSettings, @Override public void beforeShardFoldersDeleted(ShardId shardId, IndexSettings indexSettings, Path[] shardPaths) { - deletedShards.computeIfAbsent(shardId.getIndex(), i -> new ArrayList<>()).add(shardId); + deletedShards.computeIfAbsent(shardId.getIndex(), i -> Collections.synchronizedList(new ArrayList<>())).add(shardId); } }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java deleted file mode 100644 index 7797371a2823b..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/DocumentSizeObserverIT.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
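The IndexFoldersDeletionListenerIT hunk above swaps a plain ArrayList for Collections.synchronizedList because beforeShardFoldersDeleted can be invoked from several shard-deletion threads at once: a concurrent map makes computeIfAbsent atomic, but it does not make the per-key list safe for concurrent add calls. A stripped-down illustration of the pattern (class and field names here are invented for the example, not taken from the test):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ShardDeletionTracker {
    // The concurrent map handles concurrent computeIfAbsent; each value list must also be
    // thread-safe because multiple callbacks may append to the same index's list at once.
    private final Map<String, List<Integer>> deletedShards = new ConcurrentHashMap<>();

    void onShardFolderDeleted(String index, int shardId) {
        deletedShards.computeIfAbsent(index, i -> Collections.synchronizedList(new ArrayList<>())).add(shardId);
    }
}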
- */ - -package org.elasticsearch.plugins.internal; - -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.engine.InternalEngine; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.plugins.EnginePlugin; -import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.FilterXContentParserWrapper; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.atomic.AtomicLong; - -import static org.elasticsearch.xcontent.XContentFactory.cborBuilder; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) -public class DocumentSizeObserverIT extends ESIntegTestCase { - - private static String TEST_INDEX_NAME = "test-index-name"; - - @Override - protected boolean addMockInternalEngine() { - return false; - } - - @Override - protected Collection> nodePlugins() { - return List.of(TestDocumentParsingProviderPlugin.class, TestEnginePlugin.class); - } - - // the assertions are done in plugin which is static and will be created by ES server. - // hence a static flag to make sure it is indeed used - public static boolean hasWrappedParser; - public static AtomicLong COUNTER = new AtomicLong(); - - public void testDocumentIsReportedUponBulk() throws Exception { - hasWrappedParser = false; - client().index( - new IndexRequest(TEST_INDEX_NAME).id("1").source(jsonBuilder().startObject().field("test", "I am sam i am").endObject()) - ).actionGet(); - assertTrue(hasWrappedParser); - assertDocumentReported(); - - hasWrappedParser = false; - // the format of the request does not matter - client().index( - new IndexRequest(TEST_INDEX_NAME).id("2").source(cborBuilder().startObject().field("test", "I am sam i am").endObject()) - ).actionGet(); - assertTrue(hasWrappedParser); - assertDocumentReported(); - - hasWrappedParser = false; - // white spaces does not matter - client().index(new IndexRequest(TEST_INDEX_NAME).id("3").source(""" - { - "test": - - "I am sam i am" - } - """, XContentType.JSON)).actionGet(); - assertTrue(hasWrappedParser); - assertDocumentReported(); - } - - private void assertDocumentReported() throws Exception { - assertBusy(() -> assertThat(COUNTER.get(), equalTo(5L))); - COUNTER.set(0); - } - - public static class TestEnginePlugin extends Plugin implements EnginePlugin { - DocumentParsingProvider documentParsingProvider; - - @Override - public Collection createComponents(PluginServices services) { - documentParsingProvider = services.documentParsingProvider(); - return super.createComponents(services); - } - - @Override - public Optional getEngineFactory(IndexSettings indexSettings) { - return Optional.of(config -> new InternalEngine(config) { - @Override - public IndexResult index(Index index) throws IOException { - IndexResult result = super.index(index); - - DocumentSizeReporter documentParsingReporter = documentParsingProvider.newDocumentSizeReporter( - shardId.getIndexName(), - config().getMapperService(), - 
DocumentSizeAccumulator.EMPTY_INSTANCE - ); - ParsedDocument parsedDocument = index.parsedDoc(); - documentParsingReporter.onIndexingCompleted(parsedDocument); - - return result; - } - }); - } - } - - public static class TestDocumentParsingProviderPlugin extends Plugin implements DocumentParsingProviderPlugin, IngestPlugin { - - public TestDocumentParsingProviderPlugin() {} - - @Override - public DocumentParsingProvider getDocumentParsingProvider() { - return new DocumentParsingProvider() { - @Override - public DocumentSizeObserver newDocumentSizeObserver(DocWriteRequest request) { - return new TestDocumentSizeObserver(0L); - } - - @Override - public DocumentSizeReporter newDocumentSizeReporter( - String indexName, - MapperService mapperService, - DocumentSizeAccumulator documentSizeAccumulator - ) { - return new TestDocumentSizeReporter(indexName); - } - }; - } - } - - public static class TestDocumentSizeReporter implements DocumentSizeReporter { - - private final String indexName; - - public TestDocumentSizeReporter(String indexName) { - this.indexName = indexName; - } - - @Override - public void onIndexingCompleted(ParsedDocument parsedDocument) { - COUNTER.addAndGet(parsedDocument.getDocumentSizeObserver().normalisedBytesParsed()); - assertThat(indexName, equalTo(TEST_INDEX_NAME)); - } - } - - public static class TestDocumentSizeObserver implements DocumentSizeObserver { - long counter = 0; - - public TestDocumentSizeObserver(long counter) { - this.counter = counter; - } - - @Override - public XContentParser wrapParser(XContentParser xContentParser) { - hasWrappedParser = true; - return new FilterXContentParserWrapper(xContentParser) { - - @Override - public Token nextToken() throws IOException { - counter++; - return super.nextToken(); - } - }; - } - - @Override - public long normalisedBytesParsed() { - return counter; - } - - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java new file mode 100644 index 0000000000000..16fb618e97dfc --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugins.internal; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.FilterXContentParserWrapper; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.xcontent.XContentFactory.cborBuilder; +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class XContentMeteringParserDecoratorIT extends ESIntegTestCase { + + private static String TEST_INDEX_NAME = "test-index-name"; + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + @Override + protected Collection> nodePlugins() { + return List.of(TestDocumentParsingProviderPlugin.class, TestEnginePlugin.class); + } + + // the assertions are done in plugin which is static and will be created by ES server. + // hence a static flag to make sure it is indeed used + public static boolean hasWrappedParser; + public static AtomicLong COUNTER = new AtomicLong(); + + public void testDocumentIsReportedUponBulk() throws Exception { + hasWrappedParser = false; + client().index( + new IndexRequest(TEST_INDEX_NAME).id("1").source(jsonBuilder().startObject().field("test", "I am sam i am").endObject()) + ).actionGet(); + assertTrue(hasWrappedParser); + assertDocumentReported(); + + hasWrappedParser = false; + // the format of the request does not matter + client().index( + new IndexRequest(TEST_INDEX_NAME).id("2").source(cborBuilder().startObject().field("test", "I am sam i am").endObject()) + ).actionGet(); + assertTrue(hasWrappedParser); + assertDocumentReported(); + + hasWrappedParser = false; + // white spaces does not matter + client().index(new IndexRequest(TEST_INDEX_NAME).id("3").source(""" + { + "test": + + "I am sam i am" + } + """, XContentType.JSON)).actionGet(); + assertTrue(hasWrappedParser); + assertDocumentReported(); + } + + private void assertDocumentReported() throws Exception { + assertBusy(() -> assertThat(COUNTER.get(), equalTo(5L))); + COUNTER.set(0); + } + + public static class TestEnginePlugin extends Plugin implements EnginePlugin { + DocumentParsingProvider documentParsingProvider; + + @Override + public Collection createComponents(PluginServices services) { + documentParsingProvider = services.documentParsingProvider(); + return super.createComponents(services); + } + + @Override + public Optional getEngineFactory(IndexSettings indexSettings) { + return Optional.of(config -> new InternalEngine(config) { + @Override + public IndexResult index(Index index) throws IOException { + IndexResult result = super.index(index); + + DocumentSizeReporter documentParsingReporter = documentParsingProvider.newDocumentSizeReporter( + shardId.getIndexName(), + 
config().getMapperService(), + DocumentSizeAccumulator.EMPTY_INSTANCE + ); + ParsedDocument parsedDocument = index.parsedDoc(); + documentParsingReporter.onIndexingCompleted(parsedDocument); + + return result; + } + }); + } + } + + public static class TestDocumentParsingProviderPlugin extends Plugin implements DocumentParsingProviderPlugin, IngestPlugin { + + public TestDocumentParsingProviderPlugin() {} + + @Override + public DocumentParsingProvider getDocumentParsingProvider() { + return new DocumentParsingProvider() { + @Override + public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { + return new TestXContentMeteringParserDecorator(0L); + } + + @Override + public DocumentSizeReporter newDocumentSizeReporter( + String indexName, + MapperService mapperService, + DocumentSizeAccumulator documentSizeAccumulator + ) { + return new TestDocumentSizeReporter(indexName); + } + }; + } + } + + public static class TestDocumentSizeReporter implements DocumentSizeReporter { + + private final String indexName; + + public TestDocumentSizeReporter(String indexName) { + this.indexName = indexName; + } + + @Override + public void onIndexingCompleted(ParsedDocument parsedDocument) { + long delta = parsedDocument.getNormalizedSize().ingestedBytes(); + if (delta > 0) { + COUNTER.addAndGet(delta); + } + assertThat(indexName, equalTo(TEST_INDEX_NAME)); + } + } + + public static class TestXContentMeteringParserDecorator implements XContentMeteringParserDecorator { + long counter = 0; + + public TestXContentMeteringParserDecorator(long counter) { + this.counter = counter; + } + + @Override + public XContentParser decorate(XContentParser xContentParser) { + hasWrappedParser = true; + return new FilterXContentParserWrapper(xContentParser) { + + @Override + public Token nextToken() throws IOException { + counter++; + return super.nextToken(); + } + }; + } + + @Override + public ParsedDocument.DocumentSize meteredDocumentSize() { + return new ParsedDocument.DocumentSize(counter, counter); + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java new file mode 100644 index 0000000000000..4665dc486a904 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
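The replacement test above exercises the renamed metering API: DocumentParsingProvider now hands out an XContentMeteringParserDecorator per write request, the decorator wraps the parser via decorate(...), and the measured size is read back through meteredDocumentSize() rather than normalisedBytesParsed(). Pieced together only from the calls made in that test, a caller on the indexing path would use it roughly as follows (method shape, parameter names, and the DocumentSize accessor are inferred, not taken from the production code):

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.plugins.internal.DocumentParsingProvider;
import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
import org.elasticsearch.xcontent.XContentParser;

// Sketch of a caller's view of the renamed API, assembled from the test above.
static long meterDocument(DocumentParsingProvider provider, IndexRequest request, XContentParser rawParser) {
    XContentMeteringParserDecorator decorator = provider.newMeteringParserDecorator(request);
    XContentParser meteredParser = decorator.decorate(rawParser);
    // ... the document source would be parsed through meteredParser here ...
    ParsedDocument.DocumentSize size = decorator.meteredDocumentSize();
    return size.ingestedBytes(); // later surfaced via DocumentSizeReporter.onIndexingCompleted(...)
}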
+ */ + +package org.elasticsearch.repositories.blobstore; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshotsIntegritySuppressor; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.List; + +public class BlobStoreCorruptionIT extends AbstractSnapshotIntegTestCase { + + private static final Logger logger = LogManager.getLogger(BlobStoreCorruptionIT.class); + + @Before + public void suppressConsistencyCheck() { + disableRepoConsistencyCheck("testing corruption detection involves breaking the repo"); + } + + public void testCorruptionDetection() throws Exception { + final var repositoryName = randomIdentifier(); + final var indexName = randomIdentifier(); + final var snapshotName = randomIdentifier(); + final var repositoryRootPath = randomRepoPath(); + + createRepository(repositoryName, FsRepository.TYPE, repositoryRootPath); + createIndexWithRandomDocs(indexName, between(1, 100)); + flushAndRefresh(indexName); + createSnapshot(repositoryName, snapshotName, List.of(indexName)); + + final var corruptedFile = BlobStoreCorruptionUtils.corruptRandomFile(repositoryRootPath); + final var corruptedFileType = RepositoryFileType.getRepositoryFileType(repositoryRootPath, corruptedFile); + final var corruptionDetectors = new ArrayList, ?>>(); + + // detect corruption by listing the snapshots + if (corruptedFileType == RepositoryFileType.SNAPSHOT_INFO) { + corruptionDetectors.add(exceptionListener -> { + logger.info("--> listing snapshots"); + client().admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) + .execute(ActionTestUtils.assertNoSuccessListener(exceptionListener::onResponse)); + }); + } + + // detect corruption by taking another snapshot + if (corruptedFileType == RepositoryFileType.SHARD_GENERATION) { + corruptionDetectors.add(exceptionListener -> { + logger.info("--> taking another snapshot"); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, randomIdentifier()) + .setWaitForCompletion(true) + .execute(exceptionListener.map(createSnapshotResponse -> { + assertNotEquals(SnapshotState.SUCCESS, createSnapshotResponse.getSnapshotInfo().state()); + return new ElasticsearchException("create-snapshot failed as expected"); + })); + }); + } + + // detect corruption by restoring the snapshot + switch (corruptedFileType) { + case SNAPSHOT_INFO, GLOBAL_METADATA, INDEX_METADATA -> corruptionDetectors.add(exceptionListener -> { + logger.info("--> restoring snapshot"); + client().admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) + .setRestoreGlobalState(corruptedFileType == RepositoryFileType.GLOBAL_METADATA || randomBoolean()) + 
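The corruption test above relies on BlobStoreCorruptionUtils.corruptRandomFile, which is not part of this excerpt. A plausible sketch of what such a utility might do, assuming it simply damages one random blob under the repository root (the real utility may pick files and corruption modes differently):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Random;
import java.util.stream.Stream;

// Purely illustrative: flip a byte in one randomly chosen file under the repository root.
static Path corruptRandomFile(Path repositoryRoot, Random random) throws IOException {
    final List<Path> files;
    try (Stream<Path> stream = Files.walk(repositoryRoot)) {
        files = stream.filter(Files::isRegularFile).toList();
    }
    final Path victim = files.get(random.nextInt(files.size())); // assumes the repo already contains blobs
    final byte[] bytes = Files.readAllBytes(victim);
    if (bytes.length == 0) {
        Files.write(victim, new byte[] { 1 }); // make an empty blob non-empty so it no longer matches
        return victim;
    }
    bytes[random.nextInt(bytes.length)] ^= (byte) (1 + random.nextInt(255)); // flip at least one bit
    Files.write(victim, bytes);
    return victim;
}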
.setWaitForCompletion(true) + .execute(ActionTestUtils.assertNoSuccessListener(exceptionListener::onResponse)); + }); + case SHARD_SNAPSHOT_INFO, SHARD_DATA -> corruptionDetectors.add(exceptionListener -> { + logger.info("--> restoring snapshot and checking for failed shards"); + SubscribableListener + // if shard-level data is corrupted then the overall restore succeeds but the shard recoveries fail + .newForked(l -> client().admin().indices().prepareDelete(indexName).execute(l)) + .andThenAccept(ElasticsearchAssertions::assertAcked) + + .andThen( + l -> client().admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) + .setRestoreGlobalState(randomBoolean()) + .setWaitForCompletion(true) + .execute(l) + ) + + .addListener(exceptionListener.map(restoreSnapshotResponse -> { + assertNotEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); + return new ElasticsearchException("post-restore recoveries failed as expected"); + })); + }); + } + + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + final var exception = safeAwait(randomFrom(corruptionDetectors)); + logger.info(Strings.format("--> corrupted [%s] and caught exception", corruptedFile), exception); + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/ChunkedZipResponseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/ChunkedZipResponseIT.java new file mode 100644 index 0000000000000..be6a7387d8a0b --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/ChunkedZipResponseIT.java @@ -0,0 +1,506 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.rest; + +import org.apache.http.ConnectionClosedException; +import org.apache.http.HttpResponse; +import org.apache.http.MalformedChunkCodingException; +import org.apache.http.nio.ContentDecoder; +import org.apache.http.nio.IOControl; +import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.http.protocol.HttpContext; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThrottledIterator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; + +import static org.elasticsearch.rest.ChunkedZipResponse.ZIP_CONTENT_TYPE; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; + +@ESIntegTestCase.ClusterScope(numDataNodes = 1) +public class ChunkedZipResponseIT extends ESIntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), RandomZipResponsePlugin.class); + } + + public 
static class RandomZipResponsePlugin extends Plugin implements ActionPlugin { + + public static final String ROUTE = "/_random_zip_response"; + public static final String RESPONSE_FILENAME = "test-response"; + + public static final String INFINITE_ROUTE = "/_infinite_zip_response"; + public static final String GET_NEXT_PART_COUNT_DOWN_PARAM = "getNextPartCountDown"; + + public final AtomicReference<Response> responseRef = new AtomicReference<>(); + + public record EntryPart(List<BytesReference> chunks) { + public EntryPart { + Objects.requireNonNull(chunks); + } + } + + public record EntryBody(List<EntryPart> parts) { + public EntryBody { + Objects.requireNonNull(parts); + } + } + + public record Response(Map<String, EntryBody> entries, CountDownLatch completedLatch) {} + + @Override + public Collection<RestHandler> getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier<DiscoveryNodes> nodesInCluster, + Predicate<NodeFeature> clusterSupportsFeature + ) { + return List.of(new RestHandler() { + @Override + public List<Route> routes() { + return List.of(new Route(RestRequest.Method.GET, ROUTE)); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) { + final var response = new Response(new HashMap<>(), new CountDownLatch(1)); + final var maxSize = between(1, ByteSizeUnit.MB.toIntBytes(1)); + final var entryCount = between(0, ByteSizeUnit.MB.toIntBytes(10) / maxSize); // limit total size to 10MiB + for (int i = 0; i < entryCount; i++) { + response.entries().put(randomIdentifier(), randomContent(between(1, 10), maxSize)); + } + assertTrue(responseRef.compareAndSet(null, response)); + handleZipRestRequest( + channel, + client.threadPool(), + response.completedLatch(), + () -> {}, + response.entries().entrySet().iterator() + ); + } + }, new RestHandler() { + + @Override + public List<Route> routes() { + return List.of(new Route(RestRequest.Method.GET, INFINITE_ROUTE)); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) { + final var response = new Response(null, new CountDownLatch(1)); + assertTrue(responseRef.compareAndSet(null, response)); + final var getNextPartCountDown = request.paramAsInt(GET_NEXT_PART_COUNT_DOWN_PARAM, -1); + final Runnable onGetNextPart; + final Supplier<EntryBody> entryBodySupplier; + if (getNextPartCountDown <= 0) { + onGetNextPart = () -> {}; + entryBodySupplier = () -> randomContent(between(1, 10), ByteSizeUnit.MB.toIntBytes(1)); + } else { + final AtomicInteger remaining = new AtomicInteger(getNextPartCountDown); + entryBodySupplier = () -> randomContent(between(2, 10), ByteSizeUnit.KB.toIntBytes(1)); + if (randomBoolean()) { + onGetNextPart = () -> { + final var newRemaining = remaining.decrementAndGet(); + assertThat(newRemaining, greaterThanOrEqualTo(0)); + if (newRemaining <= 0) { + throw new ElasticsearchException("simulated failure"); + } + }; + } else { + onGetNextPart = () -> { + if (remaining.decrementAndGet() == 0) { + request.getHttpChannel().close(); + } + }; + } + } + handleZipRestRequest(channel, client.threadPool(), response.completedLatch(), onGetNextPart, new Iterator<>() { + + private long id; + + // carry on yielding content even after the channel closes + private final Semaphore trailingContentPermits = new Semaphore(between(0, 20)); + + @Override + public boolean hasNext() { + return request.getHttpChannel().isOpen()
|| trailingContentPermits.tryAcquire(); + } + + @Override + public Map.Entry next() { + return new Map.Entry<>() { + private final String key = Long.toString(id++); + private final EntryBody content = entryBodySupplier.get(); + + @Override + public String getKey() { + return key; + } + + @Override + public EntryBody getValue() { + return content; + } + + @Override + public EntryBody setValue(EntryBody value) { + return fail(null, "must not setValue"); + } + }; + } + }); + } + }); + } + + private static EntryBody randomContent(int partCount, int maxSize) { + if (randomBoolean()) { + return null; + } + + final var maxPartSize = maxSize / partCount; + return new EntryBody(randomList(partCount, partCount, () -> { + final var chunkCount = between(1, 10); + return randomEntryPart(chunkCount, maxPartSize / chunkCount); + })); + } + + private static EntryPart randomEntryPart(int chunkCount, int maxChunkSize) { + final var chunks = randomList(chunkCount, chunkCount, () -> randomBytesReference(between(0, maxChunkSize))); + Collections.shuffle(chunks, random()); + return new EntryPart(chunks); + } + + private static void handleZipRestRequest( + RestChannel channel, + ThreadPool threadPool, + CountDownLatch completionLatch, + Runnable onGetNextPart, + Iterator> entryIterator + ) { + try (var refs = new RefCountingRunnable(completionLatch::countDown)) { + final var chunkedZipResponse = new ChunkedZipResponse(RESPONSE_FILENAME, channel, refs.acquire()); + ThrottledIterator.run( + entryIterator, + (ref, entry) -> randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.generic()).execute( + ActionRunnable.supply( + chunkedZipResponse.newEntryListener(entry.getKey(), Releasables.wrap(ref, refs.acquire())), + () -> entry.getValue() == null && randomBoolean() // randomBoolean() to allow some null entries to fail with NPE + ? 
null + : new TestBytesReferenceBodyPart( + entry.getKey(), + threadPool, + entry.getValue().parts().iterator(), + refs, + onGetNextPart + ) + ) + ), + between(1, 10), + () -> {}, + Releasables.wrap(refs.acquire(), chunkedZipResponse)::close + ); + } + } + } + + private static class TestBytesReferenceBodyPart implements ChunkedRestResponseBodyPart { + + private final String name; + private final ThreadPool threadPool; + private final Iterator chunksIterator; + private final Iterator partsIterator; + private final RefCountingRunnable refs; + private final Runnable onGetNextPart; + + TestBytesReferenceBodyPart( + String name, + ThreadPool threadPool, + Iterator partsIterator, + RefCountingRunnable refs, + Runnable onGetNextPart + ) { + this.onGetNextPart = onGetNextPart; + assert partsIterator.hasNext(); + this.name = name; + this.threadPool = threadPool; + this.partsIterator = partsIterator; + this.chunksIterator = partsIterator.next().chunks().iterator(); + this.refs = refs; + } + + @Override + public boolean isPartComplete() { + return chunksIterator.hasNext() == false; + } + + @Override + public boolean isLastPart() { + return partsIterator.hasNext() == false; + } + + @Override + public void getNextPart(ActionListener listener) { + threadPool.generic().execute(ActionRunnable.supply(listener, () -> { + onGetNextPart.run(); + return new TestBytesReferenceBodyPart(name, threadPool, partsIterator, refs, onGetNextPart); + })); + } + + @Override + public ReleasableBytesReference encodeChunk(int sizeHint, Recycler recycler) { + assert chunksIterator.hasNext(); + return new ReleasableBytesReference(chunksIterator.next(), refs.acquire()); + } + + @Override + public String getResponseContentTypeString() { + return "application/binary"; + } + } + + public void testRandomZipResponse() throws IOException { + final var request = new Request("GET", RandomZipResponsePlugin.ROUTE); + if (randomBoolean()) { + request.setOptions( + RequestOptions.DEFAULT.toBuilder() + .addHeader("accept-encoding", String.join(", ", randomSubsetOf(List.of("deflate", "gzip", "zstd", "br")))) + ); + } + final var response = getRestClient().performRequest(request); + assertEquals(ZIP_CONTENT_TYPE, response.getHeader("Content-Type")); + assertNull(response.getHeader("content-encoding")); // zip file is already compressed + assertEquals( + "attachment; filename=\"" + RandomZipResponsePlugin.RESPONSE_FILENAME + ".zip\"", + response.getHeader("Content-Disposition") + ); + final var pathPrefix = RandomZipResponsePlugin.RESPONSE_FILENAME + "/"; + + final var actualEntries = new HashMap(); + final var copyBuffer = new byte[PageCacheRecycler.BYTE_PAGE_SIZE]; + + try (var zipStream = new ZipInputStream(response.getEntity().getContent())) { + ZipEntry zipEntry; + while ((zipEntry = zipStream.getNextEntry()) != null) { + assertThat(zipEntry.getName(), startsWith(pathPrefix)); + final var name = zipEntry.getName().substring(pathPrefix.length()); + try (var bytesStream = new BytesStreamOutput()) { + while (true) { + final var readLength = zipStream.read(copyBuffer, 0, copyBuffer.length); + if (readLength < 0) { + break; + } + bytesStream.write(copyBuffer, 0, readLength); + } + actualEntries.put(name, bytesStream.bytes()); + } + } + } finally { + assertEquals(getExpectedEntries(), actualEntries); + } + } + + public void testAbort() throws IOException { + final var request = new Request("GET", RandomZipResponsePlugin.INFINITE_ROUTE); + final var responseStarted = new CountDownLatch(1); + final var bodyConsumed = new CountDownLatch(1); + 
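handleZipRestRequest above is the only producer-side usage of the new ChunkedZipResponse visible in this excerpt: one response object per REST channel, one listener per zip entry, each completed with a ChunkedRestResponseBodyPart. Reduced to a single entry, with the argument types and the null-entry behaviour inferred solely from that test code, the shape is roughly:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.rest.ChunkedRestResponseBodyPart;
import org.elasticsearch.rest.ChunkedZipResponse;
import org.elasticsearch.rest.RestChannel;

// Sketch of the producer-side shape seen above; "onFinished", "entryRef" and "entryBody" are placeholders.
static void sendSingleEntryZip(RestChannel channel, Releasable onFinished, Releasable entryRef, ChunkedRestResponseBodyPart entryBody) {
    try (var zipResponse = new ChunkedZipResponse("test-response", channel, onFinished)) {
        ActionListener<ChunkedRestResponseBodyPart> entryListener = zipResponse.newEntryListener("entry-name", entryRef);
        // completing with null is also exercised by the test; judging from getExpectedEntries,
        // such entries simply do not appear in the resulting archive
        entryListener.onResponse(entryBody);
    }
}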
request.setOptions(RequestOptions.DEFAULT.toBuilder().setHttpAsyncResponseConsumerFactory(() -> new HttpAsyncResponseConsumer<>() { + + final ByteBuffer readBuffer = ByteBuffer.allocate(ByteSizeUnit.KB.toIntBytes(4)); + int bytesToConsume = ByteSizeUnit.MB.toIntBytes(1); + + @Override + public void responseReceived(HttpResponse response) { + assertEquals("application/zip", response.getHeaders("Content-Type")[0].getValue()); + final var contentDispositionHeader = response.getHeaders("Content-Disposition")[0].getElements()[0]; + assertEquals("attachment", contentDispositionHeader.getName()); + assertEquals( + RandomZipResponsePlugin.RESPONSE_FILENAME + ".zip", + contentDispositionHeader.getParameterByName("filename").getValue() + ); + responseStarted.countDown(); + } + + @Override + public void consumeContent(ContentDecoder decoder, IOControl ioControl) throws IOException { + readBuffer.clear(); + final var bytesRead = decoder.read(readBuffer); + if (bytesRead > 0) { + bytesToConsume -= bytesRead; + } + + if (bytesToConsume <= 0) { + bodyConsumed.countDown(); + ioControl.shutdown(); + } + } + + @Override + public void responseCompleted(HttpContext context) {} + + @Override + public void failed(Exception ex) {} + + @Override + public Exception getException() { + return null; + } + + @Override + public HttpResponse getResult() { + return null; + } + + @Override + public boolean isDone() { + return false; + } + + @Override + public void close() {} + + @Override + public boolean cancel() { + return false; + } + })); + + try { + try (var restClient = createRestClient(internalCluster().getRandomNodeName())) { + // one-node REST client to avoid retries + expectThrows(ConnectionClosedException.class, () -> restClient.performRequest(request)); + } + safeAwait(responseStarted); + safeAwait(bodyConsumed); + } finally { + assertNull(getExpectedEntries()); // mainly just checking that all refs are released + } + } + + public void testGetNextPartFailure() throws IOException { + final var request = new Request("GET", RandomZipResponsePlugin.INFINITE_ROUTE); + request.addParameter(RandomZipResponsePlugin.GET_NEXT_PART_COUNT_DOWN_PARAM, Integer.toString(between(1, 100))); + + try (var restClient = createRestClient(internalCluster().getRandomNodeName())) { + // one-node REST client to avoid retries + assertThat( + safeAwaitFailure( + Response.class, + l -> restClient.performRequestAsync(request, ActionTestUtils.wrapAsRestResponseListener(l)) + ), + anyOf(instanceOf(ConnectionClosedException.class), instanceOf(MalformedChunkCodingException.class)) + ); + } finally { + assertNull(getExpectedEntries()); // mainly just checking that all refs are released + } + } + + private static Map getExpectedEntries() { + final List> nodeResponses = StreamSupport + // concatenate all the chunks in all the entries + .stream(internalCluster().getInstances(PluginsService.class).spliterator(), false) + .flatMap(p -> p.filterPlugins(RandomZipResponsePlugin.class)) + .flatMap(p -> { + final var maybeResponse = p.responseRef.getAndSet(null); + if (maybeResponse == null) { + return Stream.of(); + } else { + safeAwait(maybeResponse.completedLatch()); // ensures that all refs have been released + if (maybeResponse.entries() == null) { + return Stream.of((Map) null); + } else { + final var expectedEntries = Maps.newMapWithExpectedSize(maybeResponse.entries().size()); + maybeResponse.entries().forEach((entryName, entryBody) -> { + if (entryBody != null) { + try (var bytesStreamOutput = new BytesStreamOutput()) { + for (final var part : 
entryBody.parts()) { + for (final var chunk : part.chunks()) { + chunk.writeTo(bytesStreamOutput); + } + } + expectedEntries.put(entryName, bytesStreamOutput.bytes()); + } catch (IOException e) { + throw new AssertionError(e); + } + } + }); + return Stream.of(expectedEntries); + } + } + }) + .toList(); + assertThat(nodeResponses, hasSize(1)); + return nodeResponses.get(0); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/RestControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/RestControllerIT.java index 7ad464fee92ba..2e05dbfad6085 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/rest/RestControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/RestControllerIT.java @@ -20,9 +20,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.TelemetryPlugin; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESIntegTestCase; @@ -34,10 +37,10 @@ import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; -import java.util.stream.Stream; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numClientNodes = 1, numDataNodes = 0) @@ -54,7 +57,7 @@ public void testHeadersEmittedWithChunkedResponses() throws IOException { assertEquals(ChunkedResponseWithHeadersPlugin.HEADER_VALUE, response.getHeader(ChunkedResponseWithHeadersPlugin.HEADER_NAME)); } - public void testMetricsEmittedOnSuccess() throws IOException { + public void testMetricsEmittedOnSuccess() throws Exception { final var client = getRestClient(); final var request = new Request("GET", TestEchoStatusCodePlugin.ROUTE); request.addParameter("status_code", "200"); @@ -70,7 +73,7 @@ public void testMetricsEmittedOnSuccess() throws IOException { }); } - public void testMetricsEmittedOnRestError() throws IOException { + public void testMetricsEmittedOnRestError() throws Exception { final var client = getRestClient(); final var request = new Request("GET", TestEchoStatusCodePlugin.ROUTE); request.addParameter("status_code", "503"); @@ -85,7 +88,7 @@ public void testMetricsEmittedOnRestError() throws IOException { }); } - public void testMetricsEmittedOnWrongMethod() throws IOException { + public void testMetricsEmittedOnWrongMethod() throws Exception { final var client = getRestClient(); final var request = new Request("DELETE", TestEchoStatusCodePlugin.ROUTE); final var response = expectThrows(ResponseException.class, () -> client.performRequest(request)); @@ -97,31 +100,39 @@ public void testMetricsEmittedOnWrongMethod() throws IOException { }); } - private static void assertMeasurement(Consumer measurementConsumer) { - var measurements = new ArrayList(); - for (PluginsService pluginsService : internalCluster().getInstances(PluginsService.class)) { - final TestTelemetryPlugin telemetryPlugin = 
pluginsService.filterPlugins(TestTelemetryPlugin.class).findFirst().orElseThrow(); - telemetryPlugin.collect(); - - final var metrics = telemetryPlugin.getLongCounterMeasurement(RestController.METRIC_REQUESTS_TOTAL); - measurements.addAll(metrics); - } - assertThat(measurements, hasSize(1)); - measurementConsumer.accept(measurements.get(0)); + private void assertMeasurement(Consumer<Measurement> measurementConsumer) throws Exception { + assertBusy(() -> { + var measurements = new ArrayList<Measurement>(); + for (var nodeName : internalCluster().getNodeNames()) { + PluginsService pluginsService = internalCluster().getInstance(PluginsService.class, nodeName); + var telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class).toList(); + + assertThat(telemetryPlugins, hasSize(1)); + assertThat(telemetryPlugins.get(0), instanceOf(TestTelemetryPlugin.class)); + var telemetryPlugin = (TestTelemetryPlugin) telemetryPlugins.get(0); + + telemetryPlugin.collect(); + + final var metrics = telemetryPlugin.getLongCounterMeasurement(RestController.METRIC_REQUESTS_TOTAL); + logger.info("collecting [{}] metrics from [{}]", metrics.size(), nodeName); + measurements.addAll(metrics); + } + assertThat(measurements, hasSize(1)); + measurementConsumer.accept(measurements.get(0)); + }); } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return Stream.concat( - super.nodePlugins().stream(), - Stream.of(ChunkedResponseWithHeadersPlugin.class, TestEchoStatusCodePlugin.class, TestTelemetryPlugin.class) - ).toList(); + return List.of(ChunkedResponseWithHeadersPlugin.class, TestEchoStatusCodePlugin.class, TestTelemetryPlugin.class); } public static class TestEchoStatusCodePlugin extends Plugin implements ActionPlugin { static final String ROUTE = "/_test/echo_status_code"; static final String NAME = "test_echo_status_code"; + private static final Logger logger = LogManager.getLogger(TestEchoStatusCodePlugin.class); + @Override public Collection<RestHandler> getRestHandlers( Settings settings, @@ -148,7 +159,8 @@ public List<Route> routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { var statusCode = request.param("status_code"); - client.getLocalNodeId(); + logger.info("received echo request for {}", statusCode); + var restStatus = RestStatus.fromCode(Integer.parseInt(statusCode)); return channel -> { final var response = RestResponse.chunked( @@ -161,6 +173,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli null ); channel.sendResponse(response); + logger.info("sent response"); }; } }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/StreamingXContentResponseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/StreamingXContentResponseIT.java new file mode 100644 index 0000000000000..ae91caea888db --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/StreamingXContentResponseIT.java @@ -0,0 +1,300 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.rest; + +import org.apache.http.ConnectionClosedException; +import org.apache.http.HttpResponse; +import org.apache.http.nio.ContentDecoder; +import org.apache.http.nio.IOControl; +import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.http.protocol.HttpContext; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThrottledIterator; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +import static org.hamcrest.Matchers.hasSize; + +@ESIntegTestCase.ClusterScope(numDataNodes = 1) +public class StreamingXContentResponseIT extends ESIntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopyNoNullElements(super.nodePlugins(), RandomXContentResponsePlugin.class); + } + + public static class RandomXContentResponsePlugin extends Plugin implements ActionPlugin { + + public static final String ROUTE = "/_random_xcontent_response"; + + public static final String INFINITE_ROUTE = "/_random_infinite_xcontent_response"; + + public final AtomicReference<Response> responseRef = new AtomicReference<>(); + + public record Response(Map<String, String> fragments, CountDownLatch completedLatch) {} + + @Override + public Collection<RestHandler> getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier<DiscoveryNodes> nodesInCluster, + Predicate<NodeFeature> clusterSupportsFeature + ) { + return List.of( + // handler that returns a normal (finite) response + new RestHandler() { + @Override +
public List<Route> routes() { + return List.of(new Route(RestRequest.Method.GET, ROUTE)); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws IOException { + final var response = new Response(new HashMap<>(), new CountDownLatch(1)); + final var entryCount = between(0, 10000); + for (int i = 0; i < entryCount; i++) { + response.fragments().put(randomIdentifier(), randomIdentifier()); + } + assertTrue(responseRef.compareAndSet(null, response)); + handleStreamingXContentRestRequest( + channel, + client.threadPool(), + response.completedLatch(), + response.fragments().entrySet().iterator() + ); + } + }, + + // handler that just keeps on yielding chunks until aborted + new RestHandler() { + @Override + public List<Route> routes() { + return List.of(new Route(RestRequest.Method.GET, INFINITE_ROUTE)); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws IOException { + final var response = new Response(new HashMap<>(), new CountDownLatch(1)); + assertTrue(responseRef.compareAndSet(null, new Response(null, response.completedLatch()))); + handleStreamingXContentRestRequest(channel, client.threadPool(), response.completedLatch(), new Iterator<>() { + + private long id; + + // carry on yielding content even after the channel closes + private final Semaphore trailingContentPermits = new Semaphore(between(0, 20)); + + @Override + public boolean hasNext() { + return request.getHttpChannel().isOpen() || trailingContentPermits.tryAcquire(); + } + + @Override + public Map.Entry<String, String> next() { + return new Map.Entry<>() { + private final String key = Long.toString(id++); + private final String content = randomIdentifier(); + + @Override + public String getKey() { + return key; + } + + @Override + public String getValue() { + return content; + } + + @Override + public String setValue(String value) { + return fail(null, "must not setValue"); + } + }; + } + }); + } + } + ); + } + + private static void handleStreamingXContentRestRequest( + RestChannel channel, + ThreadPool threadPool, + CountDownLatch completionLatch, + Iterator<Map.Entry<String, String>> fragmentIterator + ) throws IOException { + try (var refs = new RefCountingRunnable(completionLatch::countDown)) { + final var streamingXContentResponse = new StreamingXContentResponse(channel, channel.request(), refs.acquire()); + streamingXContentResponse.writeFragment(p -> ChunkedToXContentHelper.startObject(), refs.acquire()); + final var finalRef = refs.acquire(); + ThrottledIterator.run( + fragmentIterator, + (ref, fragment) -> randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.generic()).execute( + ActionRunnable.run(ActionListener.releaseAfter(refs.acquireListener(), ref), () -> { + Thread.yield(); + streamingXContentResponse.writeFragment( + p -> ChunkedToXContentHelper.field(fragment.getKey(), fragment.getValue()), + refs.acquire() + ); + }) + ), + between(1, 10), + () -> {}, + () -> { + try (streamingXContentResponse; finalRef) { + streamingXContentResponse.writeFragment(p -> ChunkedToXContentHelper.endObject(), refs.acquire()); + } + } + ); + } + } + + public void testRandomStreamingXContentResponse() throws IOException { + final var request = new Request("GET", RandomXContentResponsePlugin.ROUTE); + final var response = getRestClient().performRequest(request); + final var actualEntries = XContentHelper.convertToMap(JsonXContent.jsonXContent, response.getEntity().getContent(), false); + assertEquals(getExpectedEntries(), actualEntries); + } + + public void 
testAbort() throws IOException { + final var request = new Request("GET", RandomXContentResponsePlugin.INFINITE_ROUTE); + final var responseStarted = new CountDownLatch(1); + final var bodyConsumed = new CountDownLatch(1); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setHttpAsyncResponseConsumerFactory(() -> new HttpAsyncResponseConsumer<>() { + + final ByteBuffer readBuffer = ByteBuffer.allocate(ByteSizeUnit.KB.toIntBytes(4)); + int bytesToConsume = ByteSizeUnit.MB.toIntBytes(1); + + @Override + public void responseReceived(HttpResponse response) { + responseStarted.countDown(); + } + + @Override + public void consumeContent(ContentDecoder decoder, IOControl ioControl) throws IOException { + readBuffer.clear(); + final var bytesRead = decoder.read(readBuffer); + if (bytesRead > 0) { + bytesToConsume -= bytesRead; + } + + if (bytesToConsume <= 0) { + bodyConsumed.countDown(); + ioControl.shutdown(); + } + } + + @Override + public void responseCompleted(HttpContext context) {} + + @Override + public void failed(Exception ex) {} + + @Override + public Exception getException() { + return null; + } + + @Override + public HttpResponse getResult() { + return null; + } + + @Override + public boolean isDone() { + return false; + } + + @Override + public void close() {} + + @Override + public boolean cancel() { + return false; + } + })); + + try { + try (var restClient = createRestClient(internalCluster().getRandomNodeName())) { + // one-node REST client to avoid retries + expectThrows(ConnectionClosedException.class, () -> restClient.performRequest(request)); + } + safeAwait(responseStarted); + safeAwait(bodyConsumed); + } finally { + assertNull(getExpectedEntries()); // mainly just checking that all refs are released + } + } + + private static Map<String, String> getExpectedEntries() { + final List<Map<String, String>> nodeResponses = StreamSupport + // concatenate all the chunks in all the entries + .stream(internalCluster().getInstances(PluginsService.class).spliterator(), false) + .flatMap(p -> p.filterPlugins(RandomXContentResponsePlugin.class)) + .flatMap(p -> { + final var response = p.responseRef.getAndSet(null); + if (response == null) { + return Stream.of(); + } else { + safeAwait(response.completedLatch()); // ensures that all refs have been released + return Stream.of(response.fragments()); + } + }) + .toList(); + assertThat(nodeResponses, hasSize(1)); + return nodeResponses.get(0); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java index 619e7c9d9edec..fe798f9233cbb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java @@ -7,6 +7,14 @@ */ package org.elasticsearch.script; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -20,6 +28,7 @@ import java.util.Map; import java.util.function.Function; +import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.putJsonStoredScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; public class StoredScriptsIT extends ESIntegTestCase { @@ -41,34 +50,66 @@ protected Collection> nodePlugins() { } public void testBasics() { - assertAcked(clusterAdmin().preparePutStoredScript().setId("foobar").setContent(new BytesArray(Strings.format(""" + putJsonStoredScript("foobar", Strings.format(""" {"script": {"lang": "%s", "source": "1"} } - """, LANG)), XContentType.JSON)); - String script = clusterAdmin().prepareGetStoredScript("foobar").get().getSource().getSource(); + """, LANG)); + String script = safeExecute(GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "foobar")).getSource() + .getSource(); assertNotNull(script); assertEquals("1", script); - assertAcked(clusterAdmin().prepareDeleteStoredScript("foobar")); - StoredScriptSource source = clusterAdmin().prepareGetStoredScript("foobar").get().getSource(); + assertAcked( + safeExecute( + TransportDeleteStoredScriptAction.TYPE, + new DeleteStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "foobar") + ) + ); + StoredScriptSource source = safeExecute(GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "foobar")) + .getSource(); assertNull(source); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - clusterAdmin().preparePutStoredScript().setId("id#").setContent(new BytesArray(Strings.format(""" - {"script": {"lang": "%s", "source": "1"} } - """, LANG)), XContentType.JSON) + assertEquals( + "Validation Failed: 1: id cannot contain '#' for stored script;", + asInstanceOf( + IllegalArgumentException.class, + ExceptionsHelper.unwrapCause( + safeAwaitFailure( + AcknowledgedResponse.class, + l -> client().execute( + TransportPutStoredScriptAction.TYPE, + new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).id("id#") + .content(new BytesArray(Strings.format(""" + {"script": {"lang": "%s", "source": "1"} } + """, LANG)), XContentType.JSON), + l + ) + ) + ) + ).getMessage() ); - assertEquals("Validation Failed: 1: id cannot contain '#' for stored script;", e.getMessage()); } public void testMaxScriptSize() { - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - clusterAdmin().preparePutStoredScript().setId("foobar").setContent(new BytesArray(Strings.format(""" - {"script": { "lang": "%s", "source":"0123456789abcdef"} }\ - """, LANG)), XContentType.JSON) + assertEquals( + "exceeded max allowed stored script size in bytes [64] with size [65] for script [foobar]", + asInstanceOf( + IllegalArgumentException.class, + ExceptionsHelper.unwrapCause( + safeAwaitFailure( + AcknowledgedResponse.class, + l -> client().execute( + TransportPutStoredScriptAction.TYPE, + new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).id("foobar") + .content(new BytesArray(Strings.format(""" + {"script": { "lang": "%s", "source":"0123456789abcdef"} }\ + """, LANG)), XContentType.JSON), + l + ) + + ) + ) + ).getMessage() ); - assertEquals("exceeded max allowed stored script size in bytes [64] with size [65] for script [foobar]", e.getMessage()); } public static class CustomScriptPlugin extends 
MockScriptPlugin { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index efb283f047bb2..b83b74ca8b639 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -1549,8 +1549,7 @@ public void testDSTEndTransition() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=date") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=date").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1)); String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index 0b92372652597..9df1fae2431f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -616,7 +615,7 @@ public void testNoRangesInQuery() { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("date", "type=date") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 668b9d79c49a8..237f296f25751 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; @@ -942,7 +941,7 @@ public void testOtherDocCount() { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("d", "type=float") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff 
--git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 5894837e257bf..d117f593348d6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; @@ -1115,7 +1114,7 @@ public void testDecimalIntervalAndOffset() throws Exception { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("d", "type=float") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index f0c5cbf9c76bb..cbb3850422800 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; @@ -899,8 +898,7 @@ public void testOtherDocCount() { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java index 71402d3e9c1d8..f8ace23057ea6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java @@ -25,7 +25,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; @ESIntegTestCase.SuiteScopeTestCase @@ -93,6 +92,7 @@ public void testRandomSamplerConsistentSeed() { double[] sampleMonotonicValue = new double[1]; double[] sampleNumericValue = new double[1]; 
long[] sampledDocCount = new long[1]; + double tolerance = 1e-14; // initialize the values assertResponse( prepareSearch("idx").setPreference("shard:0") @@ -123,9 +123,12 @@ public void testRandomSamplerConsistentSeed() { ), response -> { InternalRandomSampler sampler = response.getAggregations().get("sampler"); - assertThat(((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(), equalTo(sampleMonotonicValue[0])); - assertThat(((Avg) sampler.getAggregations().get("mean_numeric")).getValue(), equalTo(sampleNumericValue[0])); - assertThat(sampler.getDocCount(), equalTo(sampledDocCount[0])); + double monotonicValue = ((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(); + double numericValue = ((Avg) sampler.getAggregations().get("mean_numeric")).getValue(); + long docCount = sampler.getDocCount(); + assertEquals(monotonicValue, sampleMonotonicValue[0], tolerance); + assertEquals(numericValue, sampleNumericValue[0], tolerance); + assertEquals(docCount, sampledDocCount[0]); } ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java index 10e3649e9f161..6a60969e632ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -896,7 +895,7 @@ public void testEmptyAggregation() throws Exception { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("i", "type=integer") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 35a117ac8922b..5fa010e4b091e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -548,7 +547,7 @@ public void testReduceFromSeveralShards() throws IOException, ExecutionException public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("s", "type=long", "t", "type=text") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + 
.setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java index c45cabf425b14..69b84c6b98286 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; @@ -46,9 +45,7 @@ private void indexDocs(int numDocs) { } public void testSingleValuedString() { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + final Settings.Builder settings = indexSettings(2, 0); createIndex(index, settings.build()); // We want to trigger the usage of cuckoo filters that happen only when there are // more than 10k distinct values in one shard. diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java index 662744ddfe77e..fa9a9ef2a7f41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.query.QueryBuilders; @@ -1198,7 +1197,7 @@ public void testOtherDocCount() { public void testScriptCaching() throws Exception { assertAcked( prepareCreate("cache_test_idx").setMapping("d", "type=keyword") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 750868defde97..fde18fb283a6e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -905,8 +904,7 @@ private void checkUpperLowerBounds(ExtendedStats stats, 
double sigma) { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 13d66a5cf3949..a4da7c4e893be 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -568,8 +567,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index cd69fb8241ef2..43e4aecb07f7f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -541,8 +540,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index 6c80931914ac6..4ded290f93961 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -9,7 +9,6 @@ package 
org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -494,8 +493,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index eeee745b32f92..042c8c1fb0e35 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -10,8 +10,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.plugins.Plugin; @@ -27,7 +25,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.xcontent.XContentType; import org.junit.Before; import java.io.IOException; @@ -42,6 +39,7 @@ import java.util.function.Consumer; import java.util.function.Function; +import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.putJsonStoredScript; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; @@ -300,21 +298,21 @@ public void setupSuiteScopeCluster() throws Exception { // When using the MockScriptPlugin we can map Stored scripts to inline scripts: // the id of the stored script is used in test method while the source of the stored script // must match a predefined script from CustomScriptPlugin.pluginScripts() method - assertAcked(clusterAdmin().preparePutStoredScript().setId("initScript_stored").setContent(new BytesArray(Strings.format(""" + putJsonStoredScript("initScript_stored", Strings.format(""" {"script": {"lang": "%s", "source": "vars.multiplier = 3"} } - """, MockScriptPlugin.NAME)), XContentType.JSON)); + """, MockScriptPlugin.NAME)); - assertAcked(clusterAdmin().preparePutStoredScript().setId("mapScript_stored").setContent(new BytesArray(Strings.format(""" + putJsonStoredScript("mapScript_stored", Strings.format(""" {"script": {"lang": "%s", "source": "state.list.add(vars.multiplier)"} } - """, MockScriptPlugin.NAME)), XContentType.JSON)); + """, MockScriptPlugin.NAME)); - assertAcked(clusterAdmin().preparePutStoredScript().setId("combineScript_stored").setContent(new BytesArray(Strings.format(""" + 
putJsonStoredScript("combineScript_stored", Strings.format(""" {"script": {"lang": "%s", "source": "sum state values as a new aggregation"} } - """, MockScriptPlugin.NAME)), XContentType.JSON)); + """, MockScriptPlugin.NAME)); - assertAcked(clusterAdmin().preparePutStoredScript().setId("reduceScript_stored").setContent(new BytesArray(Strings.format(""" + putJsonStoredScript("reduceScript_stored", Strings.format(""" {"script": {"lang": "%s", "source": "sum all states (lists) values as a new aggregation"} } - """, MockScriptPlugin.NAME)), XContentType.JSON)); + """, MockScriptPlugin.NAME)); indexRandom(true, builders); ensureSearchable(); @@ -1139,8 +1137,7 @@ public void testScriptCaching() throws Exception { Script ndRandom = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return Math.random()", Collections.emptyMap()); assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index 84e0bee396c9d..78adca3377f0b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -234,8 +233,7 @@ private void assertShardExecutionState(SearchResponse response, int expectedFail */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index d50c101dbd5d1..fd173b8f48a12 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -208,8 +207,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + 
prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 9c737cb734f16..9c11b6cd14d54 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -485,8 +484,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 1c101324cd5fc..c67a237b2fc17 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -457,8 +456,7 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index fc753b0844c46..42f04ff54c82a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -1082,9 +1082,7 @@ public void testScriptCaching() throws Exception { try { assertAcked( prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings( - Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1) - ) + .setSettings(indexSettings(1, 
1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index c3feff6f3eaaa..445ad8e0b9b11 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -215,8 +214,7 @@ public void testMultiValuedScriptWithParams() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true)) ); indexRandom( true, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index dc612d6bad5ce..bc8142a629f7c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; @@ -24,7 +23,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.ArrayList; @@ -35,12 +33,12 @@ import java.util.Map; import java.util.function.Function; +import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.putJsonStoredScript; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketScript; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; @@ -514,14 +512,11 @@ public void testInlineScriptReturnNull() { } public void testStoredScript() { - assertAcked( - clusterAdmin().preparePutStoredScript() - .setId("my_script") - // Script source is not interpreted but it references a 
pre-defined script from CustomScriptPlugin - .setContent( - new BytesArray("{ \"script\": {\"lang\": \"" + CustomScriptPlugin.NAME + "\", \"source\": \"my_script\" } }"), - XContentType.JSON - ) + + putJsonStoredScript( + "my_script", + // Script source is not interpreted but it references a pre-defined script from CustomScriptPlugin + "{ \"script\": {\"lang\": \"" + CustomScriptPlugin.NAME + "\", \"source\": \"my_script\" } }" ); assertNoFailuresAndResponse( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java new file mode 100644 index 0000000000000..bb18b8f1b702d --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java @@ -0,0 +1,716 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.ccs; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot; +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.Result; +import org.elasticsearch.action.search.ClosePointInTimeRequest; +import org.elasticsearch.action.search.OpenPointInTimeRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.FeatureFlag; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.query.SlowRunningQueryBuilder; +import org.elasticsearch.search.query.ThrowingQueryBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.usage.UsageService; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static 
org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.ASYNC_FEATURE; +import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.MRT_FEATURE; +import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.WILDCARD_FEATURE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.hamcrest.Matchers.equalTo; + +public class CCSUsageTelemetryIT extends AbstractMultiClustersTestCase { + private static final Logger LOGGER = LogManager.getLogger(CCSUsageTelemetryIT.class); + private static final String REMOTE1 = "cluster-a"; + private static final String REMOTE2 = "cluster-b"; + private static final FeatureFlag CCS_TELEMETRY_FEATURE_FLAG = new FeatureFlag("ccs_telemetry"); + + @Override + protected boolean reuseClusters() { + return false; + } + + @Override + protected Collection<String> remoteClusterAlias() { + return List.of(REMOTE1, REMOTE2); + } + + @Rule + public SkipUnavailableRule skipOverride = new SkipUnavailableRule(REMOTE1, REMOTE2); + + @BeforeClass + protected static void skipIfTelemetryDisabled() { + assumeTrue("Skipping test as CCS_TELEMETRY_FEATURE_FLAG is disabled", CCS_TELEMETRY_FEATURE_FLAG.isEnabled()); + } + + @Override + protected Map<String, Boolean> skipUnavailableForRemoteClusters() { + var map = skipOverride.getMap(); + LOGGER.info("Using skip_unavailable map: [{}]", map); + return map; + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) { + return CollectionUtils.appendToCopy(super.nodePlugins(clusterAlias), CrossClusterSearchIT.TestQueryBuilderPlugin.class); + } + + private SearchRequest makeSearchRequest(String... indices) { + SearchRequest searchRequest = new SearchRequest(indices); + searchRequest.allowPartialSearchResults(false); + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + searchRequest.setCcsMinimizeRoundtrips(randomBoolean()); + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); + return searchRequest; + } + + /** + * Run search request and get telemetry from it + */ + private CCSTelemetrySnapshot getTelemetryFromSearch(SearchRequest searchRequest) throws ExecutionException, InterruptedException { + // We want to send search to a specific node (we don't care which one) so that we could + // collect the CCS telemetry from it later + String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); + // We don't care here too much about the response, we just want to trigger the telemetry collection. + // So we check it's not null and leave the rest to other tests. 
+ assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + return getTelemetrySnapshot(nodeName); + } + + private CCSTelemetrySnapshot getTelemetryFromFailedSearch(SearchRequest searchRequest) throws Exception { + // We want to send search to a specific node (we don't care which one) so that we could + // collect the CCS telemetry from it later + String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); + PlainActionFuture<SearchResponse> queryFuture = new PlainActionFuture<>(); + cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest, queryFuture); + assertBusy(() -> assertTrue(queryFuture.isDone())); + + // We expect failure, but we don't care too much which failure it is in this test + ExecutionException ee = expectThrows(ExecutionException.class, queryFuture::get); + assertNotNull(ee.getCause()); + + return getTelemetrySnapshot(nodeName); + } + + /** + * Create search request for indices and get telemetry from it + */ + private CCSTelemetrySnapshot getTelemetryFromSearch(String... indices) throws ExecutionException, InterruptedException { + return getTelemetryFromSearch(makeSearchRequest(indices)); + } + + /** + * Search on all remotes + */ + public void testAllRemotesSearch() throws ExecutionException, InterruptedException { + Map<String, Object> testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(localIndex, "*:" + remoteIndex); + boolean minimizeRoundtrips = TransportSearchAction.shouldMinimizeRoundtrips(searchRequest); + + String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); + assertResponse( + cluster(LOCAL_CLUSTER).client(nodeName) + .filterWithHeader(Map.of(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER, "kibana")) + .search(searchRequest), + Assert::assertNotNull + ); + CCSTelemetrySnapshot telemetry = getTelemetrySnapshot(nodeName); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(minimizeRoundtrips ? 1L : 0L)); + assertThat(telemetry.getTookMrtFalse().count(), equalTo(minimizeRoundtrips ? 
0L : 1L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + assertThat(telemetry.getClientCounts().size(), equalTo(1)); + assertThat(telemetry.getClientCounts().get("kibana"), equalTo(1L)); + if (minimizeRoundtrips) { + assertThat(telemetry.getFeatureCounts().get(MRT_FEATURE), equalTo(1L)); + } else { + assertThat(telemetry.getFeatureCounts().get(MRT_FEATURE), equalTo(null)); + } + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(null)); + + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + var clusterTelemetry = perCluster.get(clusterAlias); + assertThat(clusterTelemetry.getCount(), equalTo(1L)); + assertThat(clusterTelemetry.getSkippedCount(), equalTo(0L)); + assertThat(clusterTelemetry.getTook().count(), equalTo(1L)); + } + + // another search + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getSuccessCount(), equalTo(2L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(2L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(minimizeRoundtrips ? 2L : 0L)); + assertThat(telemetry.getTookMrtFalse().count(), equalTo(minimizeRoundtrips ? 0L : 2L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + assertThat(telemetry.getClientCounts().size(), equalTo(1)); + assertThat(telemetry.getClientCounts().get("kibana"), equalTo(1L)); + perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + var clusterTelemetry = perCluster.get(clusterAlias); + assertThat(clusterTelemetry.getCount(), equalTo(2L)); + assertThat(clusterTelemetry.getSkippedCount(), equalTo(0L)); + assertThat(clusterTelemetry.getTook().count(), equalTo(2L)); + } + } + + /** + * Search on a specific remote + */ + public void testOneRemoteSearch() throws ExecutionException, InterruptedException { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + // Make request to cluster a + SearchRequest searchRequest = makeSearchRequest(localIndex, REMOTE1 + ":" + remoteIndex); + String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + CCSTelemetrySnapshot telemetry = getTelemetrySnapshot(nodeName); + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(2)); + assertThat(perCluster.get(REMOTE1).getCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE1).getTook().count(), equalTo(1L)); + assertThat(perCluster.get(REMOTE2), equalTo(null)); + assertThat(telemetry.getClientCounts().size(), equalTo(0)); + + // Make request to cluster b + searchRequest = makeSearchRequest(localIndex, REMOTE2 + ":" + remoteIndex); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + telemetry = 
getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getSuccessCount(), equalTo(2L)); + perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + assertThat(perCluster.get(REMOTE1).getCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE1).getTook().count(), equalTo(1L)); + assertThat(perCluster.get(REMOTE2).getCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE2).getTook().count(), equalTo(1L)); + } + + /** + * Local search should not produce any telemetry at all + */ + public void testLocalOnlySearch() throws ExecutionException, InterruptedException { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + + CCSTelemetrySnapshot telemetry = getTelemetryFromSearch(localIndex); + assertThat(telemetry.getTotalCount(), equalTo(0L)); + } + + /** + * Search on remotes only, without local index + */ + public void testRemoteOnlySearch() throws ExecutionException, InterruptedException { + Map testClusterInfo = setupClusters(); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + CCSTelemetrySnapshot telemetry = getTelemetryFromSearch("*:" + remoteIndex); + var perCluster = telemetry.getByRemoteCluster(); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + assertThat(perCluster.size(), equalTo(2)); + assertThat(telemetry.getClientCounts().size(), equalTo(0)); + assertThat(perCluster.get(REMOTE1).getCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE1).getSkippedCount(), equalTo(0L)); + assertThat(perCluster.get(REMOTE1).getTook().count(), equalTo(1L)); + assertThat(perCluster.get(REMOTE2).getCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE2).getSkippedCount(), equalTo(0L)); + assertThat(perCluster.get(REMOTE2).getTook().count(), equalTo(1L)); + } + + /** + * Count wildcard searches. Only wildcards in index names (not in cluster names) are counted. 
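+ * For example, {@code rem*} or {@code cluster-a:rem*} counts towards the wildcard feature, while a wildcard only in the cluster part, such as {@code *:remote_index}, does not.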
+ */ + public void testWildcardSearch() throws ExecutionException, InterruptedException { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(localIndex, "*:" + remoteIndex); + String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + CCSTelemetrySnapshot telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getFeatureCounts().get(WILDCARD_FEATURE), equalTo(null)); + + searchRequest = makeSearchRequest("*", REMOTE1 + ":" + remoteIndex); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getFeatureCounts().get(WILDCARD_FEATURE), equalTo(1L)); + + searchRequest = makeSearchRequest(localIndex, REMOTE2 + ":*"); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(3L)); + assertThat(telemetry.getFeatureCounts().get(WILDCARD_FEATURE), equalTo(2L)); + + // Wildcards in cluster name do not count + searchRequest = makeSearchRequest(localIndex, "*:" + remoteIndex); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(4L)); + assertThat(telemetry.getFeatureCounts().get(WILDCARD_FEATURE), equalTo(2L)); + + // Wildcard in the middle of the index name counts + searchRequest = makeSearchRequest(localIndex, REMOTE2 + ":rem*"); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(5L)); + assertThat(telemetry.getFeatureCounts().get(WILDCARD_FEATURE), equalTo(3L)); + + // Wildcard only counted once per search + searchRequest = makeSearchRequest("*", REMOTE1 + ":rem*", REMOTE2 + ":remote*"); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(6L)); + assertThat(telemetry.getFeatureCounts().get(WILDCARD_FEATURE), equalTo(4L)); + } + + /** + * Test complete search failure + */ + public void testFailedSearch() throws Exception { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(localIndex, "*:" + remoteIndex); + // shardId -1 means to throw the Exception on all shards, so should result in complete search failure + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), -1); + searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); + searchRequest.allowPartialSearchResults(true); + + CCSTelemetrySnapshot telemetry = getTelemetryFromFailedSearch(searchRequest); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), 
equalTo(0L)); + assertThat(telemetry.getTook().count(), equalTo(0L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + Map expectedFailures = Map.of(Result.UNKNOWN.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailures)); + } + + /** + * Search when all the remotes failed and skipped + */ + public void testSkippedAllRemotesSearch() throws Exception { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(localIndex, "*:" + remoteIndex); + // throw Exception on all shards of remoteIndex, but not against localIndex + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder( + randomLong(), + new IllegalStateException("index corrupted"), + remoteIndex + ); + searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); + searchRequest.allowPartialSearchResults(true); + + String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + + CCSTelemetrySnapshot telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + // Note that this counts how many searches had skipped remotes, not how many remotes are skipped + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + // Still count the remote that failed + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + // Each remote will have its skipped count bumped + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String remote : remoteClusterAlias()) { + assertThat(perCluster.get(remote).getCount(), equalTo(0L)); + assertThat(perCluster.get(remote).getSkippedCount(), equalTo(1L)); + assertThat(perCluster.get(remote).getTook().count(), equalTo(0L)); + } + } + + public void testSkippedOneRemoteSearch() throws Exception { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + // Remote1 will fail, Remote2 will just do nothing but it counts as success + SearchRequest searchRequest = makeSearchRequest(localIndex, REMOTE1 + ":" + remoteIndex, REMOTE2 + ":" + "nosuchindex*"); + // throw Exception on all shards of remoteIndex, but not against localIndex + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder( + randomLong(), + new IllegalStateException("index corrupted"), + remoteIndex + ); + searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); + searchRequest.allowPartialSearchResults(true); + + String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); + assertResponse(cluster(LOCAL_CLUSTER).client(nodeName).search(searchRequest), Assert::assertNotNull); + + CCSTelemetrySnapshot telemetry = getTelemetrySnapshot(nodeName); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + // Note that this counts how many searches had skipped remotes, not how many remotes are skipped + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + // Still count the remote that failed + 
assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + // Each remote will have its skipped count bumped + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + // This one is skipped + assertThat(perCluster.get(REMOTE1).getCount(), equalTo(0L)); + assertThat(perCluster.get(REMOTE1).getSkippedCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE1).getTook().count(), equalTo(0L)); + // This one is OK + assertThat(perCluster.get(REMOTE2).getCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE2).getSkippedCount(), equalTo(0L)); + assertThat(perCluster.get(REMOTE2).getTook().count(), equalTo(1L)); + } + + /** + * Test what happens if remote times out - it should be skipped + */ + public void testRemoteTimesOut() throws Exception { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(localIndex, REMOTE1 + ":" + remoteIndex); + // This works only with minimize_roundtrips enabled, since otherwise timed out shards will be counted as + // partial failure, and we disable partial results.. + searchRequest.setCcsMinimizeRoundtrips(true); + + TimeValue searchTimeout = new TimeValue(500, TimeUnit.MILLISECONDS); + // query builder that will sleep for the specified amount of time in the query phase + SlowRunningQueryBuilder slowRunningQueryBuilder = new SlowRunningQueryBuilder(searchTimeout.millis() * 5, remoteIndex); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(slowRunningQueryBuilder).timeout(searchTimeout); + searchRequest.source(sourceBuilder); + + CCSTelemetrySnapshot telemetry = getTelemetryFromSearch(searchRequest); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(1L)); + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(2)); + assertThat(perCluster.get(REMOTE1).getCount(), equalTo(0L)); + assertThat(perCluster.get(REMOTE1).getSkippedCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE1).getTook().count(), equalTo(0L)); + assertThat(perCluster.get(REMOTE2), equalTo(null)); + } + + /** + * Test what happens if remote times out and there's no local - it should be skipped + */ + public void testRemoteOnlyTimesOut() throws Exception { + Map testClusterInfo = setupClusters(); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(REMOTE1 + ":" + remoteIndex); + // This works only with minimize_roundtrips enabled, since otherwise timed out shards will be counted as + // partial failure, and we disable partial results... 
+ searchRequest.setCcsMinimizeRoundtrips(true); + + TimeValue searchTimeout = new TimeValue(100, TimeUnit.MILLISECONDS); + // query builder that will sleep for the specified amount of time in the query phase + SlowRunningQueryBuilder slowRunningQueryBuilder = new SlowRunningQueryBuilder(searchTimeout.millis() * 5, remoteIndex); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(slowRunningQueryBuilder).timeout(searchTimeout); + searchRequest.source(sourceBuilder); + + CCSTelemetrySnapshot telemetry = getTelemetryFromSearch(searchRequest); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(1L)); + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(1)); + assertThat(perCluster.get(REMOTE1).getCount(), equalTo(0L)); + assertThat(perCluster.get(REMOTE1).getSkippedCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE1).getTook().count(), equalTo(0L)); + assertThat(perCluster.get(REMOTE2), equalTo(null)); + } + + @SkipOverride(aliases = { REMOTE1 }) + public void testRemoteTimesOutFailure() throws Exception { + Map testClusterInfo = setupClusters(); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(REMOTE1 + ":" + remoteIndex); + + TimeValue searchTimeout = new TimeValue(100, TimeUnit.MILLISECONDS); + // query builder that will sleep for the specified amount of time in the query phase + SlowRunningQueryBuilder slowRunningQueryBuilder = new SlowRunningQueryBuilder(searchTimeout.millis() * 5, remoteIndex); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(slowRunningQueryBuilder).timeout(searchTimeout); + searchRequest.source(sourceBuilder); + + CCSTelemetrySnapshot telemetry = getTelemetryFromFailedSearch(searchRequest); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + // Failure is not skipping + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + // Still count the remote that failed + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(1L)); + assertThat(telemetry.getTook().count(), equalTo(0L)); + Map expectedFailure = Map.of(Result.TIMEOUT.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + // No per-cluster data on total failure + assertThat(telemetry.getByRemoteCluster().size(), equalTo(0)); + } + + /** + * Search when all the remotes failed and not skipped + */ + @SkipOverride(aliases = { REMOTE1, REMOTE2 }) + public void testFailedAllRemotesSearch() throws Exception { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + SearchRequest searchRequest = makeSearchRequest(localIndex, "*:" + remoteIndex); + // throw Exception on all shards of remoteIndex, but not against localIndex + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder( + randomLong(), + new IllegalStateException("index corrupted"), + remoteIndex + ); + searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); + + CCSTelemetrySnapshot telemetry = getTelemetryFromFailedSearch(searchRequest); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + // Failure is not 
skipping + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + // Still count the remote that failed + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getTook().count(), equalTo(0L)); + Map expectedFailure = Map.of(Result.REMOTES_UNAVAILABLE.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + // No per-cluster data on total failure + assertThat(telemetry.getByRemoteCluster().size(), equalTo(0)); + } + + /** + * Test that we're still counting remote search even if remote cluster has no such index + */ + public void testRemoteHasNoIndex() throws Exception { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + + CCSTelemetrySnapshot telemetry = getTelemetryFromSearch(localIndex, REMOTE1 + ":" + "no_such_index*"); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(2)); + assertThat(perCluster.get(REMOTE1).getCount(), equalTo(1L)); + assertThat(perCluster.get(REMOTE1).getTook().count(), equalTo(1L)); + assertThat(perCluster.get(REMOTE2), equalTo(null)); + } + + /** + * Test that we're still counting remote search even if remote cluster has no such index + */ + @SkipOverride(aliases = { REMOTE1 }) + public void testRemoteHasNoIndexFailure() throws Exception { + SearchRequest searchRequest = makeSearchRequest(REMOTE1 + ":no_such_index"); + CCSTelemetrySnapshot telemetry = getTelemetryFromFailedSearch(searchRequest); + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(0)); + Map expectedFailure = Map.of(Result.NOT_FOUND.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + } + + public void testPITSearch() throws ExecutionException, InterruptedException { + Map testClusterInfo = setupClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + + OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest(localIndex, "*:" + remoteIndex).keepAlive( + TimeValue.timeValueMinutes(5) + ); + String nodeName = cluster(LOCAL_CLUSTER).getRandomNodeName(); + var client = cluster(LOCAL_CLUSTER).client(nodeName); + BytesReference pitID = client.execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); + SearchRequest searchRequest = new SearchRequest().source( + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) + .sort("@timestamp") + .size(10) + ); + searchRequest.setCcsMinimizeRoundtrips(randomBoolean()); + + assertResponse(client.search(searchRequest), Assert::assertNotNull); + // do it again + assertResponse(client.search(searchRequest), Assert::assertNotNull); + client.execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitID)).actionGet(); + CCSTelemetrySnapshot telemetry = getTelemetrySnapshot(nodeName); + + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getSuccessCount(), equalTo(2L)); + } + + private CCSTelemetrySnapshot getTelemetrySnapshot(String nodeName) { + var usage = cluster(LOCAL_CLUSTER).getInstance(UsageService.class, nodeName); + return 
usage.getCcsUsageHolder().getCCSTelemetrySnapshot(); + } + + private Map setupClusters() { + String localIndex = "demo"; + int numShardsLocal = randomIntBetween(2, 10); + Settings localSettings = indexSettings(numShardsLocal, randomIntBetween(0, 1)).build(); + assertAcked( + client(LOCAL_CLUSTER).admin() + .indices() + .prepareCreate(localIndex) + .setSettings(localSettings) + .setMapping("@timestamp", "type=date", "f", "type=text") + ); + indexDocs(client(LOCAL_CLUSTER), localIndex); + + String remoteIndex = "prod"; + int numShardsRemote = randomIntBetween(2, 10); + for (String clusterAlias : remoteClusterAlias()) { + final InternalTestCluster remoteCluster = cluster(clusterAlias); + remoteCluster.ensureAtLeastNumDataNodes(randomIntBetween(1, 3)); + assertAcked( + client(clusterAlias).admin() + .indices() + .prepareCreate(remoteIndex) + .setSettings(indexSettings(numShardsRemote, randomIntBetween(0, 1))) + .setMapping("@timestamp", "type=date", "f", "type=text") + ); + assertFalse( + client(clusterAlias).admin() + .cluster() + .prepareHealth(remoteIndex) + .setWaitForYellowStatus() + .setTimeout(TimeValue.timeValueSeconds(10)) + .get() + .isTimedOut() + ); + indexDocs(client(clusterAlias), remoteIndex); + } + + Map clusterInfo = new HashMap<>(); + clusterInfo.put("local.index", localIndex); + clusterInfo.put("remote.index", remoteIndex); + return clusterInfo; + } + + private int indexDocs(Client client, String index) { + int numDocs = between(5, 20); + for (int i = 0; i < numDocs; i++) { + client.prepareIndex(index).setSource("f", "v", "@timestamp", randomNonNegativeLong()).get(); + } + client.admin().indices().prepareRefresh(index).get(); + return numDocs; + } + + /** + * Annotation to mark specific cluster in a test as not to be skipped when unavailable + */ + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.METHOD) + @interface SkipOverride { + String[] aliases(); + } + + /** + * Test rule to process skip annotations + */ + static class SkipUnavailableRule implements TestRule { + private final Map skipMap; + + SkipUnavailableRule(String... 
clusterAliases) { + this.skipMap = Arrays.stream(clusterAliases).collect(Collectors.toMap(Function.identity(), alias -> true)); + } + + public Map getMap() { + return skipMap; + } + + @Override + public Statement apply(Statement base, Description description) { + // Check for annotation named "SkipOverride" and set the overrides accordingly + var aliases = description.getAnnotation(SkipOverride.class); + if (aliases != null) { + for (String alias : aliases.aliases()) { + skipMap.put(alias, false); + } + } + return base; + } + + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 076158ee22037..cc272042d5384 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -89,6 +89,7 @@ import static org.hamcrest.Matchers.array; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasKey; @@ -711,6 +712,63 @@ public void testCancel() throws Exception { } } + public void testIndexMode() throws Exception { + Map indexModes = new HashMap<>(); + // metrics + { + final String metricsMapping = """ + { + "properties": { + "@timestamp": { "type": "date" }, + "hostname": { "type": "keyword", "time_series_dimension": true }, + "request_count" : { "type" : "long", "time_series_metric" : "counter" }, + "cluster": {"type": "keyword"} + } + } + """; + Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("hostname")).build(); + int numIndices = between(1, 5); + for (int i = 0; i < numIndices; i++) { + assertAcked(indicesAdmin().prepareCreate("test_metrics_" + i).setSettings(settings).setMapping(metricsMapping).get()); + indexModes.put("test_metrics_" + i, IndexMode.TIME_SERIES); + assertAcked(indicesAdmin().prepareCreate("test_old_metrics_" + i).setMapping(metricsMapping).get()); + indexModes.put("test_old_metrics_" + i, IndexMode.STANDARD); + } + } + // logsdb + { + final String logsMapping = """ + { + "properties": { + "@timestamp": { "type": "date" }, + "hostname": { "type": "keyword"}, + "request_count" : { "type" : "long"}, + "cluster": {"type": "keyword"} + } + } + """; + Settings settings = Settings.builder().put("mode", "logsdb").build(); + int numIndices = between(1, 5); + for (int i = 0; i < numIndices; i++) { + assertAcked(indicesAdmin().prepareCreate("test_logs_" + i).setSettings(settings).setMapping(logsMapping).get()); + indexModes.put("test_logs_" + i, IndexMode.LOGSDB); + assertAcked(indicesAdmin().prepareCreate("test_old_logs_" + i).setMapping(logsMapping).get()); + indexModes.put("test_old_logs_" + i, IndexMode.STANDARD); + } + } + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); + request.setMergeResults(false); + request.indices("test_*"); + request.fields(randomFrom("*", "@timestamp", "host*")); + var resp = client().fieldCaps(request).get(); + assertThat(resp.getFailures(), empty()); + Map actualIndexModes = new HashMap<>(); + for (var indexResp : resp.getIndexResponses()) { + actualIndexModes.put(indexResp.getIndexName(), indexResp.getIndexMode()); + } + 
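+ // Indices created with an explicit index mode should report it; the others default to STANDARD.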
assertThat(actualIndexModes, equalTo(indexModes)); + } + private void assertIndices(FieldCapabilitiesResponse response, String... indices) { assertNotNull(response.getIndices()); Arrays.sort(indices); @@ -859,7 +917,7 @@ protected String contentType() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return new StringStoredFieldFieldLoader(fullPath(), leafName(), null) { + return new StringStoredFieldFieldLoader(fullPath(), leafName()) { @Override protected void write(XContentBuilder b, Object value) throws IOException { BytesRef ref = (BytesRef) value; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index ee60888d7a0a8..c59fc0f68c4d4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -73,6 +73,11 @@ public boolean needs_score() { return false; } + @Override + public boolean needs_termStats() { + return false; + } + @Override public ScoreScript newInstance(DocReader docReader) { return new MyScript(params1, lookup, ((DocValuesDocReader) docReader).getLeafReaderContext()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index 0acf9be574ffe..81bb143c47729 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -93,7 +93,7 @@ protected int numberOfShards() { protected void setupSuiteScopeCluster() throws Exception { assertAcked( indicesAdmin().prepareCreate("idx") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) + .setSettings(indexSettings(1, 0)) .setMapping(STRING_FIELD, "type=keyword", NUMBER_FIELD, "type=integer", TAG_FIELD, "type=keyword") ); List builders = new ArrayList<>(); @@ -634,11 +634,7 @@ public void testNoProfile() { * documents and that is hard to express in yaml. 
*/ public void testFilterByFilter() throws InterruptedException, IOException { - assertAcked( - indicesAdmin().prepareCreate("dateidx") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) - .setMapping("date", "type=date") - ); + assertAcked(indicesAdmin().prepareCreate("dateidx").setSettings(indexSettings(1, 0)).setMapping("date", "type=date")); List builders = new ArrayList<>(); for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2; i++) { String date = Instant.ofEpochSecond(i).toString(); @@ -713,7 +709,7 @@ public void testDateHistogramFilterByFilterDisabled() throws InterruptedExceptio try { assertAcked( indicesAdmin().prepareCreate("date_filter_by_filter_disabled") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) + .setSettings(indexSettings(1, 0)) .setMapping("date", "type=date", "keyword", "type=keyword") ); List builders = new ArrayList<>(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java index 0d6d17cbaeb1f..636a7b033a4ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/rank/MockedRequestActionBasedRerankerIT.java @@ -23,11 +23,11 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java new file mode 100644 index 0000000000000..fa4cafc66c822 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java @@ -0,0 +1,755 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever; + +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.MockSearchService; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.collapse.CollapseBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.hamcrest.Matchers.equalTo; + +public class RankDocRetrieverBuilderIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(MockSearchService.TestPlugin.class); + } + + public record RetrieverSource(RetrieverBuilder retriever, SearchSourceBuilder source) {} + + private static String INDEX = "test_index"; + private static final String ID_FIELD = "_id"; + private static final String DOC_FIELD = "doc"; + private static final String TEXT_FIELD = "text"; + private static final String VECTOR_FIELD = "vector"; + private static final String TOPIC_FIELD = "topic"; + private static final String LAST_30D_FIELD = "views.last30d"; + private static final String ALL_TIME_FIELD = "views.all"; + + @Before + public void setup() throws Exception { + String mapping = """ + { + "properties": { + "vector": { + "type": "dense_vector", + "dims": 3, + "element_type": "float", + "index": true, + "similarity": "l2_norm", + "index_options": { + "type": "hnsw" + } + }, + "text": { + "type": "text" + }, + "doc": { + "type": "keyword" + }, + "topic": { + "type": "keyword" + }, + "views": { + "type": "nested", + "properties": { + "last30d": { + "type": "integer" + }, + "all": { + "type": "integer" + } + } + } + } + } + """; + createIndex(INDEX, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).build()); + 
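+ // A single primary shard keeps scoring and tie-breaking deterministic for the rank tests below.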
admin().indices().preparePutMapping(INDEX).setSource(mapping, XContentType.JSON).get(); + indexDoc( + INDEX, + "doc_1", + DOC_FIELD, + "doc_1", + TOPIC_FIELD, + "technology", + TEXT_FIELD, + "the quick brown fox jumps over the lazy dog", + LAST_30D_FIELD, + 100 + ); + indexDoc( + INDEX, + "doc_2", + DOC_FIELD, + "doc_2", + TOPIC_FIELD, + "astronomy", + TEXT_FIELD, + "you know, for Search!", + VECTOR_FIELD, + new float[] { 1.0f, 2.0f, 3.0f }, + LAST_30D_FIELD, + 3 + ); + indexDoc(INDEX, "doc_3", DOC_FIELD, "doc_3", TOPIC_FIELD, "technology", VECTOR_FIELD, new float[] { 6.0f, 6.0f, 6.0f }); + indexDoc( + INDEX, + "doc_4", + DOC_FIELD, + "doc_4", + TOPIC_FIELD, + "technology", + TEXT_FIELD, + "aardvark is a really awesome animal, but not very quick", + ALL_TIME_FIELD, + 100, + LAST_30D_FIELD, + 40 + ); + indexDoc(INDEX, "doc_5", DOC_FIELD, "doc_5", TOPIC_FIELD, "science", TEXT_FIELD, "irrelevant stuff"); + indexDoc( + INDEX, + "doc_6", + DOC_FIELD, + "doc_6", + TEXT_FIELD, + "quick quick quick quick search", + VECTOR_FIELD, + new float[] { 10.0f, 30.0f, 100.0f }, + LAST_30D_FIELD, + 15 + ); + indexDoc( + INDEX, + "doc_7", + DOC_FIELD, + "doc_7", + TOPIC_FIELD, + "biology", + TEXT_FIELD, + "dog", + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + ALL_TIME_FIELD, + 1000 + ); + refresh(INDEX); + } + + public void testRankDocsRetrieverBasicWithPagination() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 and with pagination, we'd just omit the first result + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + // include some pagination as well + source.from(1); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + 
assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_3")); + }); + } + + public void testRankDocsRetrieverWithAggs() { + // same as above, but we only want to bring back the top result from each subsearch + // so that would be 1, 2, and 7 + // and final rank would be (based on score): 2, 1, 7 + // aggs should still account for the same docs as the testRankDocsRetriever test, i.e. all but doc_5 + final int rankWindowSize = 1; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.aggregation(new TermsAggregationBuilder("topic").field(TOPIC_FIELD)); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_2")); + assertNotNull(resp.getAggregations()); + assertNotNull(resp.getAggregations().get("topic")); + Terms terms = resp.getAggregations().get("topic"); + // doc_3 is not part of the final aggs computation as it is only retrieved through the knn retriever + // and is outside of the rank window + assertThat(terms.getBucketByKey("technology").getDocCount(), equalTo(2L)); + assertThat(terms.getBucketByKey("astronomy").getDocCount(), equalTo(1L)); + assertThat(terms.getBucketByKey("biology").getDocCount(), equalTo(1L)); + }); + } + + public void testRankDocsRetrieverWithCollapse() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + 
// the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + // with collapsing on topic field we would have 6, 2, 1, 7 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.collapse( + new CollapseBuilder(TOPIC_FIELD).setInnerHits( + new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10) + ) + ); + source.fetchField(TOPIC_FIELD); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getHits().length, equalTo(4)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(1).field(TOPIC_FIELD).getValue().toString(), equalTo("astronomy")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(2).field(TOPIC_FIELD).getValue().toString(), equalTo("technology")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getHits().length, equalTo(3)); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(0).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(1).getId(), equalTo("doc_3")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(3).field(TOPIC_FIELD).getValue().toString(), equalTo("biology")); + }); + } + + public void testRankDocsRetrieverWithCollapseAndAggs() { + // same as above, but we only want to bring back the top result from each subsearch + // so that would be 1, 2, and 7 + // and final rank would be (based on score): 2, 1, 7 + // aggs should still account for the same docs as the testRankDocsRetriever test, i.e. 
all but doc_5 + final int rankWindowSize = 10; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1 and 6 as doc_4 is collapsed to doc_1 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + standard0.collapseBuilder = new CollapseBuilder(TOPIC_FIELD).setInnerHits( + new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10) + ); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.aggregation(new TermsAggregationBuilder("topic").field(TOPIC_FIELD)); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(5L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertNotNull(resp.getAggregations()); + assertNotNull(resp.getAggregations().get("topic")); + Terms terms = resp.getAggregations().get("topic"); + // doc_3 is not part of the final aggs computation as it is only retrieved through the knn retriever + // and is outside of the rank window + assertThat(terms.getBucketByKey("technology").getDocCount(), equalTo(3L)); + assertThat(terms.getBucketByKey("astronomy").getDocCount(), equalTo(1L)); + assertThat(terms.getBucketByKey("biology").getDocCount(), equalTo(1L)); + }); + } + + public void testRankDocsRetrieverWithNestedQuery() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(LAST_30D_FIELD).gt(10L), ScoreMode.Avg) + .innerHit(new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10)); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + 
standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 3, 7 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.fetchField(TOPIC_FIELD); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(5).getId(), equalTo("doc_3")); + }); + } + + public void testRankDocsRetrieverMultipleCompoundRetrievers() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + CompoundRetrieverWithRankDocs compoundRetriever1 = new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ); + // simple standard retriever that would have the doc_4 as its first (and only) result + StandardRetrieverBuilder standard2 = new StandardRetrieverBuilder(); + standard2.queryBuilder = QueryBuilders.queryStringQuery("aardvark").defaultField(TEXT_FIELD); + + // combining the two retrievers would bring doc_4 at the top as it would be the only one present in both doc sets + // the rest of the docs 
would be sorted based on their ranks as they have the same score (1/2) + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList(new RetrieverSource(compoundRetriever1, null), new RetrieverSource(standard2, null)) + ) + ); + + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(5).getId(), equalTo("doc_3")); + }); + } + + public void testRankDocsRetrieverDifferentNestedSorting() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, 6, 2 + standard0.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(LAST_30D_FIELD).gt(0), ScoreMode.Avg); + standard0.sortBuilders = List.of( + new FieldSortBuilder(LAST_30D_FIELD).setNestedSort(new NestedSortBuilder("views")).order(SortOrder.DESC) + ); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 4, 7 + standard1.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(ALL_TIME_FIELD).gt(0), ScoreMode.Avg); + standard1.sortBuilders = List.of( + new FieldSortBuilder(ALL_TIME_FIELD).setNestedSort(new NestedSortBuilder("views")).order(SortOrder.ASC) + ); + + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList(new RetrieverSource(standard0, null), new RetrieverSource(standard1, null)) + ) + ); + + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(5L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_2")); + }); + } + + class CompoundRetrieverWithRankDocs extends RetrieverBuilder { + + private final List sources; + private final int rankWindowSize; + + private CompoundRetrieverWithRankDocs(int rankWindowSize, List sources) { + this.rankWindowSize = rankWindowSize; + this.sources = Collections.unmodifiableList(sources); + } + + @Override + public boolean isCompound() { + return true; + } + + @Override + public QueryBuilder topDocsQuery() { + throw new UnsupportedOperationException("should not be called"); + } + + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (ctx.getPointInTimeBuilder() == null) { + throw new IllegalStateException("PIT is required"); + } + + // Rewrite 
prefilters + boolean hasChanged = false; + var newPreFilters = rewritePreFilters(ctx); + hasChanged |= newPreFilters != preFilterQueryBuilders; + + // Rewrite retriever sources + List newRetrievers = new ArrayList<>(); + for (var entry : sources) { + RetrieverBuilder newRetriever = entry.retriever.rewrite(ctx); + if (newRetriever != entry.retriever) { + newRetrievers.add(new RetrieverSource(newRetriever, null)); + hasChanged |= newRetriever != entry.retriever; + } else if (newRetriever == entry.retriever) { + var sourceBuilder = entry.source != null + ? entry.source + : createSearchSourceBuilder(ctx.getPointInTimeBuilder(), newRetriever); + var rewrittenSource = sourceBuilder.rewrite(ctx); + newRetrievers.add(new RetrieverSource(newRetriever, rewrittenSource)); + hasChanged |= rewrittenSource != entry.source; + } + } + if (hasChanged) { + return new CompoundRetrieverWithRankDocs(rankWindowSize, newRetrievers); + } + + // execute searches + final SetOnce results = new SetOnce<>(); + final MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); + for (var entry : sources) { + SearchRequest searchRequest = new SearchRequest().source(entry.source); + // The can match phase can reorder shards, so we disable it to ensure the stable ordering + searchRequest.setPreFilterShardSize(Integer.MAX_VALUE); + multiSearchRequest.add(searchRequest); + } + ctx.registerAsyncAction((client, listener) -> { + client.execute(TransportMultiSearchAction.TYPE, multiSearchRequest, new ActionListener<>() { + @Override + public void onResponse(MultiSearchResponse items) { + List topDocs = new ArrayList<>(); + for (int i = 0; i < items.getResponses().length; i++) { + var item = items.getResponses()[i]; + var rankDocs = getRankDocs(item.getResponse()); + sources.get(i).retriever().setRankDocs(rankDocs); + topDocs.add(rankDocs); + } + results.set(combineResults(topDocs)); + listener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + }); + + return new RankDocsRetrieverBuilder( + rankWindowSize, + newRetrievers.stream().map(s -> s.retriever).toList(), + results::get, + newPreFilters + ); + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + throw new UnsupportedOperationException("should not be called"); + } + + @Override + public String getName() { + return "compound_retriever"; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + + } + + @Override + protected boolean doEquals(Object o) { + return false; + } + + @Override + protected int doHashCode() { + return 0; + } + + private RankDoc[] getRankDocs(SearchResponse searchResponse) { + assert searchResponse != null; + int size = Math.min(rankWindowSize, searchResponse.getHits().getHits().length); + RankDoc[] docs = new RankDoc[size]; + for (int i = 0; i < size; i++) { + var hit = searchResponse.getHits().getAt(i); + long sortValue = (long) hit.getRawSortValues()[hit.getRawSortValues().length - 1]; + int doc = decodeDoc(sortValue); + int shardRequestIndex = decodeShardRequestIndex(sortValue); + docs[i] = new RankDoc(doc, hit.getScore(), shardRequestIndex); + docs[i].rank = i + 1; + } + return docs; + } + + public static int decodeDoc(long value) { + return (int) value; + } + + public static int decodeShardRequestIndex(long value) { + return (int) (value >> 32); + } + + record RankDocAndHitRatio(RankDoc rankDoc, float hitRatio) {} + + /** + * Combines the provided {@code 
rankResults} to return the final top documents. + */ + public RankDoc[] combineResults(List rankResults) { + int totalQueries = rankResults.size(); + final float step = 1.0f / totalQueries; + Map docsToRankResults = Maps.newMapWithExpectedSize(rankWindowSize); + for (var rankResult : rankResults) { + for (RankDoc scoreDoc : rankResult) { + docsToRankResults.compute(new RankDoc.RankKey(scoreDoc.doc, scoreDoc.shardIndex), (key, value) -> { + if (value == null) { + RankDoc res = new RankDoc(scoreDoc.doc, scoreDoc.score, scoreDoc.shardIndex); + res.rank = scoreDoc.rank; + return new RankDocAndHitRatio(res, step); + } else { + RankDoc res = new RankDoc(scoreDoc.doc, Math.max(scoreDoc.score, value.rankDoc.score), scoreDoc.shardIndex); + res.rank = Math.min(scoreDoc.rank, value.rankDoc.rank); + return new RankDocAndHitRatio(res, value.hitRatio + step); + } + }); + } + } + // sort the results based on hit ratio, then doc, then rank, and final tiebreaker is based on smaller doc id + RankDocAndHitRatio[] sortedResults = docsToRankResults.values().toArray(RankDocAndHitRatio[]::new); + Arrays.sort(sortedResults, (RankDocAndHitRatio doc1, RankDocAndHitRatio doc2) -> { + if (doc1.hitRatio != doc2.hitRatio) { + return doc1.hitRatio < doc2.hitRatio ? 1 : -1; + } + if (false == (Float.isNaN(doc1.rankDoc.score) || Float.isNaN(doc2.rankDoc.score)) + && (doc1.rankDoc.score != doc2.rankDoc.score)) { + return doc1.rankDoc.score < doc2.rankDoc.score ? 1 : -1; + } + if (doc1.rankDoc.rank != doc2.rankDoc.rank) { + return doc1.rankDoc.rank < doc2.rankDoc.rank ? -1 : 1; + } + return doc1.rankDoc.doc < doc2.rankDoc.doc ? -1 : 1; + }); + // trim the results if needed, otherwise each shard will always return `rank_window_size` results. + // pagination and all else will happen on the coordinator when combining the shard responses + RankDoc[] topResults = new RankDoc[Math.min(rankWindowSize, sortedResults.length)]; + for (int rank = 0; rank < topResults.length; ++rank) { + topResults[rank] = sortedResults[rank].rankDoc; + topResults[rank].rank = rank + 1; + topResults[rank].score = sortedResults[rank].hitRatio; + } + return topResults; + } + } + + private SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { + var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit).trackTotalHits(false).size(100); + retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, false); + + // Record the shard id in the sort result + List> sortBuilders = sourceBuilder.sorts() != null ? new ArrayList<>(sourceBuilder.sorts()) : new ArrayList<>(); + if (sortBuilders.isEmpty()) { + sortBuilders.add(new ScoreSortBuilder()); + } + sortBuilders.add(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + sourceBuilder.sort(sortBuilders); + return sourceBuilder; + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java new file mode 100644 index 0000000000000..e618a1b75cc4d --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java @@ -0,0 +1,275 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever; + +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.MockSearchService; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.xcontent.XContentBuilder; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(numDataNodes = 3) +public class RetrieverRewriteIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return List.of(MockSearchService.TestPlugin.class); + } + + private static String INDEX_DOCS = "docs"; + private static String INDEX_QUERIES = "queries"; + private static final String ID_FIELD = "_id"; + private static final String QUERY_FIELD = "query"; + + @Before + public void setup() throws Exception { + createIndex(INDEX_DOCS); + index(INDEX_DOCS, "doc_0", "{}"); + index(INDEX_DOCS, "doc_1", "{}"); + index(INDEX_DOCS, "doc_2", "{}"); + refresh(INDEX_DOCS); + + createIndex(INDEX_QUERIES); + index(INDEX_QUERIES, "query_0", "{ \"" + QUERY_FIELD + "\": \"doc_2\"}"); + index(INDEX_QUERIES, "query_1", "{ \"" + QUERY_FIELD + "\": \"doc_1\"}"); + index(INDEX_QUERIES, "query_2", "{ \"" + QUERY_FIELD + "\": \"doc_0\"}"); + refresh(INDEX_QUERIES); + } + + public void testRewrite() { + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard = new StandardRetrieverBuilder(); + standard.queryBuilder = QueryBuilders.termQuery(ID_FIELD, "doc_0"); + source.retriever(new AssertingRetrieverBuilder(standard)); + SearchRequestBuilder req = client().prepareSearch(INDEX_DOCS, INDEX_QUERIES).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_0")); + 
}); + } + + public void testRewriteCompound() { + SearchSourceBuilder source = new SearchSourceBuilder(); + source.retriever(new AssertingCompoundRetrieverBuilder("query_0")); + SearchRequestBuilder req = client().prepareSearch(INDEX_DOCS, INDEX_QUERIES).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_2")); + }); + } + + public void testRewriteCompoundRetrieverShouldThrowForPartialResults() throws Exception { + final String testIndex = "test"; + createIndex(testIndex, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); + for (int i = 0; i < 50; i++) { + index(testIndex, "doc_" + i, "{}"); + } + refresh(testIndex); + + SearchSourceBuilder source = new SearchSourceBuilder(); + source.retriever(new AssertingCompoundRetrieverBuilder("doc_0")); + final String randomDataNode = internalCluster().getNodeNameThat( + settings -> DiscoveryNode.hasRole(settings, DiscoveryNodeRole.DATA_ROLE) + ); + try { + ensureGreen(testIndex); + if (false == internalCluster().stopNode(randomDataNode)) { + throw new IllegalStateException("node did not stop"); + } + assertBusy(() -> { + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(testIndex) + .setWaitForStatus(ClusterHealthStatus.RED) // we are now known red because the primary shard is missing + .setWaitForEvents(Priority.LANGUID) // ensures that the update has occurred + .execute() + .actionGet(); + assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); + }); + SearchPhaseExecutionException ex = expectThrows( + SearchPhaseExecutionException.class, + client().prepareSearch(testIndex).setSource(source)::get + ); + assertThat( + ex.getDetailedMessage(), + containsString("[open_point_in_time] action requires all shards to be available. 
Missing shards") + ); + } finally { + internalCluster().restartNode(randomDataNode); + } + } + + private static class AssertingRetrieverBuilder extends RetrieverBuilder { + private final RetrieverBuilder innerRetriever; + + private AssertingRetrieverBuilder(RetrieverBuilder innerRetriever) { + this.innerRetriever = innerRetriever; + } + + @Override + public QueryBuilder topDocsQuery() { + return null; + } + + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + assertNull(ctx.getPointInTimeBuilder()); + assertNull(ctx.convertToInnerHitsRewriteContext()); + assertNull(ctx.convertToCoordinatorRewriteContext()); + assertNull(ctx.convertToIndexMetadataContext()); + assertNull(ctx.convertToSearchExecutionContext()); + assertNull(ctx.convertToDataRewriteContext()); + var newRetriever = innerRetriever.rewrite(ctx); + if (newRetriever != innerRetriever) { + return new AssertingRetrieverBuilder(newRetriever); + } + return this; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder sourceBuilder, boolean compoundUsed) { + assertNull(sourceBuilder.retriever()); + innerRetriever.extractToSearchSourceBuilder(sourceBuilder, compoundUsed); + } + + @Override + public String getName() { + return "asserting"; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException {} + + @Override + protected boolean doEquals(Object o) { + return false; + } + + @Override + protected int doHashCode() { + return innerRetriever.doHashCode(); + } + } + + private static class AssertingCompoundRetrieverBuilder extends RetrieverBuilder { + private final String id; + private final SetOnce innerRetriever; + + private AssertingCompoundRetrieverBuilder(String id) { + this.id = id; + this.innerRetriever = new SetOnce<>(null); + } + + private AssertingCompoundRetrieverBuilder(String id, SetOnce innerRetriever) { + this.id = id; + this.innerRetriever = innerRetriever; + } + + @Override + public boolean isCompound() { + return true; + } + + @Override + public QueryBuilder topDocsQuery() { + return null; + } + + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + assertNotNull(ctx.getPointInTimeBuilder()); + assertNull(ctx.convertToInnerHitsRewriteContext()); + assertNull(ctx.convertToCoordinatorRewriteContext()); + assertNull(ctx.convertToIndexMetadataContext()); + assertNull(ctx.convertToSearchExecutionContext()); + assertNull(ctx.convertToDataRewriteContext()); + if (innerRetriever.get() != null) { + return this; + } + SetOnce innerRetriever = new SetOnce<>(); + ctx.registerAsyncAction((client, actionListener) -> { + SearchSourceBuilder source = new SearchSourceBuilder().pointInTimeBuilder(ctx.getPointInTimeBuilder()) + .query(QueryBuilders.termQuery(ID_FIELD, id)) + .fetchField(QUERY_FIELD); + client.search(new SearchRequest().source(source), new ActionListener<>() { + @Override + public void onResponse(SearchResponse response) { + String query = response.getHits().getAt(0).field(QUERY_FIELD).getValue(); + StandardRetrieverBuilder standard = new StandardRetrieverBuilder(); + standard.queryBuilder = QueryBuilders.termQuery(ID_FIELD, query); + innerRetriever.set(standard); + actionListener.onResponse(null); + } + + @Override + public void onFailure(Exception e) { + actionListener.onFailure(e); + } + }); + }); + return new AssertingCompoundRetrieverBuilder(id, innerRetriever); + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder sourceBuilder, boolean 
compoundUsed) { + assertNull(sourceBuilder.retriever()); + innerRetriever.get().extractToSearchSourceBuilder(sourceBuilder, compoundUsed); + } + + @Override + public String getName() { + return "asserting"; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + throw new AssertionError("not implemented"); + } + + @Override + protected boolean doEquals(Object o) { + return false; + } + + @Override + protected int doHashCode() { + return id.hashCode(); + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 836bd26f08eee..71616abf0dcfa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -2209,7 +2209,7 @@ public void testDeleteIndexWithOutOfOrderFinalization() { .anyMatch(e -> e.snapshot().getSnapshotId().getName().equals("snapshot-with-index-1") && e.state().completed()) ) // execute the index deletion _directly on the master_ so it happens before the snapshot finalization executes - .andThen((l, ignored) -> masterDeleteIndexService.deleteIndices(new DeleteIndexClusterStateUpdateRequest(l.map(r -> { + .andThen(l -> masterDeleteIndexService.deleteIndices(new DeleteIndexClusterStateUpdateRequest(l.map(r -> { assertTrue(r.isAcknowledged()); return null; })).indices(new Index[] { internalCluster().clusterService().state().metadata().index(indexToDelete).getIndex() }) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 1130ddaa74f38..477fd9737394e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -8,26 +8,60 @@ package org.elasticsearch.snapshots; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.SnapshotSortKey; +import org.elasticsearch.action.admin.cluster.snapshots.get.TransportGetSnapshotsAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import 
org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.fs.FsBlobStore; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Predicates; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; @@ -745,4 +779,351 @@ private static GetSnapshotsRequestBuilder baseGetSnapshotsRequest(String[] repoN return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoNames) .setSnapshots("*", "-" + AbstractSnapshotIntegTestCase.OLD_VERSION_SNAPSHOT_PREFIX + "*"); } + + public void testAllFeatures() { + // A test that uses (potentially) as many of the features of the get-snapshots API at once as possible, to verify that they interact + // in the expected order etc. 
+ + // Create a few repositories and a few indices + final var repositories = randomList(1, 4, ESTestCase::randomIdentifier); + final var indices = randomList(1, 4, ESTestCase::randomIdentifier); + final var slmPolicies = randomList(1, 4, ESTestCase::randomIdentifier); + + safeAwait(l -> { + try (var listeners = new RefCountingListener(l.map(v -> null))) { + for (final var repository : repositories) { + client().execute( + TransportPutRepositoryAction.TYPE, + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repository).type(FsRepository.TYPE) + .settings(Settings.builder().put("location", randomRepoPath()).build()), + listeners.acquire(ElasticsearchAssertions::assertAcked) + ); + } + + for (final var index : indices) { + client().execute( + TransportCreateIndexAction.TYPE, + new CreateIndexRequest(index, indexSettings(1, 0).build()), + listeners.acquire(ElasticsearchAssertions::assertAcked) + ); + } + } + }); + ensureGreen(); + + // Create a few snapshots + final var snapshotInfos = Collections.synchronizedList(new ArrayList()); + safeAwait(l -> { + try (var listeners = new RefCountingListener(l.map(v -> null))) { + for (int i = 0; i < 10; i++) { + client().execute( + TransportCreateSnapshotAction.TYPE, + new CreateSnapshotRequest( + TEST_REQUEST_TIMEOUT, + // at least one snapshot per repository to satisfy consistency checks + i < repositories.size() ? repositories.get(i) : randomFrom(repositories), + randomIdentifier() + ).indices(randomNonEmptySubsetOf(indices)) + .userMetadata( + randomBoolean() ? Map.of() : Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, randomFrom(slmPolicies)) + ) + .waitForCompletion(true), + listeners.acquire( + createSnapshotResponse -> snapshotInfos.add(Objects.requireNonNull(createSnapshotResponse.getSnapshotInfo())) + ) + ); + } + } + }); + + if (randomBoolean()) { + // Sometimes also simulate bwc repository contents where some details are missing from the root blob + safeAwait(l -> { + try (var listeners = new RefCountingListener(l.map(v -> null))) { + for (final var repositoryName : randomSubsetOf(repositories)) { + removeDetailsForRandomSnapshots(repositoryName, listeners.acquire()); + } + } + }); + } + + Predicate snapshotInfoPredicate = Predicates.always(); + + // {repository} path parameter + final String[] requestedRepositories; + if (randomBoolean()) { + requestedRepositories = new String[] { randomFrom("_all", "*") }; + } else { + final var selectedRepositories = Set.copyOf(randomNonEmptySubsetOf(repositories)); + snapshotInfoPredicate = snapshotInfoPredicate.and(si -> selectedRepositories.contains(si.repository())); + requestedRepositories = selectedRepositories.toArray(new String[0]); + } + + // {snapshot} path parameter + final String[] requestedSnapshots; + if (randomBoolean()) { + requestedSnapshots = randomBoolean() ? Strings.EMPTY_ARRAY : new String[] { randomFrom("_all", "*") }; + } else { + final var selectedSnapshots = randomNonEmptySubsetOf(snapshotInfos).stream() + .map(si -> si.snapshotId().getName()) + .collect(Collectors.toSet()); + snapshotInfoPredicate = snapshotInfoPredicate.and(si -> selectedSnapshots.contains(si.snapshotId().getName())); + requestedSnapshots = selectedSnapshots.stream() + // if we have multiple repositories, add a trailing wildcard to each requested snapshot name, because if we specify exact + // names then there must be a snapshot with that name in every requested repository + .map(n -> repositories.size() == 1 && randomBoolean() ? 
n : n + "*") + .toArray(String[]::new); + } + + // ?slm_policy_filter parameter + final String[] requestedSlmPolicies; + switch (between(0, 3)) { + default -> requestedSlmPolicies = Strings.EMPTY_ARRAY; + case 1 -> { + requestedSlmPolicies = new String[] { "*" }; + snapshotInfoPredicate = snapshotInfoPredicate.and( + si -> si.userMetadata().get(SnapshotsService.POLICY_ID_METADATA_FIELD) != null + ); + } + case 2 -> { + requestedSlmPolicies = new String[] { "_none" }; + snapshotInfoPredicate = snapshotInfoPredicate.and( + si -> si.userMetadata().get(SnapshotsService.POLICY_ID_METADATA_FIELD) == null + ); + } + case 3 -> { + final var selectedPolicies = Set.copyOf(randomNonEmptySubsetOf(slmPolicies)); + requestedSlmPolicies = selectedPolicies.stream() + .map(policy -> randomBoolean() ? policy : policy + "*") + .toArray(String[]::new); + snapshotInfoPredicate = snapshotInfoPredicate.and( + si -> si.userMetadata().get(SnapshotsService.POLICY_ID_METADATA_FIELD) instanceof String policy + && selectedPolicies.contains(policy) + ); + } + } + + // ?sort and ?order parameters + final var sortKey = randomFrom(SnapshotSortKey.values()); + final var order = randomFrom(SortOrder.values()); + // NB we sometimes choose to sort by FAILED_SHARDS, but there are no failed shards in these snapshots. We're still testing the + // fallback sorting by snapshot ID in this case. We also have no multi-shard indices so there's no difference between sorting by + // INDICES and by SHARDS. The actual sorting behaviour for these cases is tested elsewhere, here we're just checking that sorting + // interacts correctly with the other parameters to the API. + + // compute the ordered sequence of snapshots which match the repository/snapshot name filters and SLM policy filter + final var selectedSnapshots = snapshotInfos.stream() + .filter(snapshotInfoPredicate) + .sorted(sortKey.getSnapshotInfoComparator(order)) + .toList(); + + final var getSnapshotsRequest = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, requestedRepositories, requestedSnapshots).policies( + requestedSlmPolicies + ) + // apply sorting params + .sort(sortKey) + .order(order); + + // sometimes use ?from_sort_value to skip some items; note that snapshots skipped in this way are subtracted from + // GetSnapshotsResponse.totalCount whereas snapshots skipped by ?after and ?offset are not + final int skippedByFromSortValue; + if (randomBoolean()) { + final var startingSnapshot = randomFrom(snapshotInfos); + getSnapshotsRequest.fromSortValue(switch (sortKey) { + case START_TIME -> Long.toString(startingSnapshot.startTime()); + case NAME -> startingSnapshot.snapshotId().getName(); + case DURATION -> Long.toString(startingSnapshot.endTime() - startingSnapshot.startTime()); + case INDICES, SHARDS -> Integer.toString(startingSnapshot.indices().size()); + case FAILED_SHARDS -> "0"; + case REPOSITORY -> startingSnapshot.repository(); + }); + final Predicate fromSortValuePredicate = snapshotInfo -> { + final var comparison = switch (sortKey) { + case START_TIME -> Long.compare(snapshotInfo.startTime(), startingSnapshot.startTime()); + case NAME -> snapshotInfo.snapshotId().getName().compareTo(startingSnapshot.snapshotId().getName()); + case DURATION -> Long.compare( + snapshotInfo.endTime() - snapshotInfo.startTime(), + startingSnapshot.endTime() - startingSnapshot.startTime() + ); + case INDICES, SHARDS -> Integer.compare(snapshotInfo.indices().size(), startingSnapshot.indices().size()); + case FAILED_SHARDS -> 0; + case REPOSITORY -> 
snapshotInfo.repository().compareTo(startingSnapshot.repository()); + }; + return order == SortOrder.ASC ? comparison < 0 : comparison > 0; + }; + + int skipCount = 0; + for (final var snapshotInfo : selectedSnapshots) { + if (fromSortValuePredicate.test(snapshotInfo)) { + skipCount += 1; + } else { + break; + } + } + skippedByFromSortValue = skipCount; + } else { + skippedByFromSortValue = 0; + } + + // ?offset parameter + if (randomBoolean()) { + getSnapshotsRequest.offset(between(0, selectedSnapshots.size() + 1)); + } + + // ?size parameter + if (randomBoolean()) { + getSnapshotsRequest.size(between(1, selectedSnapshots.size() + 1)); + } + + // compute the expected offset and size of the returned snapshots as indices in selectedSnapshots: + final var expectedOffset = Math.min(selectedSnapshots.size(), skippedByFromSortValue + getSnapshotsRequest.offset()); + final var expectedSize = Math.min( + selectedSnapshots.size() - expectedOffset, + getSnapshotsRequest.size() == GetSnapshotsRequest.NO_LIMIT ? Integer.MAX_VALUE : getSnapshotsRequest.size() + ); + + // get the actual response + final GetSnapshotsResponse getSnapshotsResponse = safeAwait( + l -> client().execute(TransportGetSnapshotsAction.TYPE, getSnapshotsRequest, l) + ); + + // verify it returns the expected results + assertEquals( + selectedSnapshots.stream().skip(expectedOffset).limit(expectedSize).map(SnapshotInfo::snapshotId).toList(), + getSnapshotsResponse.getSnapshots().stream().map(SnapshotInfo::snapshotId).toList() + ); + assertEquals(expectedSize, getSnapshotsResponse.getSnapshots().size()); + assertEquals(selectedSnapshots.size() - skippedByFromSortValue, getSnapshotsResponse.totalCount()); + assertEquals(selectedSnapshots.size() - expectedOffset - expectedSize, getSnapshotsResponse.remaining()); + assertEquals(getSnapshotsResponse.remaining() > 0, getSnapshotsResponse.next() != null); + + // now use ?after to page through the rest of the results + var nextRequestAfter = getSnapshotsResponse.next(); + var nextExpectedOffset = expectedOffset + expectedSize; + var remaining = getSnapshotsResponse.remaining(); + while (nextRequestAfter != null) { + final var nextSize = between(1, remaining); + final var nextRequest = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, requestedRepositories, requestedSnapshots) + // same name/policy filters, same ?sort and ?order params, new ?size, but no ?offset or ?from_sort_value because of ?after + .policies(requestedSlmPolicies) + .sort(sortKey) + .order(order) + .size(nextSize) + .after(SnapshotSortKey.decodeAfterQueryParam(nextRequestAfter)); + final GetSnapshotsResponse nextResponse = safeAwait(l -> client().execute(TransportGetSnapshotsAction.TYPE, nextRequest, l)); + + assertEquals( + selectedSnapshots.stream().skip(nextExpectedOffset).limit(nextSize).map(SnapshotInfo::snapshotId).toList(), + nextResponse.getSnapshots().stream().map(SnapshotInfo::snapshotId).toList() + ); + assertEquals(nextSize, nextResponse.getSnapshots().size()); + assertEquals(selectedSnapshots.size(), nextResponse.totalCount()); + assertEquals(remaining - nextSize, nextResponse.remaining()); + assertEquals(nextResponse.remaining() > 0, nextResponse.next() != null); + + nextRequestAfter = nextResponse.next(); + nextExpectedOffset += nextSize; + remaining -= nextSize; + } + + assertEquals(0, remaining); + } + + /** + * Older versions of Elasticsearch don't record in {@link RepositoryData} all the details needed for the get-snapshots API to pick out + * the right snapshots, so in this case the API must fall back to 
reading those details from each candidate {@link SnapshotInfo} blob. + * Simulate this situation by manipulating the {@link RepositoryData} blob directly to remove all the optional details from some subset + * of its snapshots. + */ + private static void removeDetailsForRandomSnapshots(String repositoryName, ActionListener listener) { + final Set snapshotsWithoutDetails = ConcurrentCollections.newConcurrentSet(); + final var masterRepositoriesService = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class); + final var repository = asInstanceOf(FsRepository.class, masterRepositoriesService.repository(repositoryName)); + final var repositoryMetadata = repository.getMetadata(); + final var repositorySettings = repositoryMetadata.settings(); + final var repositoryDataBlobPath = asInstanceOf(FsBlobStore.class, repository.blobStore()).path() + .resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryMetadata.generation()); + + SubscribableListener + + // unregister the repository while we're mucking around with its internals + .newForked( + l -> client().execute( + TransportDeleteRepositoryAction.TYPE, + new DeleteRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName), + l + ) + ) + .andThenAccept(ElasticsearchAssertions::assertAcked) + + // rewrite the RepositoryData blob with some details removed + .andThenAccept(ignored -> { + // load the existing RepositoryData JSON blob as raw maps/lists/etc. + final var repositoryDataBytes = Files.readAllBytes(repositoryDataBlobPath); + final var repositoryDataMap = XContentHelper.convertToMap( + JsonXContent.jsonXContent, + repositoryDataBytes, + 0, + repositoryDataBytes.length, + true + ); + + // modify the contents + final var snapshotsList = asInstanceOf(List.class, repositoryDataMap.get("snapshots")); + for (final var snapshotObj : snapshotsList) { + if (randomBoolean()) { + continue; + } + final var snapshotMap = asInstanceOf(Map.class, snapshotObj); + snapshotsWithoutDetails.add( + new SnapshotId( + asInstanceOf(String.class, snapshotMap.get("name")), + asInstanceOf(String.class, snapshotMap.get("uuid")) + ) + ); + + // remove the optional details fields + assertNotNull(snapshotMap.remove("start_time_millis")); + assertNotNull(snapshotMap.remove("end_time_millis")); + assertNotNull(snapshotMap.remove("slm_policy")); + } + + // overwrite the RepositoryData JSON blob with its new contents + final var updatedRepositoryDataBytes = XContentTestUtils.convertToXContent(repositoryDataMap, XContentType.JSON); + try (var outputStream = Files.newOutputStream(repositoryDataBlobPath)) { + BytesRef bytesRef; + final var iterator = updatedRepositoryDataBytes.iterator(); + while ((bytesRef = iterator.next()) != null) { + outputStream.write(bytesRef.bytes, bytesRef.offset, bytesRef.length); + } + } + }) + + // re-register the repository + .andThen( + l -> client().execute( + TransportPutRepositoryAction.TYPE, + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName).type(FsRepository.TYPE) + .settings(repositorySettings), + l + ) + ) + .andThenAccept(ElasticsearchAssertions::assertAcked) + + // verify that the details are indeed now missing + .andThen( + l -> masterRepositoriesService.repository(repositoryName).getRepositoryData(EsExecutors.DIRECT_EXECUTOR_SERVICE, l) + ) + .andThenAccept(repositoryData -> { + for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { + assertEquals( + repositoryName + "/" + snapshotId.toString() + ": " + repositoryData.getSnapshotDetails(snapshotId), + 
snapshotsWithoutDetails.contains(snapshotId), + repositoryData.hasMissingDetails(snapshotId) + ); + } + }) + + .addListener(listener); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 057d7124f83d9..d4c0a4c80a3b5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -298,7 +298,7 @@ public void testRepositoryConflict() throws Exception { logger.info("--> snapshot"); final String index = "test-idx"; - assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + assertAcked(prepareCreate(index, 1, indexSettings(1, 0))); for (int i = 0; i < 10; i++) { indexDoc(index, Integer.toString(i), "foo", "bar" + i); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index a651537c77539..531e9f4f45afa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1788,9 +1788,7 @@ public void testSnapshotCanceledOnRemovedShard() throws Exception { final String index = "test-idx"; final String snapshot = "test-snap"; - assertAcked( - prepareCreate(index, 1, Settings.builder().put("number_of_shards", numPrimaries).put("number_of_replicas", numReplicas)) - ); + assertAcked(prepareCreate(index, 1, indexSettings(numPrimaries, numReplicas))); indexRandomDocs(index, 100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java index b0c5e73de5859..8f2702099c102 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java @@ -12,11 +12,14 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.ingest.IngestTestPlugin; import org.elasticsearch.plugins.Plugin; @@ -29,6 +32,7 @@ import java.util.Collection; import java.util.Collections; +import static 
org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.putJsonStoredScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateMissing; @@ -96,14 +100,7 @@ public void testIncludeGlobalState() throws Exception { if (testScript) { logger.info("--> creating test script"); - assertAcked( - clusterAdmin().preparePutStoredScript() - .setId("foobar") - .setContent( - new BytesArray("{\"script\": { \"lang\": \"" + MockScriptEngine.NAME + "\", \"source\": \"1\"} }"), - XContentType.JSON - ) - ); + putJsonStoredScript("foobar", "{\"script\": { \"lang\": \"" + MockScriptEngine.NAME + "\", \"source\": \"1\"} }"); } logger.info("--> snapshot without global state"); @@ -152,7 +149,12 @@ public void testIncludeGlobalState() throws Exception { if (testScript) { logger.info("--> delete test script"); - assertAcked(clusterAdmin().prepareDeleteStoredScript("foobar").get()); + assertAcked( + safeExecute( + TransportDeleteStoredScriptAction.TYPE, + new DeleteStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "foobar") + ) + ); } logger.info("--> try restoring from snapshot without global state"); @@ -188,7 +190,10 @@ public void testIncludeGlobalState() throws Exception { if (testScript) { logger.info("--> check that script is restored"); - GetStoredScriptResponse getStoredScriptResponse = clusterAdmin().prepareGetStoredScript("foobar").get(); + GetStoredScriptResponse getStoredScriptResponse = safeExecute( + GetStoredScriptAction.INSTANCE, + new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "foobar") + ); assertNotNull(getStoredScriptResponse.getSource()); } @@ -217,7 +222,12 @@ public void testIncludeGlobalState() throws Exception { } if (testScript) { - assertAcked(clusterAdmin().prepareDeleteStoredScript("foobar").get()); + assertAcked( + safeExecute( + TransportDeleteStoredScriptAction.TYPE, + new DeleteStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "foobar") + ) + ); } getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); @@ -236,7 +246,7 @@ public void testIncludeGlobalState() throws Exception { getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); assertFalse(clusterAdmin().prepareGetPipeline("barbaz").get().isFound()); - assertNull(clusterAdmin().prepareGetStoredScript("foobar").get().getSource()); + assertNull(safeExecute(GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "foobar")).getSource()); assertDocCount("test-idx", 100L); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index a45471f273732..2d1e16dc64273 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -483,7 +483,7 @@ private static void putShutdownForRemovalMetadata(ClusterService clusterService, SubscribableListener .newForked(l -> putShutdownMetadata(clusterService, shutdownMetadata, nodeName, l)) - .andThen((l, ignored) -> flushMasterQueue(clusterService, l)) + .andThen(l -> flushMasterQueue(clusterService, l)) .addListener(listener); } diff 
--git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index b8b6dcb25b557..9c9076dff00e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -1257,7 +1256,7 @@ public void clusterStateProcessed(ClusterState initialState, ClusterState newSta ) .andThen( - (l, ignored) -> clusterService.submitUnbatchedStateUpdateTask( + l -> clusterService.submitUnbatchedStateUpdateTask( "unmark [" + node + "] for removal", new ClusterStateUpdateTask() { @Override @@ -1461,11 +1460,7 @@ private void createIndexAndContinue(Releasable releasable) { docPermits = new Semaphore(between(1000, 3000)); logger.info("--> create index [{}] with max [{}] docs", indexName, docPermits.availablePermits()); indicesAdmin().prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shardCount) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, cluster.numDataNodes() - 1)) - ) + .setSettings(indexSettings(shardCount, between(0, cluster.numDataNodes() - 1))) .execute(mustSucceed(response -> { assertTrue(response.isAcknowledged()); logger.info("--> finished create index [{}]", indexName); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 1c07b5b4564ec..086bfece87172 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -190,14 +190,15 @@ exports org.elasticsearch.common.file; exports org.elasticsearch.common.geo; exports org.elasticsearch.common.hash; - exports org.elasticsearch.common.inject; - exports org.elasticsearch.common.inject.binder; - exports org.elasticsearch.common.inject.internal; - exports org.elasticsearch.common.inject.matcher; - exports org.elasticsearch.common.inject.multibindings; - exports org.elasticsearch.common.inject.name; - exports org.elasticsearch.common.inject.spi; - exports org.elasticsearch.common.inject.util; + exports org.elasticsearch.injection.api; + exports org.elasticsearch.injection.guice; + exports org.elasticsearch.injection.guice.binder; + exports org.elasticsearch.injection.guice.internal; + exports org.elasticsearch.injection.guice.matcher; + exports org.elasticsearch.injection.guice.multibindings; + exports org.elasticsearch.injection.guice.name; + exports org.elasticsearch.injection.guice.spi; + exports org.elasticsearch.injection.guice.util; exports org.elasticsearch.common.io; exports org.elasticsearch.common.io.stream; exports org.elasticsearch.common.logging; @@ -364,6 +365,7 @@ exports org.elasticsearch.search.rank.rerank; exports org.elasticsearch.search.rescore; exports org.elasticsearch.search.retriever; + exports org.elasticsearch.search.retriever.rankdoc; exports org.elasticsearch.search.runtime; exports org.elasticsearch.search.searchafter; exports org.elasticsearch.search.slice; @@ -428,6 +430,7 @@ 
org.elasticsearch.cluster.metadata.MetadataFeatures, org.elasticsearch.rest.RestFeatures, org.elasticsearch.indices.IndicesFeatures, + org.elasticsearch.repositories.RepositoriesFeatures, org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.index.mapper.MapperFeatures, org.elasticsearch.ingest.IngestGeoIpFeatures, diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 2983a2d62de71..d7db8f4ec09dd 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1908,25 +1908,31 @@ private enum ElasticsearchExceptionHandle { FailureIndexNotSupportedException.class, FailureIndexNotSupportedException::new, 178, - TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS + TransportVersions.V_8_14_0 ), NOT_PERSISTENT_TASK_NODE_EXCEPTION( NotPersistentTaskNodeException.class, NotPersistentTaskNodeException::new, 179, - TransportVersions.ADD_PERSISTENT_TASK_EXCEPTIONS + TransportVersions.V_8_14_0 ), PERSISTENT_TASK_NODE_NOT_ASSIGNED_EXCEPTION( PersistentTaskNodeNotAssignedException.class, PersistentTaskNodeNotAssignedException::new, 180, - TransportVersions.ADD_PERSISTENT_TASK_EXCEPTIONS + TransportVersions.V_8_14_0 ), RESOURCE_ALREADY_UPLOADED_EXCEPTION( ResourceAlreadyUploadedException.class, ResourceAlreadyUploadedException::new, 181, TransportVersions.ADD_RESOURCE_ALREADY_UPLOADED_EXCEPTION + ), + INGEST_PIPELINE_EXCEPTION( + org.elasticsearch.ingest.IngestPipelineException.class, + org.elasticsearch.ingest.IngestPipelineException::new, + 182, + TransportVersions.INGEST_PIPELINE_EXCEPTION_ADDED ); final Class exceptionClass; diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index b67b59aeee076..3e109fb1600b9 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -97,6 +97,193 @@ public static String stackTrace(Throwable e) { return stackTraceStringWriter.toString(); } + /** + * Constructs a limited and compressed stack trace string. Each exception printed as part of the full stack trace will have its printed + * stack frames capped at the given trace depth. Stack traces that are longer than the given trace depth will summarize the count of the + * remaining frames at the end of the trace. Each stack frame omits the module information and limits the package names to single + * characters per part. + *

+ * An example result when using a trace depth of 2 and one nested cause: + *

+     * o.e.s.GenericException: some generic exception!
+     *   at o.e.s.SomeClass.method(SomeClass.java:100)
+     *   at o.e.s.SomeOtherClass.earlierMethod(SomeOtherClass.java:24)
+     *   ... 5 more
+     * Caused by: o.e.s.GenericException: some other generic exception!
+     *   at o.e.s.SomeClass.method(SomeClass.java:115)
+     *   at o.e.s.SomeOtherClass.earlierMethod(SomeOtherClass.java:16)
+     *   ... 12 more
+     * 
+ * + * @param e Throwable object to construct a printed stack trace for + * @param traceDepth The maximum number of stack trace elements to display per exception referenced + * @return A string containing a limited and compressed stack trace. + */ + public static String limitedStackTrace(Throwable e, int traceDepth) { + assert traceDepth >= 0 : "Cannot print stacktraces with negative trace depths"; + StringWriter stackTraceStringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stackTraceStringWriter); + printLimitedStackTrace(e, printWriter, traceDepth); + return stackTraceStringWriter.toString(); + } + + /** Caption for labeling causative exception stack traces */ + private static final String CAUSE_CAPTION = "Caused by: "; + /** Caption for labeling suppressed exception stack traces */ + private static final String SUPPRESSED_CAPTION = "Suppressed: "; + + private static void printLimitedStackTrace(Throwable e, PrintWriter s, int maxLines) { + // Guard against malicious overrides of Throwable.equals by + // using a Set with identity equality semantics. + Set dejaVu = Collections.newSetFromMap(new IdentityHashMap<>()); + dejaVu.add(e); + + // Print our stack trace + s.println(compressExceptionMessage(e)); + StackTraceElement[] trace = e.getStackTrace(); + int linesPrinted = 0; + for (StackTraceElement traceElement : trace) { + if (linesPrinted >= maxLines) { + break; + } else { + s.println(compressStackTraceElement(new StringBuilder("\tat "), traceElement)); + linesPrinted++; + } + } + if (trace.length > linesPrinted) { + s.println("\t... " + (trace.length - linesPrinted) + " more"); + } + + // Print suppressed exceptions, if any + for (Throwable se : e.getSuppressed()) { + limitAndPrintEnclosedStackTrace(se, s, trace, SUPPRESSED_CAPTION, "\t", maxLines, dejaVu); + } + + // Print cause, if any + Throwable ourCause = e.getCause(); + if (ourCause != null) { + limitAndPrintEnclosedStackTrace(ourCause, s, trace, CAUSE_CAPTION, "", maxLines, dejaVu); + } + } + + private static void limitAndPrintEnclosedStackTrace( + Throwable e, + PrintWriter s, + StackTraceElement[] enclosingTrace, + String caption, + String prefix, + int maxLines, + Set dejaVu + ) { + if (dejaVu.contains(e)) { + s.println(prefix + caption + "[CIRCULAR REFERENCE: " + compressExceptionMessage(e) + "]"); + } else { + dejaVu.add(e); + // Compute number of frames in common between this and enclosing trace + StackTraceElement[] trace = e.getStackTrace(); + int m = trace.length - 1; + int n = enclosingTrace.length - 1; + while (m >= 0 && n >= 0 && trace[m].equals(enclosingTrace[n])) { + m--; + n--; + } + int framesInCommon = trace.length - 1 - m; + + // Instead of breaking out of the print loop below when it reaches the maximum + // print lines, we simply cap how many frames we plan on printing here. + int linesToPrint = m + 1; + if (linesToPrint > maxLines) { + // The print loop below is "<=" based instead of "<", so subtract + // one from the max lines to convert a count value to an array index + // value and avoid an off by one error. + m = maxLines - 1; + framesInCommon = trace.length - 1 - m; + } + + // Print our stack trace + s.println(prefix + caption + compressExceptionMessage(e)); + for (int i = 0; i <= m; i++) { + s.println(compressStackTraceElement(new StringBuilder(prefix).append("\tat "), trace[i])); + } + if (framesInCommon != 0) { + s.println(prefix + "\t... 
" + framesInCommon + " more"); + } + + // Print suppressed exceptions, if any + for (Throwable se : e.getSuppressed()) { + limitAndPrintEnclosedStackTrace(se, s, trace, SUPPRESSED_CAPTION, prefix + "\t", maxLines, dejaVu); + } + + // Print cause, if any + Throwable ourCause = e.getCause(); + if (ourCause != null) { + limitAndPrintEnclosedStackTrace(ourCause, s, trace, CAUSE_CAPTION, prefix, maxLines, dejaVu); + } + } + } + + private static String compressExceptionMessage(Throwable e) { + StringBuilder msg = new StringBuilder(); + compressPackages(msg, e.getClass().getName()); + String message = e.getLocalizedMessage(); + if (message != null) { + msg.append(": ").append(message); + } + return msg.toString(); + } + + private static StringBuilder compressStackTraceElement(StringBuilder s, final StackTraceElement stackTraceElement) { + String declaringClass = stackTraceElement.getClassName(); + compressPackages(s, declaringClass); + + String methodName = stackTraceElement.getMethodName(); + s.append(".").append(methodName).append("("); + + if (stackTraceElement.isNativeMethod()) { + s.append("Native Method)"); + } else { + String fileName = stackTraceElement.getFileName(); + int lineNumber = stackTraceElement.getLineNumber(); + if (fileName != null && lineNumber >= 0) { + s.append(fileName).append(":").append(lineNumber).append(")"); + } else if (fileName != null) { + s.append(fileName).append(")"); + } else { + s.append("Unknown Source)"); + } + } + return s; + } + + // Visible for testing + static void compressPackages(StringBuilder s, String className) { + assert s != null : "s cannot be null"; + assert className != null : "className cannot be null"; + int finalDot = className.lastIndexOf('.'); + if (finalDot < 0) { + s.append(className); + return; + } + int lastPackageName = className.lastIndexOf('.', finalDot - 1); + if (lastPackageName < 0) { + if (finalDot >= 1) { + s.append(className.charAt(0)).append('.'); + } + s.append(className.substring(finalDot + 1)); + return; + } + boolean firstChar = true; + char[] charArray = className.toCharArray(); + for (int idx = 0; idx <= lastPackageName + 1; idx++) { + char c = charArray[idx]; + if (firstChar && '.' != c) { + s.append(c).append('.'); + } + firstChar = '.' 
== c; + } + s.append(className.substring(finalDot + 1)); + } + public static String formatStackTrace(final StackTraceElement[] stackTrace) { return Arrays.stream(stackTrace).skip(1).map(e -> "\tat " + e).collect(Collectors.joining("\n")); } diff --git a/server/src/main/java/org/elasticsearch/ReleaseVersions.java b/server/src/main/java/org/elasticsearch/ReleaseVersions.java index 7b5c8d1d42382..cacdca1c5b528 100644 --- a/server/src/main/java/org/elasticsearch/ReleaseVersions.java +++ b/server/src/main/java/org/elasticsearch/ReleaseVersions.java @@ -41,7 +41,7 @@ public class ReleaseVersions { private static final Pattern VERSION_LINE = Pattern.compile("(\\d+\\.\\d+\\.\\d+),(\\d+)"); - public static IntFunction generateVersionsLookup(Class versionContainer) { + public static IntFunction generateVersionsLookup(Class versionContainer, int current) { if (USES_VERSIONS == false) return Integer::toString; try { @@ -52,6 +52,9 @@ public static IntFunction generateVersionsLookup(Class versionContain } NavigableMap> versions = new TreeMap<>(); + // add the current version id, which won't be in the csv + versions.put(current, List.of(Version.CURRENT)); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(versionsFile, StandardCharsets.UTF_8))) { String line; while ((line = reader.readLine()) != null) { @@ -121,8 +124,8 @@ private static IntFunction lookupFunction(NavigableMap VERSION_IDS = getAllVersionIds(TransportVersions.class); @@ -339,7 +324,7 @@ static Collection getAllVersions() { return VERSION_IDS.values(); } - static final IntFunction VERSION_LOOKUP = ReleaseVersions.generateVersionsLookup(TransportVersions.class); + static final IntFunction VERSION_LOOKUP = ReleaseVersions.generateVersionsLookup(TransportVersions.class, LATEST_DEFINED.id()); // no instance private TransportVersions() {} diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index fefe2ea486485..333669ca8079c 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -123,6 +123,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_21 = new Version(7_17_21_99); public static final Version V_7_17_22 = new Version(7_17_22_99); public static final Version V_7_17_23 = new Version(7_17_23_99); + public static final Version V_7_17_24 = new Version(7_17_24_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); @@ -179,8 +180,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_14_1 = new Version(8_14_01_99); public static final Version V_8_14_2 = new Version(8_14_02_99); public static final Version V_8_14_3 = new Version(8_14_03_99); - public static final Version V_8_14_4 = new Version(8_14_04_99); public static final Version V_8_15_0 = new Version(8_15_00_99); + public static final Version V_8_15_1 = new Version(8_15_01_99); public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version CURRENT = V_8_16_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index ec01d88cb5e6e..f3fa1dd2e105f 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -187,6 +187,13 @@ default ActionListener 
delegateFailureAndWrap(CheckedBiConsumer(this, bc); } + /** + * Same as {@link #delegateFailureAndWrap(CheckedBiConsumer)} except that the response is ignored and not passed to the delegate. + */ + default ActionListener delegateFailureIgnoreResponseAndWrap(CheckedConsumer, ? extends Exception> c) { + return new ActionListenerImplementations.ResponseDroppingActionListener<>(this, c); + } + /** * Creates a listener which releases the given resource on completion (whether success or failure) */ diff --git a/server/src/main/java/org/elasticsearch/action/ActionListenerImplementations.java b/server/src/main/java/org/elasticsearch/action/ActionListenerImplementations.java index bf4f2dcc2d8db..93f6ac6e49d41 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListenerImplementations.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListenerImplementations.java @@ -9,6 +9,7 @@ package org.elasticsearch.action; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.Releasable; @@ -254,6 +255,33 @@ public String toString() { } } + /** + * The same as {@link ResponseWrappingActionListener} except that the response is dropped + */ + static final class ResponseDroppingActionListener extends DelegatingActionListener { + + private final CheckedConsumer, ? extends Exception> consumer; + + ResponseDroppingActionListener(ActionListener delegate, CheckedConsumer, ? extends Exception> consumer) { + super(delegate); + this.consumer = consumer; + } + + @Override + public void onResponse(T ignored) { + try { + consumer.accept(delegate); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public String toString() { + return super.toString() + "/" + consumer; + } + } + static final class RunAfterActionListener extends DelegatingActionListener { private final Runnable runAfter; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index a9c6894355cb6..37a33eab4e4e8 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -218,9 +218,6 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.NamedRegistry; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.TypeLiteral; -import org.elasticsearch.common.inject.multibindings.MapBinder; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -241,6 +238,9 @@ import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetadata; +import org.elasticsearch.injection.guice.AbstractModule; +import org.elasticsearch.injection.guice.TypeLiteral; +import org.elasticsearch.injection.guice.multibindings.MapBinder; import org.elasticsearch.persistent.CompletionPersistentTaskAction; import org.elasticsearch.persistent.RemovePersistentTaskAction; import org.elasticsearch.persistent.StartPersistentTaskAction; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 8e6f029c71013..c8d6a25c3ba6e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -30,7 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceAction.java index 76a9b5a245a84..f296dbb9bb5cb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceAction.java @@ -27,8 +27,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.core.Nullable; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java index 228ad640f9fca..2f094b0fc6006 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -25,12 +25,12 @@ import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.FeatureService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -78,7 +78,7 @@ public TransportGetAllocationStatsAction( @Override protected void doExecute(Task task, Request request, ActionListener listener) { - if (clusterService.state().getMinTransportVersion().before(TransportVersions.ALLOCATION_STATS)) { + if (clusterService.state().getMinTransportVersion().before(TransportVersions.V_8_14_0)) { // The action is not available before ALLOCATION_STATS listener.onResponse(new Response(Map.of(), null)); return; 
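The hunk above swaps the named TransportVersions.ALLOCATION_STATS constant for the released V_8_14_0 constant while keeping the guard that answers with an empty response whenever some node in the cluster is older than the version that introduced the action. A minimal standalone sketch of that guard pattern follows; the integer version ids and the Response record are placeholders standing in for TransportVersion and the real response type, not Elasticsearch APIs.

import java.util.Map;
import java.util.function.Consumer;

// Standalone sketch of gating a feature on the cluster's minimum wire version.
final class AllocationStatsGateSketch {

    record Response(Map<String, Object> stats) {}

    // id written in the same style as Version ids in this diff, for illustration only
    static final int VERSION_8_14_0 = 8_14_00_99;

    static void execute(int minTransportVersionInCluster, Consumer<Response> listener) {
        if (minTransportVersionInCluster < VERSION_8_14_0) {
            // Some node cannot understand this action yet: short-circuit with an
            // empty result instead of sending an unknown request over the wire.
            listener.accept(new Response(Map.of()));
            return;
        }
        // ... otherwise fan the request out to the nodes and aggregate real stats ...
        listener.accept(new Response(Map.of("node-1", "allocation stats")));
    }

    public static void main(String[] args) {
        execute(8_13_00_99, r -> System.out.println("old cluster -> " + r.stats()));
        execute(8_15_00_99, r -> System.out.println("new cluster -> " + r.stats()));
    }
}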
@@ -108,6 +108,7 @@ public static class Request extends MasterNodeReadRequest { private final EnumSet metrics; + @SuppressWarnings("this-escape") public Request(TimeValue masterNodeTimeout, TaskId parentTaskId, EnumSet metrics) { super(masterNodeTimeout); setParentTask(parentTaskId); @@ -123,7 +124,7 @@ public Request(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS); + assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0); super.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.MASTER_NODE_METRICS)) { out.writeEnumSet(metrics); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java index fca7b5c44fd29..8337353e1f40d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java @@ -28,8 +28,8 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardAssignment; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.core.Nullable; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index c540d535e60d4..12db5e664a784 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -37,6 +36,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index bbe292e817389..29aa93cd7d513 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -29,10 +29,10 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java index 95025c851fd94..ff73bc0b9c56a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoAction.java @@ -17,9 +17,9 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.coordination.ClusterFormationFailureHelper; import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java index 858a85b6bdfe8..bd41f5933e058 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/CoordinationDiagnosticsAction.java @@ -16,9 +16,9 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java index b38cee5e2a042..82a2ede90c945 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/coordination/MasterHistoryAction.java @@ -17,10 +17,10 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.coordination.MasterHistoryService; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java index 46e41d306cefe..11bdd41f458d3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java @@ -25,10 +25,10 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Tuple; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportGetDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportGetDesiredNodesAction.java index e06918355e7a9..6909ce1f9366b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportGetDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportGetDesiredNodesAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.cluster.metadata.DesiredNodes; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index ee8295381dd88..9ec8feeb5d405 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -28,9 +28,9 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.features.FeatureService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 8cca2c5bf6472..522bbb4eee629 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -31,12 +31,12 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index 63c2be9050ab0..ae538e7c72334 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -17,12 +17,12 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java index 281e26a44f335..485f91f0fd4a2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java @@ -19,9 +19,9 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodesCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodesCapabilitiesRequest.java index c69d273727238..7adcff9f19ccb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodesCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodesCapabilitiesRequest.java @@ -24,10 +24,15 @@ public class NodesCapabilitiesRequest extends BaseNodesRequest exceptions = new ArrayList<>(); // broadcast the new settings object (with the open embedded keystore) to all reloadable plugins pluginsService.filterPlugins(ReloadablePlugin.class).forEach(p -> { + logger.debug("Reloading plugin [" + p.getClass().getSimpleName() + "]"); try { p.reload(settingsWithKeystore); } catch (final Exception e) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java index 901f8b1e83c69..052263a00811d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java @@ -23,11 +23,11 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateShardPathAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateShardPathAction.java index 40fff1b77a4fb..401a415756abe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateShardPathAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateShardPathAction.java @@ -16,13 +16,13 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index a438983e855e9..1a53cec1bdbd7 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -125,7 +125,7 @@ public NodeStats(StreamInput in) throws IOException { repositoriesStats = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(RepositoriesStats::new) : null; - nodeAllocationStats = in.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS) + nodeAllocationStats = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readOptionalWriteable(NodeAllocationStats::new) : null; } @@ -337,7 +337,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(repositoriesStats); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOCATION_STATS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeOptionalWriteable(nodeAllocationStats); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 3416b77fdd7fd..be2cd5fb187a3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -22,9 +22,9 @@ import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeService; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.tasks.CancellableTask; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index 4582a1cb26f82..1ff0a78575cac 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 85de3c65c798e..36d31f9ec59fc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -24,12 +24,12 @@ import 
org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.RemovedTaskListener; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index c4888b9900428..83b4b2fc45626 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -17,12 +17,12 @@ import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.RemovedTaskListener; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java index 72bbe2683d157..1f8a049a678e5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java @@ -15,9 +15,9 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.search.aggregations.support.AggregationUsageService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesAction.java index 5f3fd654eeb84..71a5614cf5b25 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesAction.java @@ -23,11 +23,11 @@ import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.client.internal.Client; import 
org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.RemoteClusterServerInfo; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java index c7d74fc414115..b72b573796b1e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index 237e241c8900f..e6c5b63b5c2cf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -24,11 +24,11 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.blobstore.DeleteResult; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryCleanupResult; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index 301cc6a4255f7..0b3645cd47364 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -19,8 +19,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index bed02ef2cbc19..df7e235c00a3f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.ResolvedRepositories; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index eb7e26b30e874..29999f98b553c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -19,8 +19,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 353cc2994afc7..84e3ed77c64fc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 7eea49861333e..3d90b4061f194 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -35,11 +35,11 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponseHandler; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java index 302efb5867065..41d010a6a9f5b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java @@ -16,9 +16,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index e4093486da39c..de53ef70ff54a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -26,12 +26,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsUpdater; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 91c302c8aa7be..1f53840c20c06 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Objects; @@ -31,22 +32,16 @@ public final class ClusterSearchShardsRequest extends MasterNodeReadRequest { - - public ClusterSearchShardsRequestBuilder(ElasticsearchClient client) { - super(client, TransportClusterSearchShardsAction.TYPE, new ClusterSearchShardsRequest()); - } - - /** - * Sets the indices the search will be executed on. - */ - public ClusterSearchShardsRequestBuilder setIndices(String... indices) { - request.indices(indices); - return this; - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public ClusterSearchShardsRequestBuilder setRouting(String routing) { - request.routing(routing); - return this; - } - - /** - * The routing values to control the shards that the search will be executed on. - */ - public ClusterSearchShardsRequestBuilder setRouting(String... routing) { - request.routing(routing); - return this; - } - - /** - * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to - * {@code _local} to prefer local shards or a custom value, which guarantees that the same order - * will be used across different requests. - */ - public ClusterSearchShardsRequestBuilder setPreference(String preference) { - request.preference(preference); - return this; - } - - /** - * Specifies what type of requested indices to ignore and how to deal indices wildcard expressions. - * For example indices that don't exist. 
- */ - public ClusterSearchShardsRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - request().indicesOptions(indicesOptions); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index b7164f81c71ac..e727809ec56c1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -21,10 +21,10 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.core.Predicates; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java index 130cada5f6742..99e2ee6ca6e15 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 2c460319e3d86..d17ad9674a19d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -82,6 +83,9 @@ public class CreateSnapshotRequest extends MasterNodeRequest userMetadata; + @Nullable + private String uuid = null; + public CreateSnapshotRequest(TimeValue masterNodeTimeout) { super(masterNodeTimeout); } @@ -96,6 +100,7 @@ public CreateSnapshotRequest(TimeValue masterNodeTimeout, String repository, Str this(masterNodeTimeout); this.snapshot = snapshot; this.repository = repository; + this.uuid = UUIDs.randomBase64UUID(); } 
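The constructor change above eagerly assigns a random uuid to every new snapshot request; further down in this file the getter also generates one lazily (see the uuid() javadoc below), because in a mixed-version cluster the request may have been built by an older node that never set the field, and the wire format only carries it on new-enough transport versions. A small standalone sketch of that eager-plus-lazy pattern follows, using java.util.UUID in place of Elasticsearch's UUIDs.randomBase64UUID().

import java.util.UUID;

// Standalone sketch of "assign the uuid eagerly, fall back to lazy creation".
// Field names mirror the request above; the UUID helper is plain java.util.
final class SnapshotRequestSketch {

    private String uuid; // may be null if the request originated on an older node

    SnapshotRequestSketch() {
        // new-enough nodes assign the uuid at construction time
        this.uuid = UUID.randomUUID().toString();
    }

    static SnapshotRequestSketch fromOlderNode() {
        // simulate deserializing a request written by a node that predates the field
        SnapshotRequestSketch request = new SnapshotRequestSketch();
        request.uuid = null;
        return request;
    }

    String uuid() {
        // not thread-safe, matching the caveat in the uuid() javadoc later in this hunk:
        // generate on first access if the originating node never set it
        if (uuid == null) {
            uuid = UUID.randomUUID().toString();
        }
        return uuid;
    }

    public static void main(String[] args) {
        System.out.println("new node:   " + new SnapshotRequestSketch().uuid());
        System.out.println("older node: " + fromOlderNode().uuid());
    }
}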
public CreateSnapshotRequest(StreamInput in) throws IOException { @@ -112,6 +117,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { waitForCompletion = in.readBoolean(); partial = in.readBoolean(); userMetadata = in.readGenericMap(); + uuid = in.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS) ? in.readOptionalString() : null; } @Override @@ -129,6 +135,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(waitForCompletion); out.writeBoolean(partial); out.writeGenericMap(userMetadata); + if (out.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS)) { + out.writeOptionalString(uuid); + } } @Override @@ -364,6 +373,35 @@ public CreateSnapshotRequest userMetadata(@Nullable Map userMeta return this; } + /** + * Set a uuid to identify snapshot. + * If no uuid is specified, one will be created within SnapshotService + */ + public CreateSnapshotRequest uuid(String uuid) { + this.uuid = uuid; + return this; + } + + /** + * Get the uuid, generating it if one does not yet exist. + * Because the uuid can be set, this method is NOT thread-safe. + *

+ * The uuid was previously generated in SnapshotService.createSnapshot + * but was moved to the CreateSnapshotRequest constructor so that the caller could + * uniquely identify the snapshot. Unfortunately, in a mixed-version cluster, + * the CreateSnapshotRequest could be created on a node which does not yet + * generate the uuid in the constructor. In this case, the uuid + * must be generated when it is first accessed with this getter. + * + * @return the uuid that will be used for the snapshot + */ + public String uuid() { + if (this.uuid == null) { + this.uuid = UUIDs.randomBase64UUID(); + } + return this.uuid; + } + /** * @return Which plugin states should be included in the snapshot */ @@ -469,12 +507,13 @@ public boolean equals(Object o) { && Objects.equals(indicesOptions, that.indicesOptions) && Arrays.equals(featureStates, that.featureStates) && Objects.equals(masterNodeTimeout(), that.masterNodeTimeout()) - && Objects.equals(userMetadata, that.userMetadata); + && Objects.equals(userMetadata, that.userMetadata) + && Objects.equals(uuid, that.uuid); } @Override public int hashCode() { - int result = Objects.hash(snapshot, repository, indicesOptions, partial, includeGlobalState, waitForCompletion, userMetadata); + int result = Objects.hash(snapshot, repository, indicesOptions, partial, includeGlobalState, waitForCompletion, userMetadata, uuid); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(featureStates); return result; @@ -505,6 +544,8 @@ public String toString() { + masterNodeTimeout() + ", metadata=" + userMetadata + + ", uuid=" + + uuid + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 03810f027363f..9752d764bd6e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 9522a7afaec8b..5c6259d133b44 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -19,8 +19,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import 
org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java index 5d8ab3daaf85c..bb3fbd401b3a9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java @@ -18,9 +18,9 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportSnapshottableFeaturesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportSnapshottableFeaturesAction.java index e7deabd341312..c12b12a18dd2d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportSnapshottableFeaturesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportSnapshottableFeaturesAction.java @@ -16,9 +16,9 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 1a279e3488123..063fe49160d49 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.AbstractThrottledTaskRunner; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -32,6 +31,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Releasable; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.repositories.IndexId; diff 
--git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java index 6933725476adf..6b31dd0c2d358 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java @@ -20,9 +20,9 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.repositories.IndexSnapshotsService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryException; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index f0d47813dad77..f9ee2d84f8732 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -68,11 +68,6 @@ public RestoreSnapshotRequest(TimeValue masterNodeTimeout) { super(masterNodeTimeout); } - @Deprecated(forRemoval = true) // temporary compatibility shim - public RestoreSnapshotRequest(String repository, String snapshot) { - this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, repository, snapshot); - } - /** * Constructs a new put repository request with the provided repository and snapshot names. 
* diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index cc0d4cdfd9ee9..ba34b8cab1021 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -17,8 +17,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -49,7 +48,7 @@ public TransportRestoreSnapshotAction( RestoreSnapshotRequest::new, indexNameExpressionResolver, RestoreSnapshotResponse::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE + threadPool.executor(ThreadPool.Names.SNAPSHOT_META) ); this.restoreService = restoreService; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 82f32d2472b97..7c5d17ced1de5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -19,11 +19,11 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index caedc3363e9a3..7bd0c929773a8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -24,12 +24,12 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.injection.guice.Inject; 
import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; @@ -332,8 +332,7 @@ private void loadRepositoryData( final SnapshotsInProgress.State state = switch (snapshotInfo.state()) { case FAILED -> SnapshotsInProgress.State.FAILED; case SUCCESS, PARTIAL -> - // Translating both PARTIAL and SUCCESS to SUCCESS for now - // TODO: add the differentiation on the metadata level in the next major release + // Both of these means the snapshot has completed. SnapshotsInProgress.State.SUCCESS; default -> throw new IllegalArgumentException("Unexpected snapshot state " + snapshotInfo.state()); }; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 8d72715219e0f..0e5d7cda4a0d4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -28,11 +28,11 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.version.CompatibilityVersions; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.core.Predicates; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java new file mode 100644 index 0000000000000..fe1da86dd54c7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java @@ -0,0 +1,404 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.action.admin.cluster.stats.LongMetric.LongMetricValue; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Holds a snapshot of the CCS telemetry statistics from {@link CCSUsageTelemetry}. + * Used to hold the stats for a single node that's part of a {@link ClusterStatsNodeResponse}, as well as to + * accumulate stats for the entire cluster and return them as part of the {@link ClusterStatsResponse}. + *
+ * Theory of operation: + * - A snapshot is created on each node with that node's stats, and is sent to the coordinating node + * - The coordinating node creates an empty snapshot and merges all the node snapshots into it using add() + *
+ * The snapshot contains {@link LongMetricValue}s for latencies, which currently carry full histograms (a p90 cannot be + * computed from a set of per-node p90s, so the full histogram is needed). To avoid excessive copying (a histogram weighs several KB), + * the snapshot is designed to be mutable, so that multiple snapshots can be added to it without copying the histograms each time. + * The snapshot objects are not meant to be mutated otherwise. + *
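As an illustrative aside (not part of this change): a minimal sketch of the coordinating-node merge described above. Only the public no-arg constructor and add() defined below are assumed; the sketch class and method names are made up.

import org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot;

import java.util.List;

class CcsTelemetryMergeSketch {
    // Fold each node's snapshot into a fresh, empty (and therefore mutable) snapshot.
    static CCSTelemetrySnapshot mergeNodeSnapshots(List<CCSTelemetrySnapshot> perNodeSnapshots) {
        CCSTelemetrySnapshot total = new CCSTelemetrySnapshot();
        for (CCSTelemetrySnapshot nodeSnapshot : perNodeSnapshots) {
            total.add(nodeSnapshot); // additive merge: counters are summed, histograms added
        }
        return total;
    }
}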
+ */ +public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment { + public static final String CCS_TELEMETRY_FIELD_NAME = "_search"; + private long totalCount; + private long successCount; + private final Map failureReasons; + + /** + * Latency metrics, overall. + */ + private final LongMetricValue took; + /** + * Latency metrics with minimize_roundtrips=true + */ + private final LongMetricValue tookMrtTrue; + /** + * Latency metrics with minimize_roundtrips=false + */ + private final LongMetricValue tookMrtFalse; + private long remotesPerSearchMax; + private double remotesPerSearchAvg; + private long skippedRemotes; + + private final Map featureCounts; + + private final Map clientCounts; + private final Map byRemoteCluster; + + /** + * Creates a new stats instance with the provided info. + */ + public CCSTelemetrySnapshot( + long totalCount, + long successCount, + Map failureReasons, + LongMetricValue took, + LongMetricValue tookMrtTrue, + LongMetricValue tookMrtFalse, + long remotesPerSearchMax, + double remotesPerSearchAvg, + long skippedRemotes, + Map featureCounts, + Map clientCounts, + Map byRemoteCluster + ) { + this.totalCount = totalCount; + this.successCount = successCount; + this.failureReasons = failureReasons; + this.took = took; + this.tookMrtTrue = tookMrtTrue; + this.tookMrtFalse = tookMrtFalse; + this.remotesPerSearchMax = remotesPerSearchMax; + this.remotesPerSearchAvg = remotesPerSearchAvg; + this.skippedRemotes = skippedRemotes; + this.featureCounts = featureCounts; + this.clientCounts = clientCounts; + this.byRemoteCluster = byRemoteCluster; + } + + /** + * Creates a new empty stats instance, that will get additional stats added through {@link #add(CCSTelemetrySnapshot)} + */ + public CCSTelemetrySnapshot() { + // Note this produces modifiable maps, so other snapshots can be merged into it + failureReasons = new HashMap<>(); + featureCounts = new HashMap<>(); + clientCounts = new HashMap<>(); + byRemoteCluster = new HashMap<>(); + took = new LongMetricValue(); + tookMrtTrue = new LongMetricValue(); + tookMrtFalse = new LongMetricValue(); + } + + public CCSTelemetrySnapshot(StreamInput in) throws IOException { + this.totalCount = in.readVLong(); + this.successCount = in.readVLong(); + this.failureReasons = in.readMap(StreamInput::readLong); + this.took = LongMetricValue.fromStream(in); + this.tookMrtTrue = LongMetricValue.fromStream(in); + this.tookMrtFalse = LongMetricValue.fromStream(in); + this.remotesPerSearchMax = in.readVLong(); + this.remotesPerSearchAvg = in.readDouble(); + this.skippedRemotes = in.readVLong(); + this.featureCounts = in.readMap(StreamInput::readLong); + this.clientCounts = in.readMap(StreamInput::readLong); + this.byRemoteCluster = in.readMap(PerClusterCCSTelemetry::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(totalCount); + out.writeVLong(successCount); + out.writeMap(failureReasons, StreamOutput::writeLong); + took.writeTo(out); + tookMrtTrue.writeTo(out); + tookMrtFalse.writeTo(out); + out.writeVLong(remotesPerSearchMax); + out.writeDouble(remotesPerSearchAvg); + out.writeVLong(skippedRemotes); + out.writeMap(featureCounts, StreamOutput::writeLong); + out.writeMap(clientCounts, StreamOutput::writeLong); + out.writeMap(byRemoteCluster, StreamOutput::writeWriteable); + } + + public long getTotalCount() { + return totalCount; + } + + public long getSuccessCount() { + return successCount; + } + + public Map getFailureReasons() { + return 
Collections.unmodifiableMap(failureReasons); + } + + public LongMetricValue getTook() { + return took; + } + + public LongMetricValue getTookMrtTrue() { + return tookMrtTrue; + } + + public LongMetricValue getTookMrtFalse() { + return tookMrtFalse; + } + + public long getRemotesPerSearchMax() { + return remotesPerSearchMax; + } + + public double getRemotesPerSearchAvg() { + return remotesPerSearchAvg; + } + + public long getSearchCountWithSkippedRemotes() { + return skippedRemotes; + } + + public Map getFeatureCounts() { + return Collections.unmodifiableMap(featureCounts); + } + + public Map getClientCounts() { + return Collections.unmodifiableMap(clientCounts); + } + + public Map getByRemoteCluster() { + return Collections.unmodifiableMap(byRemoteCluster); + } + + public static class PerClusterCCSTelemetry implements Writeable, ToXContentFragment { + private long count; + private long skippedCount; + private final LongMetricValue took; + + public PerClusterCCSTelemetry() { + took = new LongMetricValue(); + } + + public PerClusterCCSTelemetry(long count, long skippedCount, LongMetricValue took) { + this.took = took; + this.skippedCount = skippedCount; + this.count = count; + } + + public PerClusterCCSTelemetry(PerClusterCCSTelemetry other) { + this.count = other.count; + this.skippedCount = other.skippedCount; + this.took = new LongMetricValue(other.took); + } + + public PerClusterCCSTelemetry(StreamInput in) throws IOException { + this.count = in.readVLong(); + this.skippedCount = in.readVLong(); + this.took = LongMetricValue.fromStream(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(count); + out.writeVLong(skippedCount); + took.writeTo(out); + } + + public PerClusterCCSTelemetry add(PerClusterCCSTelemetry v) { + count += v.count; + skippedCount += v.skippedCount; + took.add(v.took); + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("total", count); + builder.field("skipped", skippedCount); + publishLatency(builder, "took", took); + builder.endObject(); + return builder; + } + + public long getCount() { + return count; + } + + public long getSkippedCount() { + return skippedCount; + } + + public LongMetricValue getTook() { + return took; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PerClusterCCSTelemetry that = (PerClusterCCSTelemetry) o; + return count == that.count && skippedCount == that.skippedCount && Objects.equals(took, that.took); + } + + @Override + public int hashCode() { + return Objects.hash(count, skippedCount, took); + } + } + + /** + * Add the provided stats to the ones held by the current instance, effectively merging the two. + * @param stats the other stats object to add to this one + */ + public void add(CCSTelemetrySnapshot stats) { + // This should be called in ClusterStatsResponse ctor only, so we don't need to worry about concurrency + if (stats.totalCount == 0) { + // Just ignore the empty stats. + // This could happen if the node is brand new or if the stats are not available, e.g. because it runs an old version. 
+ return; + } + long oldCount = totalCount; + totalCount += stats.totalCount; + successCount += stats.successCount; + skippedRemotes += stats.skippedRemotes; + stats.failureReasons.forEach((k, v) -> failureReasons.merge(k, v, Long::sum)); + stats.featureCounts.forEach((k, v) -> featureCounts.merge(k, v, Long::sum)); + stats.clientCounts.forEach((k, v) -> clientCounts.merge(k, v, Long::sum)); + took.add(stats.took); + tookMrtTrue.add(stats.tookMrtTrue); + tookMrtFalse.add(stats.tookMrtFalse); + remotesPerSearchMax = Math.max(remotesPerSearchMax, stats.remotesPerSearchMax); + if (totalCount > 0 && oldCount > 0) { + // Weighted average + remotesPerSearchAvg = (remotesPerSearchAvg * oldCount + stats.remotesPerSearchAvg * stats.totalCount) / totalCount; + } else { + // If we didn't have any old value, we just take the new one + remotesPerSearchAvg = stats.remotesPerSearchAvg; + } + // we copy the object here since we'll be modifying it later on subsequent adds + // TODO: this may be sub-optimal, as we'll be copying histograms when adding first snapshot to an empty container, + // which we could have avoided probably. + stats.byRemoteCluster.forEach((r, v) -> byRemoteCluster.merge(r, new PerClusterCCSTelemetry(v), PerClusterCCSTelemetry::add)); + } + + /** + * Publishes the latency statistics to the provided {@link XContentBuilder}. + * Example: + * "took": { + * "max": 345032, + * "avg": 1620, + * "p90": 2570 + * } + */ + public static void publishLatency(XContentBuilder builder, String name, LongMetricValue took) throws IOException { + builder.startObject(name); + { + builder.field("max", took.max()); + builder.field("avg", took.avg()); + builder.field("p90", took.p90()); + } + builder.endObject(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(CCS_TELEMETRY_FIELD_NAME); + { + builder.field("total", totalCount); + builder.field("success", successCount); + builder.field("skipped", skippedRemotes); + publishLatency(builder, "took", took); + publishLatency(builder, "took_mrt_true", tookMrtTrue); + publishLatency(builder, "took_mrt_false", tookMrtFalse); + builder.field("remotes_per_search_max", remotesPerSearchMax); + builder.field("remotes_per_search_avg", remotesPerSearchAvg); + builder.field("failure_reasons", failureReasons); + builder.field("features", featureCounts); + builder.field("clients", clientCounts); + builder.startObject("clusters"); + { + for (var entry : byRemoteCluster.entrySet()) { + String remoteName = entry.getKey(); + if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(remoteName)) { + remoteName = SearchResponse.LOCAL_CLUSTER_NAME_REPRESENTATION; + } + builder.field(remoteName, entry.getValue()); + } + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CCSTelemetrySnapshot that = (CCSTelemetrySnapshot) o; + return totalCount == that.totalCount + && successCount == that.successCount + && skippedRemotes == that.skippedRemotes + && Objects.equals(failureReasons, that.failureReasons) + && Objects.equals(took, that.took) + && Objects.equals(tookMrtTrue, that.tookMrtTrue) + && Objects.equals(tookMrtFalse, that.tookMrtFalse) + && Objects.equals(remotesPerSearchMax, that.remotesPerSearchMax) + && Objects.equals(remotesPerSearchAvg, that.remotesPerSearchAvg) + && Objects.equals(featureCounts, that.featureCounts) + 
&& Objects.equals(clientCounts, that.clientCounts) + && Objects.equals(byRemoteCluster, that.byRemoteCluster); + } + + @Override + public int hashCode() { + return Objects.hash( + totalCount, + successCount, + failureReasons, + took, + tookMrtTrue, + tookMrtFalse, + remotesPerSearchMax, + remotesPerSearchAvg, + skippedRemotes, + featureCounts, + clientCounts, + byRemoteCluster + ); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java new file mode 100644 index 0000000000000..b2d75ac8f61f3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.Result; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.query.SearchTimeoutException; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NoSeedNodeLeftException; +import org.elasticsearch.transport.NoSuchRemoteClusterException; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.transport.RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + +/** + * This is a container for telemetry data from an individual cross-cluster search for _search or _async_search (or + * other search endpoints that use the {@link TransportSearchAction} such as _msearch). 
+ */ +public class CCSUsage { + private final long took; + private final Result status; + private final Set features; + private final int remotesCount; + + private final String client; + + private final Set skippedRemotes; + private final Map perClusterUsage; + + public static class Builder { + private long took; + private final Set features; + private Result status = Result.SUCCESS; + private int remotesCount; + private String client; + private final Set skippedRemotes; + private final Map perClusterUsage; + + public Builder() { + features = new HashSet<>(); + skippedRemotes = new HashSet<>(); + perClusterUsage = new HashMap<>(); + } + + public Builder took(long took) { + this.took = took; + return this; + } + + public Builder setFailure(Result failureType) { + this.status = failureType; + return this; + } + + public Builder setFailure(Exception e) { + return setFailure(getFailureType(e)); + } + + public Builder setFeature(String feature) { + this.features.add(feature); + return this; + } + + public Builder setClient(String client) { + this.client = client; + return this; + } + + public Builder skippedRemote(String remote) { + this.skippedRemotes.add(remote); + return this; + } + + public Builder perClusterUsage(String remote, TimeValue took) { + this.perClusterUsage.put(remote, new PerClusterUsage(took)); + return this; + } + + public CCSUsage build() { + return new CCSUsage(took, status, remotesCount, skippedRemotes, features, client, perClusterUsage); + } + + public Builder setRemotesCount(int remotesCount) { + this.remotesCount = remotesCount; + return this; + } + + public int getRemotesCount() { + return remotesCount; + } + + /** + * Get failure type as {@link Result} from the search failure exception. + */ + public static Result getFailureType(Exception e) { + var unwrapped = ExceptionsHelper.unwrapCause(e); + if (unwrapped instanceof Exception) { + e = (Exception) unwrapped; + } + if (isRemoteUnavailable(e)) { + return Result.REMOTES_UNAVAILABLE; + } + if (ExceptionsHelper.unwrap(e, ResourceNotFoundException.class) != null) { + return Result.NOT_FOUND; + } + if (e instanceof TaskCancelledException || (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null)) { + return Result.CANCELED; + } + if (ExceptionsHelper.unwrap(e, SearchTimeoutException.class) != null) { + return Result.TIMEOUT; + } + if (ExceptionsHelper.unwrap(e, ElasticsearchSecurityException.class) != null) { + return Result.SECURITY; + } + if (ExceptionsHelper.unwrapCorruption(e) != null) { + return Result.CORRUPTION; + } + // This is kind of last resort check - if we still don't know the reason but all shard failures are remote, + // we assume it's remote's fault somehow. + if (e instanceof SearchPhaseExecutionException spe) { + // If this is a failure that happened because of remote failures only + var groupedFails = ExceptionsHelper.groupBy(spe.shardFailures()); + if (Arrays.stream(groupedFails).allMatch(Builder::isRemoteFailure)) { + return Result.REMOTES_UNAVAILABLE; + } + } + // OK we don't know what happened + return Result.UNKNOWN; + } + + /** + * Is this failure exception because remote was unavailable? 
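A small sketch (not part of this change) of how setFailure(Exception) maps an arbitrary exception to a Result via Builder.getFailureType(). The sketch class is hypothetical; the exception types and expected results follow the classification rules implemented in this file.

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.admin.cluster.stats.CCSUsage;
import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.Result;
import org.elasticsearch.tasks.TaskCancelledException;

class FailureClassificationSketch {
    static void demo() {
        // Missing-resource failures are reported as "not_found".
        Result notFound = CCSUsage.Builder.getFailureType(new ResourceNotFoundException("no such snapshot"));
        assert notFound == Result.NOT_FOUND;
        // Cancellation is detected directly or anywhere in the cause chain.
        Result canceled = CCSUsage.Builder.getFailureType(new TaskCancelledException("parent task was cancelled"));
        assert canceled == Result.CANCELED;
        // Anything that matches no known category falls through to "other".
        Result unknown = CCSUsage.Builder.getFailureType(new RuntimeException("boom"));
        assert unknown == Result.UNKNOWN;
    }
}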
+ * See also: TransportResolveClusterAction#notConnectedError + */ + static boolean isRemoteUnavailable(Exception e) { + if (ExceptionsHelper.unwrap( + e, + ConnectTransportException.class, + NoSuchRemoteClusterException.class, + NoSeedNodeLeftException.class + ) != null) { + return true; + } + Throwable ill = ExceptionsHelper.unwrap(e, IllegalStateException.class, IllegalArgumentException.class); + if (ill != null && (ill.getMessage().contains("Unable to open any connections") || ill.getMessage().contains("unknown host"))) { + return true; + } + // Ok doesn't look like any of the known remote exceptions + return false; + } + + /** + * Is this failure coming from a remote cluster? + */ + static boolean isRemoteFailure(ShardOperationFailedException failure) { + if (failure instanceof ShardSearchFailure shardFailure) { + SearchShardTarget shard = shardFailure.shard(); + return shard != null && shard.getClusterAlias() != null && LOCAL_CLUSTER_GROUP_KEY.equals(shard.getClusterAlias()) == false; + } + return false; + } + } + + private CCSUsage( + long took, + Result status, + int remotesCount, + Set skippedRemotes, + Set features, + String client, + Map perClusterUsage + ) { + this.status = status; + this.remotesCount = remotesCount; + this.features = features; + this.client = client; + this.took = took; + this.skippedRemotes = skippedRemotes; + this.perClusterUsage = perClusterUsage; + } + + public Map getPerClusterUsage() { + return perClusterUsage; + } + + public Result getStatus() { + return status; + } + + public Set getFeatures() { + return features; + } + + public long getRemotesCount() { + return remotesCount; + } + + public String getClient() { + return client; + } + + public long getTook() { + return took; + } + + public Set getSkippedRemotes() { + return skippedRemotes; + } + + public static class PerClusterUsage { + + // if MRT=true, the took time on the remote cluster (if MRT=true), otherwise the overall took time + private long took; + + public PerClusterUsage(TimeValue took) { + if (took != null) { + this.took = took.millis(); + } + } + + public long getTook() { + return took; + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java new file mode 100644 index 0000000000000..60766bd4068e3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.common.util.Maps; + +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.LongAdder; + +/** + * Service holding accumulated CCS search usage statistics. Individual cross-cluster searches will pass + * CCSUsage data here to have it collated and aggregated. Snapshots of the current CCS Telemetry Usage + * can be obtained by getting {@link CCSTelemetrySnapshot} objects. + *
+ * Theory of operation: + * Each search creates a {@link CCSUsage.Builder}, which is updated as the search request progresses, + * and instantiates a {@link CCSUsage} object when the request finishes. + * That object is passed to {@link #updateUsage(CCSUsage)} when request processing ends (whether successfully or not). + * The {@link #updateUsage(CCSUsage)} method then updates the internal counters and metrics. + *
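A minimal end-to-end sketch (not part of this change) of that flow, using only the Builder, updateUsage() and getCCSTelemetrySnapshot() APIs introduced in this PR. The remote names, timings and the sketch class itself are made up.

import org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot;
import org.elasticsearch.action.admin.cluster.stats.CCSUsage;
import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry;
import org.elasticsearch.core.TimeValue;

class CcsUsageFlowSketch {
    static CCSTelemetrySnapshot recordOneSearch(CCSUsageTelemetry telemetry) {
        // 1. The search fills in a builder as it progresses and builds the CCSUsage when it finishes.
        CCSUsage usage = new CCSUsage.Builder()
            .setRemotesCount(2)
            .setClient("kibana")                                        // counted only because it is a known client
            .setFeature(CCSUsageTelemetry.MRT_FEATURE)                  // minimize_roundtrips was used
            .perClusterUsage("remote1", TimeValue.timeValueMillis(120))
            .perClusterUsage("remote2", TimeValue.timeValueMillis(95))
            .took(150)
            .build();
        // 2. The finished usage record is reported once, whether the search succeeded or failed.
        telemetry.updateUsage(usage);
        // 3. A point-in-time, additive snapshot can be taken at any moment.
        return telemetry.getCCSTelemetrySnapshot();
    }
}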
+ * When we need to return the current state of the telemetry, we can call {@link #getCCSTelemetrySnapshot()} which produces + * a snapshot of the current state of the telemetry as {@link CCSTelemetrySnapshot}. These snapshots are additive so + * when collecting the snapshots from multiple nodes, an empty snapshot is created and then all the node's snapshots are added + * to it to obtain the summary telemetry. + */ +public class CCSUsageTelemetry { + + /** + * Result of the request execution. + * Either "success" or a failure reason. + */ + public enum Result { + SUCCESS("success"), + REMOTES_UNAVAILABLE("remotes_unavailable"), + CANCELED("canceled"), + NOT_FOUND("not_found"), + TIMEOUT("timeout"), + CORRUPTION("corruption"), + SECURITY("security"), + // May be helpful if there's a lot of other reasons, and it may be hard to calculate the unknowns for some clients. + UNKNOWN("other"); + + private final String name; + + Result(String name) { + this.name = name; + } + + public String getName() { + return name; + } + } + + // Not enum because we won't mind other places adding their own features + public static final String MRT_FEATURE = "mrt_on"; + public static final String ASYNC_FEATURE = "async"; + public static final String WILDCARD_FEATURE = "wildcards"; + + // The list of known Elastic clients. May be incomplete. + public static final Set KNOWN_CLIENTS = Set.of( + "kibana", + "cloud", + "logstash", + "beats", + "fleet", + "ml", + "security", + "observability", + "enterprise-search", + "elasticsearch", + "connectors", + "connectors-cli" + ); + + private final LongAdder totalCount; + private final LongAdder successCount; + private final Map failureReasons; + + /** + * Latency metrics overall + */ + private final LongMetric took; + /** + * Latency metrics with minimize_roundtrips=true + */ + private final LongMetric tookMrtTrue; + /** + * Latency metrics with minimize_roundtrips=false + */ + private final LongMetric tookMrtFalse; + private final LongMetric remotesPerSearch; + private final LongAdder skippedRemotes; + + private final Map featureCounts; + + private final Map clientCounts; + private final Map byRemoteCluster; + + public CCSUsageTelemetry() { + this.byRemoteCluster = new ConcurrentHashMap<>(); + totalCount = new LongAdder(); + successCount = new LongAdder(); + failureReasons = new ConcurrentHashMap<>(); + took = new LongMetric(); + tookMrtTrue = new LongMetric(); + tookMrtFalse = new LongMetric(); + remotesPerSearch = new LongMetric(); + skippedRemotes = new LongAdder(); + featureCounts = new ConcurrentHashMap<>(); + clientCounts = new ConcurrentHashMap<>(); + } + + public void updateUsage(CCSUsage ccsUsage) { + assert ccsUsage.getRemotesCount() > 0 : "Expected at least one remote cluster in CCSUsage"; + // TODO: fork this to a background thread? + doUpdate(ccsUsage); + } + + // This is not synchronized, instead we ensure that every metric in the class is thread-safe. 
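    // Concretely: the plain counters (totalCount, successCount, skippedRemotes) are LongAdders, the maps
    // (failureReasons, featureCounts, clientCounts, byRemoteCluster) are ConcurrentHashMaps, and the latency
    // metrics are LongMetric instances backed by a concurrent histogram, so concurrent searches can report
    // their usage without taking a lock here.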
+ private void doUpdate(CCSUsage ccsUsage) { + totalCount.increment(); + long searchTook = ccsUsage.getTook(); + if (isSuccess(ccsUsage)) { + successCount.increment(); + took.record(searchTook); + if (isMRT(ccsUsage)) { + tookMrtTrue.record(searchTook); + } else { + tookMrtFalse.record(searchTook); + } + ccsUsage.getPerClusterUsage().forEach((r, u) -> byRemoteCluster.computeIfAbsent(r, PerClusterCCSTelemetry::new).update(u)); + } else { + failureReasons.computeIfAbsent(ccsUsage.getStatus(), k -> new LongAdder()).increment(); + } + + remotesPerSearch.record(ccsUsage.getRemotesCount()); + if (ccsUsage.getSkippedRemotes().isEmpty() == false) { + skippedRemotes.increment(); + ccsUsage.getSkippedRemotes().forEach(remote -> byRemoteCluster.computeIfAbsent(remote, PerClusterCCSTelemetry::new).skipped()); + } + ccsUsage.getFeatures().forEach(f -> featureCounts.computeIfAbsent(f, k -> new LongAdder()).increment()); + String client = ccsUsage.getClient(); + if (client != null && KNOWN_CLIENTS.contains(client)) { + // We count only known clients for now + clientCounts.computeIfAbsent(ccsUsage.getClient(), k -> new LongAdder()).increment(); + } + } + + private boolean isMRT(CCSUsage ccsUsage) { + return ccsUsage.getFeatures().contains(MRT_FEATURE); + } + + private boolean isSuccess(CCSUsage ccsUsage) { + return ccsUsage.getStatus() == Result.SUCCESS; + } + + public Map getTelemetryByCluster() { + return byRemoteCluster; + } + + /** + * Telemetry of each remote involved in cross-cluster searches + */ + public static class PerClusterCCSTelemetry { + private final String clusterAlias; + // The number of successful (not skipped) requests to this cluster. + private final LongAdder count; + private final LongAdder skippedCount; + // This is only over the successful requests, skipped ones do not count here.
+ private final LongMetric took; + + PerClusterCCSTelemetry(String clusterAlias) { + this.clusterAlias = clusterAlias; + this.count = new LongAdder(); + took = new LongMetric(); + this.skippedCount = new LongAdder(); + } + + void update(CCSUsage.PerClusterUsage remoteUsage) { + count.increment(); + took.record(remoteUsage.getTook()); + } + + void skipped() { + skippedCount.increment(); + } + + public long getCount() { + return count.longValue(); + } + + @Override + public String toString() { + return "PerClusterCCSTelemetry{" + + "clusterAlias='" + + clusterAlias + + '\'' + + ", count=" + + count + + ", latency=" + + took.toString() + + '}'; + } + + public long getSkippedCount() { + return skippedCount.longValue(); + } + + public CCSTelemetrySnapshot.PerClusterCCSTelemetry getSnapshot() { + return new CCSTelemetrySnapshot.PerClusterCCSTelemetry(count.longValue(), skippedCount.longValue(), took.getValue()); + } + + } + + public CCSTelemetrySnapshot getCCSTelemetrySnapshot() { + Map reasonsMap = Maps.newMapWithExpectedSize(failureReasons.size()); + failureReasons.forEach((k, v) -> reasonsMap.put(k.getName(), v.longValue())); + + LongMetric.LongMetricValue remotes = remotesPerSearch.getValue(); + + // Maps returned here are unmodifiable, but the empty ctor produces modifiable maps + return new CCSTelemetrySnapshot( + totalCount.longValue(), + successCount.longValue(), + Collections.unmodifiableMap(reasonsMap), + took.getValue(), + tookMrtTrue.getValue(), + tookMrtFalse.getValue(), + remotes.max(), + remotes.avg(), + skippedRemotes.longValue(), + Collections.unmodifiableMap(Maps.transformValues(featureCounts, LongAdder::longValue)), + Collections.unmodifiableMap(Maps.transformValues(clientCounts, LongAdder::longValue)), + Collections.unmodifiableMap(Maps.transformValues(byRemoteCluster, PerClusterCCSTelemetry::getSnapshot)) + ); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index d74889b623589..b48295dc8b3eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -20,29 +20,33 @@ import org.elasticsearch.core.Nullable; import java.io.IOException; +import java.util.Objects; public class ClusterStatsNodeResponse extends BaseNodeResponse { private final NodeInfo nodeInfo; private final NodeStats nodeStats; private final ShardStats[] shardsStats; - private ClusterHealthStatus clusterStatus; + private final ClusterHealthStatus clusterStatus; private final SearchUsageStats searchUsageStats; + private final RepositoryUsageStats repositoryUsageStats; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); - clusterStatus = null; - if (in.readBoolean()) { - clusterStatus = ClusterHealthStatus.readFrom(in); - } + this.clusterStatus = in.readOptionalWriteable(ClusterHealthStatus::readFrom); this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); - shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); + this.shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats = new SearchUsageStats(in); } else { searchUsageStats = new SearchUsageStats(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + 
repositoryUsageStats = RepositoryUsageStats.readFrom(in); + } else { + repositoryUsageStats = RepositoryUsageStats.EMPTY; + } } public ClusterStatsNodeResponse( @@ -51,14 +55,16 @@ public ClusterStatsNodeResponse( NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats, - SearchUsageStats searchUsageStats + SearchUsageStats searchUsageStats, + RepositoryUsageStats repositoryUsageStats ) { super(node); this.nodeInfo = nodeInfo; this.nodeStats = nodeStats; this.shardsStats = shardsStats; this.clusterStatus = clusterStatus; - this.searchUsageStats = searchUsageStats; + this.searchUsageStats = Objects.requireNonNull(searchUsageStats); + this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats); } public NodeInfo nodeInfo() { @@ -85,20 +91,22 @@ public SearchUsageStats searchUsageStats() { return searchUsageStats; } + public RepositoryUsageStats repositoryUsageStats() { + return repositoryUsageStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (clusterStatus == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeByte(clusterStatus.value()); - } + out.writeOptionalWriteable(clusterStatus); nodeInfo.writeTo(out); nodeStats.writeTo(out); out.writeArray(shardsStats); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats.writeTo(out); } + if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + repositoryUsageStats.writeTo(out); + } // else just drop these stats, ok for bwc } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index 36e7b247befac..b6dd40e8c8b79 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -30,6 +30,7 @@ public class ClusterStatsResponse extends BaseNodesResponse r.isEmpty() == false) + // stats should be the same on every node so just pick one of them + .findAny() + .orElse(RepositoryUsageStats.EMPTY); } public String getClusterUUID() { @@ -113,6 +122,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("snapshots"); clusterSnapshotStats.toXContent(builder, params); + builder.field("repositories"); + repositoryUsageStats.toXContent(builder, params); + return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/LongMetric.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/LongMetric.java new file mode 100644 index 0000000000000..f3bb936b108c0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/LongMetric.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.HdrHistogram.ConcurrentHistogram; +import org.HdrHistogram.Histogram; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; +import java.util.zip.DataFormatException; + +/** + * Metric class that accepts longs and provides count, average, max and percentiles. + * Abstracts out the details of how exactly the values are stored and calculated. + * {@link LongMetricValue} is a snapshot of the current state of the metric. + */ +public class LongMetric { + private final Histogram values; + private static final int SIGNIFICANT_DIGITS = 2; + + LongMetric() { + values = new ConcurrentHistogram(SIGNIFICANT_DIGITS); + } + + void record(long v) { + values.recordValue(v); + } + + LongMetricValue getValue() { + return new LongMetricValue(values); + } + + /** + * Snapshot of {@link LongMetric} value that provides the current state of the metric. + * Can be added with another {@link LongMetricValue} object. + */ + public static final class LongMetricValue implements Writeable { + // We have to carry the full histogram around since we might need to calculate aggregate percentiles + // after collecting individual stats from the nodes, and we can't do that without having the full histogram. + // This costs about 2K per metric, which was deemed acceptable. + private final Histogram values; + + public LongMetricValue(Histogram values) { + // Copy here since we don't want the snapshot value to change if somebody updates the original one + this.values = values.copy(); + } + + public LongMetricValue(LongMetricValue v) { + this.values = v.values.copy(); + } + + LongMetricValue() { + this.values = new Histogram(SIGNIFICANT_DIGITS); + } + + public void add(LongMetricValue v) { + this.values.add(v.values); + } + + public static LongMetricValue fromStream(StreamInput in) throws IOException { + byte[] b = in.readByteArray(); + ByteBuffer bb = ByteBuffer.wrap(b); + try { + // TODO: not sure what is the good value for minBarForHighestToLowestValueRatio here? 
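As an aside here (not part of this change), a minimal sketch of how the LongMetric / LongMetricValue API defined in this file is used. Since the mutators are package-private, the hypothetical sketch class sits in the same package; the recorded values are made up.

package org.elasticsearch.action.admin.cluster.stats;

class LongMetricSketch {
    static LongMetric.LongMetricValue demo() {
        LongMetric latency = new LongMetric();
        // record() can be called by concurrent writers; the metric is backed by a concurrent histogram.
        latency.record(120);
        latency.record(95);
        latency.record(300);
        // getValue() copies the histogram, so later record() calls do not affect this snapshot.
        LongMetric.LongMetricValue snapshot = latency.getValue();
        long max = snapshot.max(); // ~300 (values are stored to roughly 1% precision)
        long p90 = snapshot.p90(); // ~300 for this tiny sample
        long avg = snapshot.avg(); // mean rounded up, roughly (120 + 95 + 300) / 3
        return snapshot;
    }
}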
+ Histogram dh = Histogram.decodeFromCompressedByteBuffer(bb, 1); + return new LongMetricValue(dh); + } catch (DataFormatException e) { + throw new IOException(e); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ByteBuffer b = ByteBuffer.allocate(values.getNeededByteBufferCapacity()); + values.encodeIntoCompressedByteBuffer(b); + int size = b.position(); + out.writeVInt(size); + out.writeBytes(b.array(), 0, size); + } + + public long count() { + return values.getTotalCount(); + } + + public long max() { + return values.getMaxValue(); + } + + public long avg() { + return (long) Math.ceil(values.getMean()); + } + + public long p90() { + return values.getValueAtPercentile(90); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) return true; + if (obj == null || obj.getClass() != this.getClass()) return false; + var that = (LongMetricValue) obj; + return this.values.equals(that.values); + } + + @Override + public int hashCode() { + return Objects.hash(values); + } + + @Override + public String toString() { + return "LongMetricValue[count=" + count() + ", " + "max=" + max() + ", " + "avg=" + avg() + "]"; + } + + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java new file mode 100644 index 0000000000000..771aa0fbef842 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +/** + * Stats on repository feature usage exposed in cluster stats for telemetry. + * + * @param statsByType a count of the repositories using various named features, keyed by repository type and then by feature name. 
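An illustrative sketch (not part of this change) of what a RepositoryUsageStats instance could look like. The repository type and feature names here are invented; only the record constructor from this file is assumed.

import org.elasticsearch.action.admin.cluster.stats.RepositoryUsageStats;

import java.util.Map;

class RepositoryUsageStatsSketch {
    static RepositoryUsageStats example() {
        // Hypothetical numbers: two repositories of type "s3", one of which uses a "chunked_uploads" feature.
        return new RepositoryUsageStats(Map.of("s3", Map.of("count", 2L, "chunked_uploads", 1L)));
        // Rendered through toXContent this becomes roughly: {"s3":{"count":2,"chunked_uploads":1}}
    }
}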
+ */ +public record RepositoryUsageStats(Map> statsByType) implements Writeable, ToXContentObject { + + public static final RepositoryUsageStats EMPTY = new RepositoryUsageStats(Map.of()); + + public static RepositoryUsageStats readFrom(StreamInput in) throws IOException { + final var statsByType = in.readMap(i -> i.readMap(StreamInput::readVLong)); + if (statsByType.isEmpty()) { + return EMPTY; + } else { + return new RepositoryUsageStats(statsByType); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(statsByType, (o, m) -> o.writeMap(m, StreamOutput::writeVLong)); + } + + public boolean isEmpty() { + return statsByType.isEmpty(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (Map.Entry> typeAndStats : statsByType.entrySet()) { + builder.startObject(typeAndStats.getKey()); + for (Map.Entry statAndValue : typeAndStats.getValue().entrySet()) { + builder.field(statAndValue.getKey(), statAndValue.getValue()); + } + builder.endObject(); + } + return builder.endObject(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 5d12cb5c0f657..1912de3cfa4d2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.CancellableSingleObjectCache; @@ -40,7 +39,9 @@ import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeService; +import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -78,6 +79,7 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final NodeService nodeService; private final IndicesService indicesService; + private final RepositoriesService repositoriesService; private final SearchUsageHolder searchUsageHolder; private final MetadataStatsCache mappingStatsCache; @@ -90,6 +92,7 @@ public TransportClusterStatsAction( TransportService transportService, NodeService nodeService, IndicesService indicesService, + RepositoriesService repositoriesService, UsageService usageService, ActionFilters actionFilters ) { @@ -103,6 +106,7 @@ public TransportClusterStatsAction( ); this.nodeService = nodeService; this.indicesService = indicesService; + this.repositoriesService = repositoriesService; this.searchUsageHolder = usageService.getSearchUsageHolder(); this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); @@ -237,12 +241,14 @@ protected ClusterStatsNodeResponse 
nodeOperation(ClusterStatsNodeRequest nodeReq } } - ClusterHealthStatus clusterStatus = null; - if (clusterService.state().nodes().isLocalNodeElectedMaster()) { - clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); - } + final ClusterState clusterState = clusterService.state(); + final ClusterHealthStatus clusterStatus = clusterState.nodes().isLocalNodeElectedMaster() + ? new ClusterStateHealth(clusterState).getStatus() + : null; + + final SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); - SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); + final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats(); return new ClusterStatsNodeResponse( nodeInfo.getNode(), @@ -250,7 +256,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]), - searchUsageStats + searchUsageStats, + repositoryUsageStats ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java index 96dbaec7a4487..a893a77f660f3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -26,13 +27,12 @@ public DeleteStoredScriptRequest(StreamInput in) throws IOException { id = in.readString(); } - DeleteStoredScriptRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + DeleteStoredScriptRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } - public DeleteStoredScriptRequest(String id) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); - + public DeleteStoredScriptRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String id) { + super(masterNodeTimeout, ackTimeout); this.id = id; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java deleted file mode 100644 index ce074e17ebb75..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.storedscripts; - -import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< - DeleteStoredScriptRequest, - AcknowledgedResponse, - DeleteStoredScriptRequestBuilder> { - - public DeleteStoredScriptRequestBuilder(ElasticsearchClient client) { - super(client, TransportDeleteStoredScriptAction.TYPE, new DeleteStoredScriptRequest()); - } - - public DeleteStoredScriptRequestBuilder setId(String id) { - request.id(id); - - return this; - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java index a0b6f9b26cb6c..3a3756553bd76 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptAction.java @@ -10,12 +10,12 @@ import org.elasticsearch.action.ActionType; -public class GetStoredScriptAction extends ActionType { +public class GetStoredScriptAction { - public static final GetStoredScriptAction INSTANCE = new GetStoredScriptAction(); - public static final String NAME = "cluster:admin/script/get"; + public static final ActionType INSTANCE = new ActionType<>("cluster:admin/script/get"); + public static final String NAME = INSTANCE.name(); private GetStoredScriptAction() { - super(NAME); + /* no instances */ } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java index 1e8d865d9eb8c..f2f651b127573 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -21,13 +22,12 @@ public class GetStoredScriptRequest extends MasterNodeReadRequest { - - public GetStoredScriptRequestBuilder(ElasticsearchClient client) { - super(client, GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest()); - } - - public GetStoredScriptRequestBuilder setId(String id) { - request.id(id); - return this; - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 7d6ac9d319fa1..1ac151f0f4971 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.script.StoredScriptSource; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -41,12 +42,20 @@ public PutStoredScriptRequest(StreamInput in) throws IOException { source = new StoredScriptSource(in); } - public PutStoredScriptRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public PutStoredScriptRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } - public PutStoredScriptRequest(String id, String context, BytesReference content, XContentType xContentType, StoredScriptSource source) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public PutStoredScriptRequest( + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String id, + String context, + BytesReference content, + XContentType xContentType, + StoredScriptSource source + ) { + super(masterNodeTimeout, ackTimeout); this.id = id; this.context = context; this.content = content; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java deleted file mode 100644 index 9e353382f84a9..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.storedscripts; - -import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.xcontent.XContentType; - -public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder< - PutStoredScriptRequest, - AcknowledgedResponse, - PutStoredScriptRequestBuilder> { - - public PutStoredScriptRequestBuilder(ElasticsearchClient client) { - super(client, TransportPutStoredScriptAction.TYPE, new PutStoredScriptRequest()); - } - - public PutStoredScriptRequestBuilder setId(String id) { - request.id(id); - return this; - } - - public PutStoredScriptRequestBuilder setContext(String context) { - request.context(context); - return this; - } - - /** - * Set the source of the script along with the content type of the source - */ - public PutStoredScriptRequestBuilder setContent(BytesReference source, XContentType xContentType) { - request.content(source, xContentType); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java index a5d1fd7e151c5..9362564e0855d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java index 47e1ddc4c9ac1..58a68c558f4c7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java @@ -10,8 +10,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.ScriptContextInfo; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java index ec18439653380..f8fe7fb9b85a1 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java @@ -11,8 +11,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java index f5c674df2e475..daf8411d0808f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java @@ -16,8 +16,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index 6a73cd0b91264..2ce5fa942c948 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index b9a16a4fa44bf..7c4d77be1364b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -19,8 +19,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import 
org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java index b4f483e6f8161..b8ad70ad8b6f8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java @@ -48,7 +48,7 @@ public class IndicesAliasesResponse extends AcknowledgedResponse { protected IndicesAliasesResponse(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.ALIAS_ACTION_RESULTS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { this.errors = in.readBoolean(); this.actionResults = in.readCollectionAsImmutableList(AliasActionResult::new); } else { @@ -91,7 +91,7 @@ public static IndicesAliasesResponse build(final List actionR @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.ALIAS_ACTION_RESULTS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeBoolean(errors); out.writeCollection(actionResults); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index f9c255bb057d8..fbbe2b1b60a1d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -30,11 +30,11 @@ import org.elasticsearch.cluster.metadata.MetadataIndexAliasesService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 4f7525c700fc2..77f2aca410e68 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -27,6 +26,7 @@ import org.elasticsearch.core.UpdateForV9; import 
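/*
 * Sketch of the wire-compatibility pattern behind the IndicesAliasesResponse change above
 * (illustrative): the read and write sides must gate the same fields on the same
 * TransportVersion, here the released V_8_14_0 constant instead of a pre-release feature
 * constant. The else-branch defaults shown are assumptions for the example.
 */
// read side: older peers never sent these fields
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) {
    this.errors = in.readBoolean();
    this.actionResults = in.readCollectionAsImmutableList(AliasActionResult::new);
} else {
    this.errors = false;
    this.actionResults = List.of();
}
// write side: never send the new fields to peers that cannot read them
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) {
    out.writeBoolean(errors);
    out.writeCollection(actionResults);
}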
org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index b0ac4311f6b09..76a972f5b900b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; @@ -43,6 +42,7 @@ import org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportReloadAnalyzersAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportReloadAnalyzersAction.java index 3e13b55dbd9d4..5b9810eb466d7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportReloadAnalyzersAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportReloadAnalyzersAction.java @@ -25,12 +25,12 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 428fd6e083116..51402bf8c80b5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -20,9 +20,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import 
org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index f87ea1e4cd6c7..9b6331a9d320d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -21,13 +21,13 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index 643f92ec3378f..cfeab14d05e32 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -29,6 +28,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -68,7 +68,8 @@ public TransportVerifyShardBeforeCloseAction( ShardRequest::new, threadPool.executor(ThreadPool.Names.MANAGEMENT), SyncGlobalCheckpointAfterOperation.DoNotSync, - PrimaryActionExecution.RejectOnOverload + PrimaryActionExecution.RejectOnOverload, + ReplicaActionExecution.SubjectToCircuitBreaker ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index e68263aab5330..fed8044477cc0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -37,7 +37,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; -import 
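/*
 * The recurring one-line change in this patch: @Inject now comes from
 * org.elasticsearch.injection.guice instead of org.elasticsearch.common.inject.
 * Minimal before/after shape for any of the transport actions touched here;
 * SomeTransportAction and its constructor arguments are placeholders.
 */
// before: import org.elasticsearch.common.inject.Inject;
// after:
import org.elasticsearch.injection.guice.Inject;

public class SomeTransportAction {
    @Inject
    public SomeTransportAction(TransportService transportService, ClusterService clusterService) {
        // wiring unchanged; only the annotation's package moved
    }
}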
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -46,6 +45,7 @@ import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 8a46daa45e73b..948199fbe74f4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; -import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -43,8 +42,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final Set aliases = new HashSet<>(); - private final Set blocks = new HashSet<>(); - private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; private boolean performReroute = true; @@ -125,10 +122,6 @@ public Set aliases() { return aliases; } - public Set blocks() { - return blocks; - } - public Index recoverFrom() { return recoverFrom; } @@ -229,8 +222,6 @@ public String toString() { + settings + ", aliases=" + aliases - + ", blocks=" - + blocks + ", waitForActiveShards=" + waitForActiveShards + ", systemDataStreamDescriptor=" diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index e62205938f6fd..438f3930dee7e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -23,12 +23,12 @@ import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index 95b1410e16565..7b8d70693f40d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -31,10 +31,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java index b6e8693acc66d..ff2b32ac5cc6c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java @@ -15,9 +15,9 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.gateway.DanglingIndicesState; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index d3957be682cfd..4c90ce9875266 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -22,9 +22,9 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.gateway.LocalAllocateDangledIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java index f6b809ea1ea49..40ca4e6de9435 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java @@ -16,9 +16,9 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.gateway.DanglingIndicesState; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 35034eebfaa93..a2cda7fc1ac33 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -21,9 +21,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java index 8380edb4cb6ed..8676daf0a55d9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/TransportAnalyzeIndexDiskUsageAction.java @@ -24,13 +24,13 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 96b4a0191b10c..bc34e3fb99e04 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -17,8 +17,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import 
org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 69e1309b89aef..52662a79d7cbd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -16,13 +16,13 @@ import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -60,7 +60,8 @@ public TransportShardFlushAction( ShardFlushRequest::new, threadPool.executor(ThreadPool.Names.FLUSH), SyncGlobalCheckpointAfterOperation.DoNotSync, - PrimaryActionExecution.RejectOnOverload + PrimaryActionExecution.RejectOnOverload, + ReplicaActionExecution.SubjectToCircuitBreaker ); transportService.registerRequestHandler( PRE_SYNCED_FLUSH_ACTION_NAME, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index df98e8f12f18e..c474a3d40cdc9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -20,10 +20,10 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index eb1f823dc0302..7de14400ec2b5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -19,11 +19,11 @@ import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.service.ClusterService; import 
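/*
 * Shape of the constructor change repeated for the replication actions in this patch
 * (TransportShardFlushAction above, and the verify/refresh actions elsewhere): the
 * super(...) call gains a trailing ReplicaActionExecution argument. Only the tail of the
 * argument list is shown; the leading arguments are unchanged and elided here.
 */
super(
    // ... unchanged leading arguments ...
    ShardFlushRequest::new,
    threadPool.executor(ThreadPool.Names.FLUSH),
    SyncGlobalCheckpointAfterOperation.DoNotSync,
    PrimaryActionExecution.RejectOnOverload,
    ReplicaActionExecution.SubjectToCircuitBreaker   // new trailing argument
);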
org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index 4fd770d9a3576..849977a39f2a6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -15,8 +15,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 27516e0ad5a7f..7b024737dc70a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; @@ -29,6 +28,7 @@ import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ToXContent; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index f55eef4ebbae6..7691dac2e1dc4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import 
org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java index 9c3b08ef49add..6a3977bcc8d4a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java @@ -20,10 +20,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataMappingService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index ca6e97c3e1334..737ee28bbf873 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -23,12 +23,12 @@ import org.elasticsearch.cluster.metadata.MetadataMappingService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index e8fc62d480bc4..bd89d124b3f76 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -21,9 +21,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java index 72731bc636b13..f5b12dbe209ac 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java @@ -21,9 +21,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java index e93b3983ee85b..27234976f780b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -26,6 +25,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -69,7 +69,8 @@ public TransportVerifyShardIndexBlockAction( ShardRequest::new, threadPool.executor(ThreadPool.Names.MANAGEMENT), SyncGlobalCheckpointAfterOperation.DoNotSync, - PrimaryActionExecution.RejectOnOverload + PrimaryActionExecution.RejectOnOverload, + ReplicaActionExecution.SubjectToCircuitBreaker ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index c74981d475389..cec8e26ded364 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -18,12 +18,12 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import 
org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 5d6f60216ae05..a5d0ec36a3bfd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index cc4edcf0efb81..5fa9ba6662c2e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -20,12 +20,12 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.threadpool.ThreadPool; @@ -70,7 +70,8 @@ public TransportShardRefreshAction( ShardRefreshReplicaRequest::new, threadPool.executor(ThreadPool.Names.REFRESH), SyncGlobalCheckpointAfterOperation.DoNotSync, - PrimaryActionExecution.RejectOnOverload + PrimaryActionExecution.RejectOnOverload, + ReplicaActionExecution.SubjectToCircuitBreaker ); // registers the unpromotable version of shard refresh action new TransportUnpromotableShardRefreshAction(clusterService, transportService, shardStateAction, actionFilters, indicesService); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java index b4357c69c46ae..29eaeb273a8a3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java @@ -14,9 +14,9 @@ import org.elasticsearch.action.support.broadcast.unpromotable.TransportBroadcastUnpromotableAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java index 118f139045971..224ea63150420 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java @@ -54,12 +54,14 @@ public ResolveClusterActionRequest(String[] names) { this(names, DEFAULT_INDICES_OPTIONS); } + @SuppressWarnings("this-escape") public ResolveClusterActionRequest(String[] names, IndicesOptions indicesOptions) { this.names = names; this.localIndicesRequested = localIndicesPresent(names); this.indicesOptions = indicesOptions; } + @SuppressWarnings("this-escape") public ResolveClusterActionRequest(StreamInput in) throws IOException { super(in); if (in.getTransportVersion().before(TransportVersions.V_8_13_0)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index 1f23ee724e542..1e5036a9bfc63 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -36,6 +35,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.search.SearchService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.RemoteClusterAware; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java index 8b171b0d12bf5..1ac7def758ab5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java @@ -26,10 +26,10 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.search.SearchService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; diff --git 
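/*
 * Background for the @SuppressWarnings("this-escape") additions on
 * ResolveClusterActionRequest above (illustrative, hedged): javac's this-escape lint
 * (JDK 21+) flags constructors that let `this` be observed before construction completes,
 * for example by calling a method a subclass could override. Minimal made-up shape of the
 * pattern; Base and Derived are placeholder names.
 */
class Base {
    Base() {
        init();   // `this` escapes: an override can run before the subclass is initialized
    }
    void init() {}
}

class Derived extends Base {
    private final java.util.List<String> names = new java.util.ArrayList<>();
    @Override
    void init() {
        names.add("x");   // NullPointerException: field initializers have not run yet
    }
}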
a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java index 29be34bc3b2c2..ef72fdd93caeb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java @@ -29,8 +29,8 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 5c7518abdbbf8..9d34b9ab5f126 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; @@ -45,6 +44,7 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.snapshots.SnapshotInProgressException; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.telemetry.TelemetryProvider; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountCondition.java index 93a11b8fe0855..acd26f2984c99 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/OptimalShardCountCondition.java @@ -65,6 +65,6 @@ public static OptimalShardCountCondition fromXContent(XContentParser parser) thr @Override boolean includedInVersion(TransportVersion version) { - return version.onOrAfter(TransportVersions.AUTO_SHARDING_ROLLOVER_CONDITION); + return version.onOrAfter(TransportVersions.V_8_14_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 6302b1c9ef9fb..db4fad99d4f48 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -116,7 +116,7 @@ public RolloverRequest(StreamInput in) throws IOException { } else { lazy = false; } - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_ROLLOVER)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { 
indicesOptions = IndicesOptions.readIndicesOptions(in); } } @@ -168,7 +168,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeBoolean(lazy); } - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_ROLLOVER)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { indicesOptions.writeIndicesOptions(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index d76cfedd279b5..9df3be1994fdf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -44,11 +44,11 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index bd12cfdbc7962..429ebe365bbe1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -36,7 +36,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse { private volatile Map indicesSegments; - IndicesSegmentResponse( + public IndicesSegmentResponse( ShardSegments[] shards, int totalShards, int successfulShards, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index 8cff400e6dde2..f5a79eec7ab33 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -18,11 +18,11 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java index 
e11c5a1a6103d..7396e1d383bd5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -25,6 +24,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.Index; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 36c0634fa9dba..25a22ae52ac61 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -23,12 +23,12 @@ import org.elasticsearch.cluster.metadata.MetadataUpdateSettingsService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 4b1c5b3f58dd5..08399cdd3f9cb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThrottledIterator; @@ -41,6 +40,7 @@ import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 8a2eb18b5164f..0fe8ee9670f00 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -27,12 +27,12 @@ import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java index d463c01bfda81..db49c199e05b0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java @@ -18,11 +18,11 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index dcde4ef6047c1..4c042570704d9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; @@ -27,6 +26,7 @@ import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java index 9ac10d782a605..3fcdd96feaf5f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java @@ -23,10 +23,10 @@ import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java index fa40a901c705b..2c6a0a2945abb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java @@ -23,10 +23,10 @@ import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 8d3a83a929389..757930bc0505b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -20,8 +20,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 5483097b140da..f0552cc3226f5 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -121,8 +121,6 @@ public static class Response extends ActionResponse implements ToXContentObject private final Map componentTemplates; @Nullable private final RolloverConfiguration rolloverConfiguration; - @Nullable - private final DataStreamGlobalRetention globalRetention; public Response(StreamInput in) throws IOException { super(in); @@ -132,25 +130,39 @@ public Response(StreamInput in) throws IOException { } else { rolloverConfiguration = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)) { - globalRetention = in.readOptionalWriteable(DataStreamGlobalRetention::read); - } else { - globalRetention = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + in.readOptionalWriteable(DataStreamGlobalRetention::read); } } + /** + * Please use {@link GetComponentTemplateAction.Response#Response(Map)} + */ + @Deprecated public Response(Map componentTemplates, @Nullable DataStreamGlobalRetention globalRetention) { - this(componentTemplates, null, globalRetention); + this(componentTemplates, (RolloverConfiguration) null); } + /** + * Please use {@link GetComponentTemplateAction.Response#Response(Map, RolloverConfiguration)} + */ + @Deprecated public Response( Map componentTemplates, @Nullable RolloverConfiguration rolloverConfiguration, - @Nullable DataStreamGlobalRetention globalRetention + @Nullable DataStreamGlobalRetention ignored ) { + this(componentTemplates, rolloverConfiguration); + } + + public Response(Map componentTemplates) { + this(componentTemplates, (RolloverConfiguration) null); + } + + public Response(Map componentTemplates, @Nullable RolloverConfiguration rolloverConfiguration) { this.componentTemplates = componentTemplates; this.rolloverConfiguration = rolloverConfiguration; - this.globalRetention = globalRetention; } public Map getComponentTemplates() { @@ -161,8 +173,14 @@ public RolloverConfiguration getRolloverConfiguration() { return rolloverConfiguration; } + /** + * @return null + * @deprecated The global retention is not used anymore in the component template response + */ + @Deprecated + @Nullable public DataStreamGlobalRetention getGlobalRetention() { - return globalRetention; + return null; } @Override @@ -171,8 +189,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)) { - out.writeOptionalWriteable(globalRetention); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + out.writeOptionalWriteable(null); } } @@ -182,13 +201,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Response that = (Response) o; return Objects.equals(componentTemplates, that.componentTemplates) - && Objects.equals(rolloverConfiguration, that.rolloverConfiguration) - && Objects.equals(globalRetention, that.globalRetention); + && Objects.equals(rolloverConfiguration, that.rolloverConfiguration); } @Override public int 
hashCode() { - return Objects.hash(componentTemplates, rolloverConfiguration, globalRetention); + return Objects.hash(componentTemplates, rolloverConfiguration); } @Override @@ -208,5 +226,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 5cb35d23c8b7c..ba07c87e753e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -122,8 +122,6 @@ public static class Response extends ActionResponse implements ToXContentObject private final Map indexTemplates; @Nullable private final RolloverConfiguration rolloverConfiguration; - @Nullable - private final DataStreamGlobalRetention globalRetention; public Response(StreamInput in) throws IOException { super(in); @@ -133,33 +131,57 @@ public Response(StreamInput in) throws IOException { } else { rolloverConfiguration = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)) { - globalRetention = in.readOptionalWriteable(DataStreamGlobalRetention::read); - } else { - globalRetention = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + in.readOptionalWriteable(DataStreamGlobalRetention::read); } } + /** + * Please use {@link GetComposableIndexTemplateAction.Response#Response(Map)} + */ public Response(Map indexTemplates, @Nullable DataStreamGlobalRetention globalRetention) { - this(indexTemplates, null, globalRetention); + this(indexTemplates, (RolloverConfiguration) null); } + /** + * Please use {@link GetComposableIndexTemplateAction.Response#Response(Map, RolloverConfiguration)} + */ + @Deprecated public Response( Map indexTemplates, @Nullable RolloverConfiguration rolloverConfiguration, @Nullable DataStreamGlobalRetention globalRetention ) { + this(indexTemplates, rolloverConfiguration); + } + + public Response(Map indexTemplates) { + this(indexTemplates, (RolloverConfiguration) null); + } + + public Response(Map indexTemplates, @Nullable RolloverConfiguration rolloverConfiguration) { this.indexTemplates = indexTemplates; this.rolloverConfiguration = rolloverConfiguration; - this.globalRetention = globalRetention; } public Map indexTemplates() { return indexTemplates; } + /** + * @return null + * @deprecated global retention is not used in composable templates anymore + */ + @Deprecated + @Nullable public DataStreamGlobalRetention getGlobalRetention() { - return globalRetention; + return null; + } + + @Nullable + public RolloverConfiguration getRolloverConfiguration() { + return rolloverConfiguration; } @Override @@ -168,8 +190,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)) { - out.writeOptionalWriteable(globalRetention); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { 
+ out.writeOptionalWriteable(null); } } @@ -178,14 +201,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; GetComposableIndexTemplateAction.Response that = (GetComposableIndexTemplateAction.Response) o; - return Objects.equals(indexTemplates, that.indexTemplates) - && Objects.equals(rolloverConfiguration, that.rolloverConfiguration) - && Objects.equals(globalRetention, that.globalRetention); + return Objects.equals(indexTemplates, that.indexTemplates) && Objects.equals(rolloverConfiguration, that.rolloverConfiguration); } @Override public int hashCode() { - return Objects.hash(indexTemplates, rolloverConfiguration, globalRetention); + return Objects.hash(indexTemplates, rolloverConfiguration); } @Override @@ -203,7 +224,5 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java index 0f088e046dd52..fcc053b8181fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java @@ -16,14 +16,13 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComponentTemplate; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -36,7 +35,6 @@ public class TransportGetComponentTemplateAction extends TransportMasterNodeRead GetComponentTemplateAction.Response> { private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionResolver globalRetentionResolver; @Inject public TransportGetComponentTemplateAction( @@ -44,8 +42,7 @@ public TransportGetComponentTemplateAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - DataStreamGlobalRetentionResolver globalRetentionResolver + IndexNameExpressionResolver indexNameExpressionResolver ) { super( GetComponentTemplateAction.NAME, @@ -59,7 +56,6 @@ public TransportGetComponentTemplateAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); - this.globalRetentionResolver = globalRetentionResolver; } @Override @@ -100,12 +96,11 @@ protected void masterOperation( listener.onResponse( new GetComponentTemplateAction.Response( results, - clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), - globalRetentionResolver.resolve(state) + 
clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) ) ); } else { - listener.onResponse(new GetComponentTemplateAction.Response(results, globalRetentionResolver.resolve(state))); + listener.onResponse(new GetComponentTemplateAction.Response(results)); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java index 6d6de026ced5a..e2ce172a1bf0b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java @@ -16,14 +16,13 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -36,7 +35,6 @@ public class TransportGetComposableIndexTemplateAction extends TransportMasterNo GetComposableIndexTemplateAction.Response> { private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionResolver globalRetentionResolver; @Inject public TransportGetComposableIndexTemplateAction( @@ -44,8 +42,7 @@ public TransportGetComposableIndexTemplateAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - DataStreamGlobalRetentionResolver globalRetentionResolver + IndexNameExpressionResolver indexNameExpressionResolver ) { super( GetComposableIndexTemplateAction.NAME, @@ -59,7 +56,6 @@ public TransportGetComposableIndexTemplateAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); - this.globalRetentionResolver = globalRetentionResolver; } @Override @@ -98,12 +94,11 @@ protected void masterOperation( listener.onResponse( new GetComposableIndexTemplateAction.Response( results, - clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), - globalRetentionResolver.resolve(state) + clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) ) ); } else { - listener.onResponse(new GetComposableIndexTemplateAction.Response(results, globalRetentionResolver.resolve(state))); + listener.onResponse(new GetComposableIndexTemplateAction.Response(results)); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index ae5bbe38de801..40815e87d26f7 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -16,9 +16,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 5d0a4a293ea4f..a27defd2c655c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -46,27 +46,19 @@ public class SimulateIndexTemplateResponse extends ActionResponse implements ToX @Nullable private final RolloverConfiguration rolloverConfiguration; - @Nullable - private final DataStreamGlobalRetention globalRetention; - public SimulateIndexTemplateResponse( - @Nullable Template resolvedTemplate, - @Nullable Map> overlappingTemplates, - DataStreamGlobalRetention globalRetention - ) { - this(resolvedTemplate, overlappingTemplates, null, globalRetention); + public SimulateIndexTemplateResponse(@Nullable Template resolvedTemplate, @Nullable Map> overlappingTemplates) { + this(resolvedTemplate, overlappingTemplates, null); } public SimulateIndexTemplateResponse( @Nullable Template resolvedTemplate, @Nullable Map> overlappingTemplates, - @Nullable RolloverConfiguration rolloverConfiguration, - @Nullable DataStreamGlobalRetention globalRetention + @Nullable RolloverConfiguration rolloverConfiguration ) { this.resolvedTemplate = resolvedTemplate; this.overlappingTemplates = overlappingTemplates; this.rolloverConfiguration = rolloverConfiguration; - this.globalRetention = globalRetention; } public RolloverConfiguration getRolloverConfiguration() { @@ -89,9 +81,10 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException { rolloverConfiguration = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(RolloverConfiguration::new) : null; - globalRetention = in.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION) - ? 
in.readOptionalWriteable(DataStreamGlobalRetention::read) - : null; + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + in.readOptionalWriteable(DataStreamGlobalRetention::read); + } } @Override @@ -110,8 +103,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)) { - out.writeOptionalWriteable(globalRetention); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) + && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + out.writeOptionalWriteable(null); } } @@ -147,13 +141,12 @@ public boolean equals(Object o) { SimulateIndexTemplateResponse that = (SimulateIndexTemplateResponse) o; return Objects.equals(resolvedTemplate, that.resolvedTemplate) && Objects.deepEquals(overlappingTemplates, that.overlappingTemplates) - && Objects.equals(rolloverConfiguration, that.rolloverConfiguration) - && Objects.equals(globalRetention, that.globalRetention); + && Objects.equals(rolloverConfiguration, that.rolloverConfiguration); } @Override public int hashCode() { - return Objects.hash(resolvedTemplate, overlappingTemplates, rolloverConfiguration, globalRetention); + return Objects.hash(resolvedTemplate, overlappingTemplates, rolloverConfiguration); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index eb90e95cb08be..6fcaad47e0d72 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -16,8 +16,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -28,7 +26,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -41,6 +38,7 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -74,7 +72,6 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea private final Set indexSettingProviders; private final 
ClusterSettings clusterSettings; private final boolean isDslOnlyMode; - private final DataStreamGlobalRetentionResolver globalRetentionResolver; @Inject public TransportSimulateIndexTemplateAction( @@ -87,8 +84,7 @@ public TransportSimulateIndexTemplateAction( NamedXContentRegistry xContentRegistry, IndicesService indicesService, SystemIndices systemIndices, - IndexSettingProviders indexSettingProviders, - DataStreamGlobalRetentionResolver globalRetentionResolver + IndexSettingProviders indexSettingProviders ) { super( SimulateIndexTemplateAction.NAME, @@ -108,7 +104,6 @@ public TransportSimulateIndexTemplateAction( this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); - this.globalRetentionResolver = globalRetentionResolver; } @Override @@ -118,7 +113,6 @@ protected void masterOperation( ClusterState state, ActionListener listener ) throws Exception { - final DataStreamGlobalRetention globalRetention = globalRetentionResolver.resolve(state); final ClusterState stateWithTemplate; if (request.getIndexTemplateRequest() != null) { // we'll "locally" add the template defined by the user in the cluster state (as if it existed in the system) @@ -144,7 +138,7 @@ protected void masterOperation( String matchingTemplate = findV2Template(stateWithTemplate.metadata(), request.getIndexName(), false); if (matchingTemplate == null) { - listener.onResponse(new SimulateIndexTemplateResponse(null, null, null)); + listener.onResponse(new SimulateIndexTemplateResponse(null, null)); return; } @@ -172,12 +166,11 @@ protected void masterOperation( new SimulateIndexTemplateResponse( template, overlapping, - clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), - globalRetention + clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) ) ); } else { - listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping, globalRetention)); + listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index b1d6d2814f157..ead00dc858a47 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -15,21 +15,19 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionResolver; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.concurrent.EsExecutors; 
import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -60,7 +58,6 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi private final Set indexSettingProviders; private final ClusterSettings clusterSettings; private final boolean isDslOnlyMode; - private final DataStreamGlobalRetentionResolver globalRetentionResolver; @Inject public TransportSimulateTemplateAction( @@ -73,8 +70,7 @@ public TransportSimulateTemplateAction( NamedXContentRegistry xContentRegistry, IndicesService indicesService, SystemIndices systemIndices, - IndexSettingProviders indexSettingProviders, - DataStreamGlobalRetentionResolver globalRetentionResolver + IndexSettingProviders indexSettingProviders ) { super( SimulateTemplateAction.NAME, @@ -94,7 +90,6 @@ public TransportSimulateTemplateAction( this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); - this.globalRetentionResolver = globalRetentionResolver; } @Override @@ -104,7 +99,6 @@ protected void masterOperation( ClusterState state, ActionListener listener ) throws Exception { - final DataStreamGlobalRetention globalRetention = globalRetentionResolver.resolve(state); String uuid = UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT); final String temporaryIndexName = "simulate_template_index_" + uuid; final ClusterState stateWithTemplate; @@ -182,12 +176,11 @@ protected void masterOperation( new SimulateIndexTemplateResponse( template, overlapping, - clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING), - globalRetention + clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) ) ); } else { - listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping, globalRetention)); + listener.onResponse(new SimulateIndexTemplateResponse(template, overlapping)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java index 335c0781fb884..38e9918aae853 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java @@ -22,10 +22,10 @@ import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java index 86c6109469477..34a0d2e358acf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java @@ -28,12 +28,12 @@ import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 5e053a1e9fc7c..8611d2688f5d2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -22,10 +22,10 @@ import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 1c3d168b8889d..5e0d9c9338e0c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -26,13 +26,13 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.internal.AliasFilter; import 
org.elasticsearch.search.internal.SearchContext; @@ -121,7 +121,11 @@ protected void doExecute(Task task, ValidateQueryRequest request, ActionListener if (request.query() == null) { rewriteListener.onResponse(request.query()); } else { - Rewriteable.rewriteAndFetch(request.query(), searchService.getRewriteContext(timeProvider, resolvedIndices), rewriteListener); + Rewriteable.rewriteAndFetch( + request.query(), + searchService.getRewriteContext(timeProvider, resolvedIndices, null), + rewriteListener + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 425461d1f4ba1..7c1304f92eefd 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -101,11 +101,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(primaryResponse); } - public void writeThin(StreamOutput out) throws IOException { - out.writeVInt(id); - DocWriteRequest.writeDocumentRequestThin(out, request); - out.writeOptionalWriteable(primaryResponse == null ? null : primaryResponse::writeThin); - } + public static final Writer THIN_WRITER = (out, item) -> { + out.writeVInt(item.id); + DocWriteRequest.writeDocumentRequestThin(out, item.request); + out.writeOptional(BulkItemResponse.THIN_WRITER, item.primaryResponse); + }; @Override public long ramBytesUsed() { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 151e8795d0f82..d3e550eaf05b3 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -264,7 +264,7 @@ public String toString() { id = in.readVInt(); opType = OpType.fromId(in.readByte()); response = readResponse(shardId, in); - failure = in.readBoolean() ? new Failure(in) : null; + failure = in.readOptionalWriteable(Failure::new); assertConsistent(); } @@ -272,7 +272,7 @@ public String toString() { id = in.readVInt(); opType = OpType.fromId(in.readByte()); response = readResponse(in); - failure = in.readBoolean() ? 
new Failure(in) : null; + failure = in.readOptionalWriteable(Failure::new); assertConsistent(); } @@ -384,31 +384,21 @@ public void writeTo(StreamOutput out) throws IOException { writeResponseType(out); response.writeTo(out); } - if (failure == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - failure.writeTo(out); - } + out.writeOptionalWriteable(failure); } - public void writeThin(StreamOutput out) throws IOException { - out.writeVInt(id); - out.writeByte(opType.getId()); + public static final Writer THIN_WRITER = (out, item) -> { + out.writeVInt(item.id); + out.writeByte(item.opType.getId()); - if (response == null) { + if (item.response == null) { out.writeByte((byte) 2); } else { - writeResponseType(out); - response.writeThin(out); + item.writeResponseType(out); + item.response.writeThin(out); } - if (failure == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - failure.writeTo(out); - } - } + out.writeOptionalWriteable(item.failure); + }; private void writeResponseType(StreamOutput out) throws IOException { if (response instanceof SimulateIndexResponse) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 258e5b4c9a58d..813203afe42c5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -10,7 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -91,6 +93,7 @@ final class BulkOperation extends ActionRunnable { private final OriginSettingClient rolloverClient; private final Set failureStoresToBeRolledOver = ConcurrentCollections.newConcurrentSet(); private final Set failedRolloverRequests = ConcurrentCollections.newConcurrentSet(); + private final FailureStoreMetrics failureStoreMetrics; BulkOperation( Task task, @@ -104,7 +107,8 @@ final class BulkOperation extends ActionRunnable { IndexNameExpressionResolver indexNameExpressionResolver, LongSupplier relativeTimeProvider, long startTimeNanos, - ActionListener listener + ActionListener listener, + FailureStoreMetrics failureStoreMetrics ) { this( task, @@ -120,7 +124,8 @@ final class BulkOperation extends ActionRunnable { startTimeNanos, listener, new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()), - new FailureStoreDocumentConverter() + new FailureStoreDocumentConverter(), + failureStoreMetrics ); } @@ -138,7 +143,8 @@ final class BulkOperation extends ActionRunnable { long startTimeNanos, ActionListener listener, ClusterStateObserver observer, - FailureStoreDocumentConverter failureStoreDocumentConverter + FailureStoreDocumentConverter failureStoreDocumentConverter, + FailureStoreMetrics failureStoreMetrics ) { super(listener); this.task = task; @@ -156,6 +162,7 @@ final class BulkOperation extends ActionRunnable { this.observer = observer; this.failureStoreDocumentConverter = failureStoreDocumentConverter; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); + this.failureStoreMetrics = failureStoreMetrics; } @Override @@ -437,17 +444,11 @@ public void 
onResponse(BulkShardResponse bulkShardResponse) { for (int idx = 0; idx < bulkShardResponse.getResponses().length; idx++) { // We zip the requests and responses together so that we can identify failed documents and potentially store them BulkItemResponse bulkItemResponse = bulkShardResponse.getResponses()[idx]; + BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; if (bulkItemResponse.isFailed()) { - BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; assert bulkItemRequest.id() == bulkItemResponse.getItemId() : "Bulk items were returned out of order"; - - DataStream failureStoreReference = getRedirectTarget(bulkItemRequest.request(), getClusterState().metadata()); - if (failureStoreReference != null) { - maybeMarkFailureStoreForRollover(failureStoreReference); - var cause = bulkItemResponse.getFailure().getCause(); - addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreReference.getName()); - } + processFailure(bulkItemRequest, bulkItemResponse.getFailure().getCause()); addFailure(bulkItemResponse); } else { bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); @@ -464,11 +465,7 @@ public void onFailure(Exception e) { final String indexName = request.index(); DocWriteRequest docWriteRequest = request.request(); - DataStream failureStoreReference = getRedirectTarget(docWriteRequest, getClusterState().metadata()); - if (failureStoreReference != null) { - maybeMarkFailureStoreForRollover(failureStoreReference); - addDocumentToRedirectRequests(request, e, failureStoreReference.getName()); - } + processFailure(request, e); addFailure(docWriteRequest, request.id(), indexName, e); } completeShardOperation(); @@ -479,45 +476,56 @@ private void completeShardOperation() { clusterState = null; releaseOnFinish.close(); } + + private void processFailure(BulkItemRequest bulkItemRequest, Exception cause) { + var errorType = ElasticsearchException.getExceptionName(ExceptionsHelper.unwrapCause(cause)); + DocWriteRequest docWriteRequest = bulkItemRequest.request(); + DataStream failureStoreCandidate = getRedirectTargetCandidate(docWriteRequest, getClusterState().metadata()); + // If the candidate is not null, the BulkItemRequest targets a data stream, but we'll still have to check if + // it has the failure store enabled. + if (failureStoreCandidate != null) { + // Do not redirect documents to a failure store that were already headed to one. + var isFailureStoreDoc = docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore(); + if (isFailureStoreDoc == false && failureStoreCandidate.isFailureStoreEnabled()) { + // Redirect to failure store. + maybeMarkFailureStoreForRollover(failureStoreCandidate); + addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreCandidate.getName()); + failureStoreMetrics.incrementFailureStore( + bulkItemRequest.index(), + errorType, + FailureStoreMetrics.ErrorLocation.SHARD + ); + } else { + // If we can't redirect to a failure store (because either the data stream doesn't have the failure store enabled + // or this request was already targeting a failure store), we increment the rejected counter. + failureStoreMetrics.incrementRejected( + bulkItemRequest.index(), + errorType, + FailureStoreMetrics.ErrorLocation.SHARD, + isFailureStoreDoc + ); + } + } + } }); } /** - * Determines if the write request can be redirected if it fails. Write requests can be redirected IFF they are targeting a data stream - * with a failure store and are not already redirected themselves. 
If the document can be redirected, the data stream name to use for - * the redirection is returned. + * Tries to find a candidate redirect target for this write request. A candidate redirect target is a data stream that may or + * may not have the failure store enabled. * * @param docWriteRequest the write request to check * @param metadata cluster state metadata for resolving index abstractions - * @return a data stream if the write request points to a data stream that has the failure store enabled, or {@code null} if it does not + * @return a data stream if the write request points to a data stream, or {@code null} if it does not */ - private static DataStream getRedirectTarget(DocWriteRequest docWriteRequest, Metadata metadata) { + private static DataStream getRedirectTargetCandidate(DocWriteRequest docWriteRequest, Metadata metadata) { // Feature flag guard if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { return null; } - // Do not resolve a failure store for documents that were already headed to one - if (docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore()) { - return null; - } // If there is no index abstraction, then the request is using a pattern of some sort, which data streams do not support IndexAbstraction ia = metadata.getIndicesLookup().get(docWriteRequest.index()); - if (ia == null) { - return null; - } - if (ia.isDataStreamRelated()) { - // The index abstraction could be an alias. Alias abstractions (even for data streams) only keep track of which _index_ they - // will write to, not which _data stream_. - // We work backward to find the data stream from the concrete write index to cover this case. - Index concreteIndex = ia.getWriteIndex(); - IndexAbstraction writeIndexAbstraction = metadata.getIndicesLookup().get(concreteIndex.getName()); - DataStream parentDataStream = writeIndexAbstraction.getParentDataStream(); - if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { - // Keep the data stream name around to resolve the redirect to failure store if the shard level request fails. - return parentDataStream; - } - } - return null; + return DataStream.resolveDataStream(ia, metadata); } /** diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java index ac3a6be70d29f..28eef30f9185d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java @@ -18,7 +18,6 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; import java.util.Arrays; import java.util.List; @@ -63,7 +62,6 @@ enum ItemProcessingState { private BulkItemResponse executionResult; private int updateRetryCounter; private long noopMappingUpdateRetryForMappingVersion; - private DocumentSizeObserver documentSizeObserver = DocumentSizeObserver.EMPTY_INSTANCE; BulkPrimaryExecutionContext(BulkShardRequest request, IndexShard primary) { this.request = request; @@ -369,12 +367,4 @@ private boolean assertInvariants(ItemProcessingState... 
expectedCurrentState) { } return true; } - - public void setDocumentSizeObserver(DocumentSizeObserver documentSizeObserver) { - this.documentSizeObserver = documentSizeObserver; - } - - public DocumentSizeObserver getDocumentSizeObserver() { - return documentSizeObserver; - } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 83b572afb2853..1a8bdb1c885c6 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -87,6 +87,9 @@ public BulkRequest(StreamInput in) throws IOException { requests.addAll(in.readCollectionAsList(i -> DocWriteRequest.readDocumentRequest(null, i))); refreshPolicy = RefreshPolicy.readFrom(in); timeout = in.readTimeValue(); + for (DocWriteRequest request : requests) { + indices.add(Objects.requireNonNull(request.index(), "request index must not be null")); + } } public BulkRequest(@Nullable String globalIndex) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index 75ab08de942dc..898bfd0e1652c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -139,7 +139,7 @@ public void parse( XContent xContent = xContentType.xContent(); int line = 0; int from = 0; - byte marker = xContent.streamSeparator(); + byte marker = xContent.bulkSeparator(); // Bulk requests can contain a lot of repeated strings for the index, pipeline and routing parameters. This map is used to // deduplicate duplicate strings parsed for these parameters. While it does not prevent instantiating the duplicate strings, it // reduces their lifetime to the lifetime of this parse call instead of the lifetime of the full bulk request. 
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 0d2942e688382..f7860c47d8b73 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -130,14 +130,7 @@ public void writeTo(StreamOutput out) throws IOException { throw new IllegalStateException("Inference metadata should have been consumed before writing to the stream"); } super.writeTo(out); - out.writeArray((o, item) -> { - if (item != null) { - o.writeBoolean(true); - item.writeThin(o); - } else { - o.writeBoolean(false); - } - }, items); + out.writeArray((o, item) -> o.writeOptional(BulkItemRequest.THIN_WRITER, item), items); if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_VALIDATES_MAPPINGS)) { out.writeBoolean(isSimulated); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 3eeb96546c9b0..eb1bb0468c9bb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -56,6 +56,6 @@ public void setForcedRefresh(boolean forcedRefresh) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - out.writeArray((o, item) -> item.writeThin(o), responses); + out.writeArray(BulkItemResponse.THIN_WRITER, responses); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java index cc9f9b8ee1ce7..527a886905aaf 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java @@ -32,6 +32,8 @@ */ public class FailureStoreDocumentConverter { + private static final int STACKTRACE_PRINT_DEPTH = 2; + private static final Set INGEST_EXCEPTION_HEADERS = Set.of( PIPELINE_ORIGIN_EXCEPTION_HEADER, PROCESSOR_TAG_EXCEPTION_HEADER, @@ -68,20 +70,14 @@ public IndexRequest transformFailedRequest( Supplier timeSupplier ) throws IOException { return new IndexRequest().index(targetIndexName) - .source(createSource(source, exception, targetIndexName, timeSupplier)) + .source(createSource(source, exception, timeSupplier)) .opType(DocWriteRequest.OpType.CREATE) .setWriteToFailureStore(true); } - private static XContentBuilder createSource( - IndexRequest source, - Exception exception, - String targetIndexName, - Supplier timeSupplier - ) throws IOException { + private static XContentBuilder createSource(IndexRequest source, Exception exception, Supplier timeSupplier) throws IOException { Objects.requireNonNull(source, "source must not be null"); Objects.requireNonNull(exception, "exception must not be null"); - Objects.requireNonNull(targetIndexName, "targetIndexName must not be null"); Objects.requireNonNull(timeSupplier, "timeSupplier must not be null"); Throwable unwrapped = ExceptionsHelper.unwrapCause(exception); XContentBuilder builder = JsonXContent.contentBuilder(); @@ -96,7 +92,9 @@ private static XContentBuilder createSource( if (source.routing() != null) { builder.field("routing", source.routing()); } - builder.field("index", targetIndexName); + if (source.index() != null) { + 
builder.field("index", source.index()); + } // Unmapped source field builder.startObject("source"); { @@ -109,7 +107,7 @@ private static XContentBuilder createSource( { builder.field("type", ElasticsearchException.getExceptionName(unwrapped)); builder.field("message", unwrapped.getMessage()); - builder.field("stack_trace", ExceptionsHelper.stackTrace(unwrapped)); + builder.field("stack_trace", ExceptionsHelper.limitedStackTrace(unwrapped, STACKTRACE_PRINT_DEPTH)); // Try to find the IngestProcessorException somewhere in the stack trace. Since IngestProcessorException is package-private, // we can't instantiate it in tests, so we'll have to check for the headers directly. var ingestException = ExceptionsHelper.unwrapCausesAndSuppressed( diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java new file mode 100644 index 0000000000000..5a36f10785790 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.Map; + +/** + * A class containing APM metrics for failure stores. See the JavaDoc on the individual methods for an explanation on what they're tracking. + * General notes: + *

+ * <ul> + *     <li> When a document is rerouted in a pipeline, the destination data stream is used for the metric attribute(s). </li> + * </ul>
+ */ +public class FailureStoreMetrics { + + public static final FailureStoreMetrics NOOP = new FailureStoreMetrics(MeterRegistry.NOOP); + + public static final String METRIC_TOTAL = "es.data_stream.ingest.documents.total"; + public static final String METRIC_FAILURE_STORE = "es.data_stream.ingest.documents.failure_store.total"; + public static final String METRIC_REJECTED = "es.data_stream.ingest.documents.rejected.total"; + + private final LongCounter totalCounter; + private final LongCounter failureStoreCounter; + private final LongCounter rejectedCounter; + + public FailureStoreMetrics(MeterRegistry meterRegistry) { + totalCounter = meterRegistry.registerLongCounter(METRIC_TOTAL, "total number of documents that were sent to a data stream", "unit"); + failureStoreCounter = meterRegistry.registerLongCounter( + METRIC_FAILURE_STORE, + "number of documents that got redirected to the failure store", + "unit" + ); + rejectedCounter = meterRegistry.registerLongCounter(METRIC_REJECTED, "number of documents that were rejected", "unit"); + } + + /** + * This counter tracks the number of documents that we tried to index into a data stream. This includes documents + * that were dropped by a pipeline. This counter will only be incremented once for every incoming document (even when it gets + * redirected to the failure store and/or gets rejected). + * @param dataStream the name of the data stream + */ + public void incrementTotal(String dataStream) { + totalCounter.incrementBy(1, Map.of("data_stream", dataStream)); + } + + /** + * This counter tracks the number of documents that we tried to store into a failure store. This includes both pipeline and + * shard-level failures. + * @param dataStream the name of the data stream + * @param errorType the error type (i.e. the name of the exception that was thrown) + * @param errorLocation where this failure occurred + */ + public void incrementFailureStore(String dataStream, String errorType, ErrorLocation errorLocation) { + failureStoreCounter.incrementBy( + 1, + Map.of("data_stream", dataStream, "error_type", errorType, "error_location", errorLocation.name()) + ); + } + + /** + * This counter tracks the number of documents that failed to get stored in Elasticsearch. Meaning, any document that did not get + * stored in the data stream or in its failure store. + * @param dataStream the name of the data stream + * @param errorType the error type (i.e. 
the name of the exception that was thrown) + * @param errorLocation where this failure occurred + * @param failureStore whether this failure occurred while trying to ingest into a failure store (true) or in the data + * stream itself (false) + */ + public void incrementRejected(String dataStream, String errorType, ErrorLocation errorLocation, boolean failureStore) { + rejectedCounter.incrementBy( + 1, + Map.of( + "data_stream", + dataStream, + "error_type", + errorType, + "error_location", + errorLocation.name(), + "failure_store", + failureStore + ) + ); + } + + public enum ErrorLocation { + PIPELINE, + SHARD; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java index ff306cfb08745..74864abe3ec50 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -56,7 +56,7 @@ public abstract class TransportAbstractBulkAction extends HandledTransportAction protected final SystemIndices systemIndices; private final IngestService ingestService; private final IngestActionForwarder ingestForwarder; - protected final LongSupplier relativeTimeProvider; + protected final LongSupplier relativeTimeNanosProvider; protected final Executor writeExecutor; protected final Executor systemWriteExecutor; private final ActionType bulkAction; @@ -71,7 +71,7 @@ public TransportAbstractBulkAction( IngestService ingestService, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeNanosProvider ) { super(action.name(), transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; @@ -83,7 +83,7 @@ public TransportAbstractBulkAction( this.systemWriteExecutor = threadPool.executor(ThreadPool.Names.SYSTEM_WRITE); this.ingestForwarder = new IngestActionForwarder(transportService); clusterService.addStateApplier(this.ingestForwarder); - this.relativeTimeProvider = relativeTimeProvider; + this.relativeTimeNanosProvider = relativeTimeNanosProvider; this.bulkAction = action; } @@ -216,13 +216,13 @@ private void processBulkIndexIngestRequest( Metadata metadata, ActionListener listener ) { - final long ingestStartTimeInNanos = System.nanoTime(); + final long ingestStartTimeInNanos = relativeTimeNanos(); final BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); getIngestService(original).executeBulkRequest( original.numberOfActions(), () -> bulkRequestModifier, bulkRequestModifier::markItemAsDropped, - (indexName) -> shouldStoreFailure(indexName, metadata, threadPool.absoluteTimeInMillis()), + (indexName) -> resolveFailureStore(indexName, metadata, threadPool.absoluteTimeInMillis()), bulkRequestModifier::markItemForFailureStore, bulkRequestModifier::markItemAsFailed, (originalThread, exception) -> { @@ -230,7 +230,7 @@ private void processBulkIndexIngestRequest( logger.debug("failed to execute pipeline for a bulk request", exception); listener.onFailure(exception); } else { - long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); + long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeNanos() - ingestStartTimeInNanos); BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); ActionListener actionListener = 
bulkRequestModifier.wrapActionListenerIfNeeded( ingestTookInMillis, @@ -274,13 +274,15 @@ public boolean isForceExecution() { /** * Determines if an index name is associated with either an existing data stream or a template * for one that has the failure store enabled. + * * @param indexName The index name to check. * @param metadata Cluster state metadata. * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store - * or if it matches a template that has a data stream failure store enabled. + * or if it matches a template that has a data stream failure store enabled. Returns false if the index name corresponds to a + * data stream, but it doesn't have the failure store enabled. Returns null when it doesn't correspond to a data stream. */ - protected abstract boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis); + protected abstract Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis); /** * Retrieves the {@link IndexRequest} from the provided {@link DocWriteRequest} for index or upsert actions. Upserts are @@ -307,12 +309,12 @@ protected IngestService getIngestService(BulkRequest request) { return ingestService; } - protected long relativeTime() { - return relativeTimeProvider.getAsLong(); + protected long relativeTimeNanos() { + return relativeTimeNanosProvider.getAsLong(); } protected long buildTookInMillis(long startTimeNanos) { - return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos); + return TimeUnit.NANOSECONDS.toMillis(relativeTimeNanos() - startTimeNanos); } private void applyPipelinesAndDoInternalExecute( @@ -321,9 +323,9 @@ private void applyPipelinesAndDoInternalExecute( Executor executor, ActionListener listener ) { - final long relativeStartTime = threadPool.relativeTimeInMillis(); + final long relativeStartTimeNanos = relativeTimeNanos(); if (applyPipelines(task, bulkRequest, executor, listener) == false) { - doInternalExecute(task, bulkRequest, executor, listener, relativeStartTime); + doInternalExecute(task, bulkRequest, executor, listener, relativeStartTimeNanos); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index d9d5bc92a24d1..bdda4ff487f6b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -39,16 +39,15 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -57,7 +56,6 @@ import java.util.HashSet; import 
java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.SortedMap; import java.util.concurrent.Executor; @@ -82,6 +80,7 @@ public class TransportBulkAction extends TransportAbstractBulkAction { private final NodeClient client; private final IndexNameExpressionResolver indexNameExpressionResolver; private final OriginSettingClient rolloverClient; + private final FailureStoreMetrics failureStoreMetrics; @Inject public TransportBulkAction( @@ -94,7 +93,8 @@ public TransportBulkAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, - SystemIndices systemIndices + SystemIndices systemIndices, + FailureStoreMetrics failureStoreMetrics ) { this( threadPool, @@ -107,7 +107,8 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - System::nanoTime + threadPool::relativeTimeInNanos, + failureStoreMetrics ); } @@ -122,7 +123,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + FailureStoreMetrics failureStoreMetrics ) { this( TYPE, @@ -137,7 +139,8 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - relativeTimeProvider + relativeTimeProvider, + failureStoreMetrics ); } @@ -154,7 +157,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + FailureStoreMetrics failureStoreMetrics ) { super( bulkAction, @@ -173,6 +177,7 @@ public TransportBulkAction( this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); + this.failureStoreMetrics = failureStoreMetrics; } public static ActionListener unwrappingSingleItemBulkResponse( @@ -197,8 +202,10 @@ protected void doInternalExecute( BulkRequest bulkRequest, Executor executor, ActionListener listener, - long relativeStartTime + long relativeStartTimeNanos ) { + trackIndexRequests(bulkRequest); + Map indicesToAutoCreate = new HashMap<>(); Set dataStreamsToBeRolledOver = new HashSet<>(); Set failureStoresToBeRolledOver = new HashSet<>(); @@ -212,10 +219,31 @@ protected void doInternalExecute( indicesToAutoCreate, dataStreamsToBeRolledOver, failureStoresToBeRolledOver, - relativeStartTime + relativeStartTimeNanos ); } + /** + * Track the number of index requests in our APM metrics. We'll track almost all docs here (pipeline or no pipeline, + * failure store or original), but some docs don't reach this place (dropped and rejected docs), so we increment for those docs in + * different places. + */ + private void trackIndexRequests(BulkRequest bulkRequest) { + final Metadata metadata = clusterService.state().metadata(); + for (DocWriteRequest request : bulkRequest.requests) { + if (request instanceof IndexRequest == false) { + continue; + } + String resolvedIndexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); + IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(resolvedIndexName); + DataStream dataStream = DataStream.resolveDataStream(indexAbstraction, metadata); + // We only track index requests into data streams. 
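+            // Illustrative note: each such increment records the es.data_stream.ingest.documents.total counter with a
+            // single "data_stream" attribute, roughly totalCounter.incrementBy(1, Map.of("data_stream", dataStream.getName())).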
+ if (dataStream != null) { + failureStoreMetrics.incrementTotal(dataStream.getName()); + } + } + } + /** * Determine all the targets (i.e. indices, data streams, failure stores) that require an action before we can proceed with the bulk * request. Indices might need to be created, and data streams and failure stores might need to be rolled over when they're marked @@ -309,19 +337,19 @@ protected void createMissingIndicesAndIndexData( Map indicesToAutoCreate, Set dataStreamsToBeRolledOver, Set failureStoresToBeRolledOver, - long startTime + long startTimeNanos ) { final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); // Optimizing when there are no prerequisite actions if (indicesToAutoCreate.isEmpty() && dataStreamsToBeRolledOver.isEmpty() && failureStoresToBeRolledOver.isEmpty()) { - executeBulk(task, bulkRequest, startTime, listener, executor, responses, Map.of()); + executeBulk(task, bulkRequest, startTimeNanos, listener, executor, responses, Map.of()); return; } final Map indicesThatCannotBeCreated = new HashMap<>(); Runnable executeBulkRunnable = () -> executor.execute(new ActionRunnable<>(listener) { @Override protected void doRun() { - executeBulk(task, bulkRequest, startTime, listener, executor, responses, indicesThatCannotBeCreated); + executeBulk(task, bulkRequest, startTimeNanos, listener, executor, responses, indicesThatCannotBeCreated); } }); try (RefCountingRunnable refs = new RefCountingRunnable(executeBulkRunnable)) { @@ -533,31 +561,31 @@ void executeBulk( responses, indicesThatCannotBeCreated, indexNameExpressionResolver, - relativeTimeProvider, + relativeTimeNanosProvider, startTimeNanos, - listener + listener, + failureStoreMetrics ).run(); } /** - * Determines if an index name is associated with either an existing data stream or a template - * for one that has the failure store enabled. - * @param indexName The index name to check. - * @param metadata Cluster state metadata. - * @param epochMillis A timestamp to use when resolving date math in the index name. - * @return true if the given index name corresponds to a data stream with a failure store, - * or if it matches a template that has a data stream failure store enabled. + * See {@link #resolveFailureStore(String, Metadata, long)} */ - static boolean shouldStoreFailureInternal(String indexName, Metadata metadata, long epochMillis) { - return DataStream.isFailureStoreFeatureFlagEnabled() - && resolveFailureStoreFromMetadata(indexName, metadata, epochMillis).or( - () -> resolveFailureStoreFromTemplate(indexName, metadata) - ).orElse(false); + // Visibility for testing + static Boolean resolveFailureInternal(String indexName, Metadata metadata, long epochMillis) { + if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { + return null; + } + var resolution = resolveFailureStoreFromMetadata(indexName, metadata, epochMillis); + if (resolution != null) { + return resolution; + } + return resolveFailureStoreFromTemplate(indexName, metadata); } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long time) { - return shouldStoreFailureInternal(indexName, metadata, time); + protected Boolean resolveFailureStore(String indexName, Metadata metadata, long time) { + return resolveFailureInternal(indexName, metadata, time); } /** @@ -567,30 +595,24 @@ protected boolean shouldStoreFailure(String indexName, Metadata metadata, long t * @param epochMillis A timestamp to use when resolving date math in the index name. 
* @return true if the given index name corresponds to an existing data stream with a failure store enabled. */ - private static Optional resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { + private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { if (indexName == null) { - return Optional.empty(); + return null; } // Get index abstraction, resolving date math if it exists IndexAbstraction indexAbstraction = metadata.getIndicesLookup() .get(IndexNameExpressionResolver.resolveDateMathExpression(indexName, epochMillis)); - - // We only store failures if the failure is being written to a data stream, - // not when directly writing to backing indices/failure stores if (indexAbstraction == null || indexAbstraction.isDataStreamRelated() == false) { - return Optional.empty(); + return null; } - // Locate the write index for the abstraction, and check if it has a data stream associated with it. - // This handles alias resolution as well as data stream resolution. - Index writeIndex = indexAbstraction.getWriteIndex(); - assert writeIndex != null : "Could not resolve write index for resource [" + indexName + "]"; - IndexAbstraction writeAbstraction = metadata.getIndicesLookup().get(writeIndex.getName()); - DataStream targetDataStream = writeAbstraction.getParentDataStream(); + // We only store failures if the failure is being written to a data stream, + // not when directly writing to backing indices/failure stores + DataStream targetDataStream = DataStream.resolveDataStream(indexAbstraction, metadata); // We will store the failure if the write target belongs to a data stream with a failure store. - return Optional.of(targetDataStream != null && targetDataStream.isFailureStoreEnabled()); + return targetDataStream != null && targetDataStream.isFailureStoreEnabled(); } /** @@ -599,9 +621,9 @@ private static Optional resolveFailureStoreFromMetadata(String indexNam * @param metadata Cluster state metadata. * @return true if the given index name corresponds to an index template with a data stream failure store enabled. 
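     * Illustratively: the template resolution yields the matched composable template's failure store setting when that
     * template defines a data stream, and {@code null} when no matching data stream template exists.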
*/ - private static Optional resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { + private static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { if (indexName == null) { - return Optional.empty(); + return null; } // Check to see if the index name matches any templates such that an index would have been attributed @@ -612,11 +634,11 @@ private static Optional resolveFailureStoreFromTemplate(String indexNam ComposableIndexTemplate composableIndexTemplate = metadata.templatesV2().get(template); if (composableIndexTemplate.getDataStreamTemplate() != null) { // Check if the data stream has the failure store enabled - return Optional.of(composableIndexTemplate.getDataStreamTemplate().hasFailureStore()); + return composableIndexTemplate.getDataStreamTemplate().hasFailureStore(); } } // Could not locate a failure store via template - return Optional.empty(); + return null; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index fc9df7bbf73b9..ac9ceb44e5b76 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; @@ -57,9 +56,10 @@ import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -117,7 +117,8 @@ public TransportShardBulkAction( ExecutorSelector.getWriteExecutorForShard(threadPool), PrimaryActionExecution.RejectOnOverload, indexingPressure, - systemIndices + systemIndices, + ReplicaActionExecution.SubjectToCircuitBreaker ); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; @@ -363,16 +364,14 @@ static boolean executeBulkItemRequest( } else { final IndexRequest request = context.getRequestToExecute(); - DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(request); - - context.setDocumentSizeObserver(documentSizeObserver); + XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(request); final SourceToParse sourceToParse = new SourceToParse( request.id(), request.source(), request.getContentType(), request.routing(), request.getDynamicTemplates(), - documentSizeObserver + meteringParserDecorator ); result = primary.applyIndexOperationOnPrimary( version, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java 
b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index c08ed6413a7a1..2312a75b91084 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexingPressure; @@ -31,7 +30,8 @@ import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.SimulateIngestService; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -68,7 +68,7 @@ public TransportSimulateBulkAction( ingestService, indexingPressure, systemIndices, - System::nanoTime + threadPool::relativeTimeInNanos ); this.indicesService = indicesService; } @@ -79,7 +79,7 @@ protected void doInternalExecute( BulkRequest bulkRequest, Executor executor, ActionListener listener, - long relativeStartTime + long relativeStartTimeNanos ) { final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); for (int i = 0; i < bulkRequest.requests.size(); i++) { @@ -105,7 +105,7 @@ protected void doInternalExecute( ); } listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(relativeStartTime)) + new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(relativeStartTimeNanos)) ); } @@ -122,7 +122,7 @@ private Exception validateMappings(IndexRequest request) { request.getContentType(), request.routing(), request.getDynamicTemplates(), - DocumentSizeObserver.EMPTY_INSTANCE + XContentMeteringParserDecorator.NOOP ); ClusterState state = clusterService.state(); @@ -166,8 +166,8 @@ protected IngestService getIngestService(BulkRequest request) { } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long time) { + protected Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis) { // A simulate bulk request should not change any persistent state in the system, so we never write to the failure store - return false; + return null; } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java index 40060d5e5d927..7c788b10405fc 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java @@ -14,9 +14,11 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Objects; @@ -35,18 +37,23 @@ public static class Request extends AcknowledgedRequest implements Indi private final String name; private final long startTime; - public Request(String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(masterNodeTimeout, ackTimeout); this.name = name; this.startTime = System.currentTimeMillis(); } - public Request(String name, long startTime) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name, long startTime) { + super(masterNodeTimeout, ackTimeout); this.name = name; this.startTime = startTime; } + @Deprecated(forRemoval = true) // temporary compatibility shim + public Request(String name) { + this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, name); + } + public String getName() { return name; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java index 5b79eae0cebfd..1a62e347012fe 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Arrays; @@ -46,12 +47,17 @@ public static class Request extends MasterNodeRequest implements Indice private final boolean wildcardExpressionsOriginallySpecified; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); - public Request(String... names) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout, String... names) { + super(masterNodeTimeout); this.names = Objects.requireNonNull(names); this.wildcardExpressionsOriginallySpecified = Arrays.stream(names).anyMatch(Regex::isSimpleMatchPattern); } + @Deprecated(forRemoval = true) // temporary compatibility shim + public Request(String... 
names) { + this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, names); + } + public String[] getNames() { return names; } @@ -107,7 +113,7 @@ public IndicesOptions indicesOptions() { return indicesOptions; } - public IndicesRequest indicesOptions(IndicesOptions options) { + public Request indicesOptions(IndicesOptions options) { this.indicesOptions = options; return this; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 841a2df5eada6..d09a90f9de4f0 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.DataStream; @@ -25,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -57,17 +59,22 @@ public static class Request extends MasterNodeReadRequest implements In private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); private boolean includeDefaults = false; - public Request(String[] names) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout, String[] names) { + super(masterNodeTimeout); this.names = names; } - public Request(String[] names, boolean includeDefaults) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout, String[] names, boolean includeDefaults) { + super(masterNodeTimeout); this.names = names; this.includeDefaults = includeDefaults; } + @Deprecated(forRemoval = true) // temporary compatibility shim + public Request(String[] names) { + this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, names); + } + public String[] getNames() { return names; } @@ -312,8 +319,7 @@ public XContentBuilder toXContent( } if (dataStream.getLifecycle() != null) { builder.field(LIFECYCLE_FIELD.getPreferredName()); - dataStream.getLifecycle() - .toXContent(builder, params, rolloverConfiguration, dataStream.isSystem() ? null : globalRetention); + dataStream.getLifecycle().toXContent(builder, params, rolloverConfiguration, globalRetention, dataStream.isInternal()); } if (ilmPolicyName != null) { builder.field(ILM_POLICY_FIELD.getPreferredName(), ilmPolicyName); @@ -511,7 +517,7 @@ public Response(StreamInput in) throws IOException { this( in.readCollectionAsList(DataStreamInfo::new), in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(RolloverConfiguration::new) : null, - in.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? 
in.readOptionalWriteable(DataStreamGlobalRetention::read) : null ); @@ -537,7 +543,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeOptionalWriteable(globalRetention); } } @@ -549,7 +555,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (DataStreamInfo dataStream : dataStreams) { dataStream.toXContent( builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/MigrateToDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/MigrateToDataStreamAction.java index 226b8d44f636c..894708a282ea9 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/MigrateToDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/MigrateToDataStreamAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Objects; @@ -34,8 +35,8 @@ public static class Request extends AcknowledgedRequest actions) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, List actions) { + super(masterNodeTimeout, ackTimeout); this.actions = Collections.unmodifiableList(actions); } @@ -98,13 +99,22 @@ public ActionRequestValidationException validate() { return null; } + public interface Factory { + Request create(List actions); + } + @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_stream_actions", - args -> new Request(((List) args[0])) + false, + (args, factory) -> factory.create((List) args[0]) ); static { - PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), DataStreamAction.PARSER, new ParseField("actions")); + PARSER.declareObjectArray( + ConstructingObjectParser.constructorArg(), + (p, c) -> DataStreamAction.PARSER.parse(p, null), + new ParseField("actions") + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/PromoteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/PromoteDataStreamAction.java index 0853d30d22de4..f423969b617f7 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/PromoteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/PromoteDataStreamAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Objects; @@ -34,8 +35,8 @@ public static class Request extends MasterNodeRequest implements In private boolean includeDefaults; private IndicesOptions indicesOptions = 
IndicesOptions.strictExpandOpen(); - public Request(String[] names) { - this(names, false); + public Request(TimeValue masterNodeTimeout, String[] names) { + this(masterNodeTimeout, names, false); } - public Request(String[] names, boolean includeDefaults) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout, String[] names, boolean includeDefaults) { + super(masterNodeTimeout); this.names = names; this.includeDefaults = includeDefaults; } @@ -161,7 +162,7 @@ public Response(StreamInput in) throws IOException { super(in); this.indices = in.readCollectionAsList(ExplainIndexDataStreamLifecycle::new); this.rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); - this.globalRetention = in.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION) + this.globalRetention = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readOptionalWriteable(DataStreamGlobalRetention::read) : null; } @@ -182,7 +183,7 @@ public DataStreamGlobalRetention getGlobalRetention() { public void writeTo(StreamOutput out) throws IOException { out.writeCollection(indices); out.writeOptionalWriteable(rolloverConfiguration); - if (out.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeOptionalWriteable(globalRetention); } } @@ -216,7 +217,7 @@ public Iterator toXContentChunked(ToXContent.Params outerP builder.field(explainIndexDataLifecycle.getIndex()); explainIndexDataLifecycle.toXContent( builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(outerParams), + DataStreamLifecycle.addEffectiveRetentionParams(outerParams), rolloverConfiguration, globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainIndexDataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainIndexDataStreamLifecycle.java index 32be73a7b0960..962c2975f5998 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainIndexDataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainIndexDataStreamLifecycle.java @@ -45,7 +45,7 @@ public class ExplainIndexDataStreamLifecycle implements Writeable, ToXContentObj private final String index; private final boolean managedByLifecycle; - private final boolean isSystemDataStream; + private final boolean isInternalDataStream; @Nullable private final Long indexCreationDate; @Nullable @@ -61,7 +61,7 @@ public class ExplainIndexDataStreamLifecycle implements Writeable, ToXContentObj public ExplainIndexDataStreamLifecycle( String index, boolean managedByLifecycle, - boolean isSystemDataStream, + boolean isInternalDataStream, @Nullable Long indexCreationDate, @Nullable Long rolloverDate, @Nullable TimeValue generationDate, @@ -70,7 +70,7 @@ public ExplainIndexDataStreamLifecycle( ) { this.index = index; this.managedByLifecycle = managedByLifecycle; - this.isSystemDataStream = isSystemDataStream; + this.isInternalDataStream = isInternalDataStream; this.indexCreationDate = indexCreationDate; this.rolloverDate = rolloverDate; this.generationDateMillis = generationDate == null ? 
null : generationDate.millis(); @@ -82,9 +82,9 @@ public ExplainIndexDataStreamLifecycle(StreamInput in) throws IOException { this.index = in.readString(); this.managedByLifecycle = in.readBoolean(); if (in.getTransportVersion().onOrAfter(TransportVersions.NO_GLOBAL_RETENTION_FOR_SYSTEM_DATA_STREAMS)) { - this.isSystemDataStream = in.readBoolean(); + this.isInternalDataStream = in.readBoolean(); } else { - this.isSystemDataStream = false; + this.isInternalDataStream = false; } if (managedByLifecycle) { this.indexCreationDate = in.readOptionalLong(); @@ -141,7 +141,7 @@ public XContentBuilder toXContent( } if (this.lifecycle != null) { builder.field(LIFECYCLE_FIELD.getPreferredName()); - lifecycle.toXContent(builder, params, rolloverConfiguration, isSystemDataStream ? null : globalRetention); + lifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention, isInternalDataStream); } if (this.error != null) { if (error.firstOccurrenceTimestamp() != -1L && error.recordedTimestamp() != -1L && error.retryCount() != -1) { @@ -161,7 +161,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(index); out.writeBoolean(managedByLifecycle); if (out.getTransportVersion().onOrAfter(TransportVersions.NO_GLOBAL_RETENTION_FOR_SYSTEM_DATA_STREAMS)) { - out.writeBoolean(isSystemDataStream); + out.writeBoolean(isInternalDataStream); } if (managedByLifecycle) { out.writeOptionalLong(indexCreationDate); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index 9907310a8acbd..52af1341692eb 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; @@ -48,13 +49,13 @@ public static class Request extends MasterNodeReadRequest implements In private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); private boolean includeDefaults = false; - public Request(String[] names) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout, String[] names) { + super(masterNodeTimeout); this.names = names; } - public Request(String[] names, boolean includeDefaults) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout, String[] names, boolean includeDefaults) { + super(masterNodeTimeout); this.names = names; this.includeDefaults = includeDefaults; } @@ -142,7 +143,7 @@ public static class Response extends ActionResponse implements ChunkedToXContent public record DataStreamLifecycle( String dataStreamName, @Nullable org.elasticsearch.cluster.metadata.DataStreamLifecycle lifecycle, - boolean isSystemDataStream + boolean isInternalDataStream ) implements Writeable, ToXContentObject { public static final ParseField NAME_FIELD = new ParseField("name"); @@ -161,7 +162,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(dataStreamName); 
out.writeOptionalWriteable(lifecycle); if (out.getTransportVersion().onOrAfter(TransportVersions.NO_GLOBAL_RETENTION_FOR_SYSTEM_DATA_STREAMS)) { - out.writeBoolean(isSystemDataStream); + out.writeBoolean(isInternalDataStream); } } @@ -186,9 +187,10 @@ public XContentBuilder toXContent( builder.field(LIFECYCLE_FIELD.getPreferredName()); lifecycle.toXContent( builder, - org.elasticsearch.cluster.metadata.DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + org.elasticsearch.cluster.metadata.DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, - isSystemDataStream ? null : globalRetention + globalRetention, + isInternalDataStream ); } builder.endObject(); @@ -220,7 +222,7 @@ public Response(StreamInput in) throws IOException { this( in.readCollectionAsList(DataStreamLifecycle::new), in.readOptionalWriteable(RolloverConfiguration::new), - in.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readOptionalWriteable(DataStreamGlobalRetention::read) : null ); @@ -243,7 +245,7 @@ public DataStreamGlobalRetention getGlobalRetention() { public void writeTo(StreamOutput out) throws IOException { out.writeCollection(dataStreamLifecycles); out.writeOptionalWriteable(rolloverConfiguration); - if (out.getTransportVersion().onOrAfter(TransportVersions.USE_DATA_STREAM_GLOBAL_RETENTION)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeOptionalWriteable(globalRetention); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java index 7bb63ae27b526..c32fa849d9fd3 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java @@ -45,9 +45,14 @@ private PutDataStreamLifecycleAction() {/* no instances */} public static final class Request extends AcknowledgedRequest implements IndicesRequest.Replaceable, ToXContentObject { - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public interface Factory { + Request create(@Nullable TimeValue dataRetention, @Nullable Boolean enabled, @Nullable Downsampling downsampling); + } + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "put_data_stream_lifecycle_request", - args -> new Request(null, ((TimeValue) args[0]), (Boolean) args[1], (Downsampling) args[2]) + false, + (args, factory) -> factory.create((TimeValue) args[0], (Boolean) args[1], (Downsampling) args[2]) ); static { @@ -62,13 +67,13 @@ public static final class Request extends AcknowledgedRequest implement if (p.currentToken() == XContentParser.Token.VALUE_NULL) { return Downsampling.NULL; } else { - return new Downsampling(AbstractObjectParser.parseArray(p, c, Downsampling.Round::fromXContent)); + return new Downsampling(AbstractObjectParser.parseArray(p, null, Downsampling.Round::fromXContent)); } }, DOWNSAMPLING_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_NULL); } - public static Request parseRequest(XContentParser parser) { - return PARSER.apply(parser, null); + public static Request parseRequest(XContentParser parser, Factory factory) { + return PARSER.apply(parser, factory); } private String[] names; @@ -90,22 +95,35 @@ public void 
writeTo(StreamOutput out) throws IOException { out.writeWriteable(lifecycle); } - public Request(String[] names, @Nullable TimeValue dataRetention) { - this(names, dataRetention, null, null); + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String[] names, @Nullable TimeValue dataRetention) { + this(masterNodeTimeout, ackTimeout, names, dataRetention, null, null); } - public Request(String[] names, DataStreamLifecycle lifecycle) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String[] names, DataStreamLifecycle lifecycle) { + super(masterNodeTimeout, ackTimeout); this.names = names; this.lifecycle = lifecycle; } - public Request(String[] names, @Nullable TimeValue dataRetention, @Nullable Boolean enabled) { - this(names, dataRetention, enabled, null); - } - - public Request(String[] names, @Nullable TimeValue dataRetention, @Nullable Boolean enabled, @Nullable Downsampling downsampling) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public Request( + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String[] names, + @Nullable TimeValue dataRetention, + @Nullable Boolean enabled + ) { + this(masterNodeTimeout, ackTimeout, names, dataRetention, enabled, null); + } + + public Request( + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String[] names, + @Nullable TimeValue dataRetention, + @Nullable Boolean enabled, + @Nullable Downsampling downsampling + ) { + super(masterNodeTimeout, ackTimeout); this.names = names; this.lifecycle = DataStreamLifecycle.newBuilder() .dataRetention(dataRetention) diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index f2b1dc7cd556c..40810f004b0de 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -30,6 +30,7 @@ public class DeleteRequestBuilder extends ReplicationRequestBuilder request.nowInMillis; - Rewriteable.rewriteAndFetch(request.query(), searchService.getRewriteContext(timeProvider, resolvedIndices), rewriteListener); + Rewriteable.rewriteAndFetch(request.query(), searchService.getRewriteContext(timeProvider, resolvedIndices, null), rewriteListener); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java index 51cb05f981177..e435655668882 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java @@ -108,15 +108,15 @@ private FieldCapabilitiesIndexResponse doFetch( null, runtimeFields ); - + var indexMode = searchExecutionContext.getIndexSettings().getMode(); if (searcher != null && canMatchShard(shardId, indexFilter, nowInMillis, searchExecutionContext) == false) { - return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), null, Collections.emptyMap(), false); + return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), null, Collections.emptyMap(), false, indexMode); } final MappingMetadata mapping = indexService.getMetadata().mapping(); String indexMappingHash; if (includeEmptyFields || enableFieldHasValue == false) { - indexMappingHash = mapping != null ? 
mapping.getSha256() : null; + indexMappingHash = mapping != null ? mapping.getSha256() + indexMode : null; } else { // even if the mapping is the same if we return only fields with values we need // to make sure that we consider all the shard-mappings pair, that is why we @@ -129,7 +129,7 @@ private FieldCapabilitiesIndexResponse doFetch( indexMappingHash = fieldPredicate.modifyHash(indexMappingHash); final Map existing = indexMappingHashToResponses.get(indexMappingHash); if (existing != null) { - return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), indexMappingHash, existing, true); + return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), indexMappingHash, existing, true, indexMode); } } task.ensureNotCancelled(); @@ -145,7 +145,7 @@ private FieldCapabilitiesIndexResponse doFetch( if (indexMappingHash != null) { indexMappingHashToResponses.put(indexMappingHash, responseMap); } - return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), indexMappingHash, responseMap, true); + return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), indexMappingHash, responseMap, true, indexMode); } static Map retrieveFieldCaps( diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index cc72dd80dceac..5a50ed4c9f573 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexMode; import java.io.IOException; import java.util.ArrayList; @@ -33,18 +34,21 @@ public final class FieldCapabilitiesIndexResponse implements Writeable { private final Map responseMap; private final boolean canMatch; private final transient TransportVersion originVersion; + private final IndexMode indexMode; public FieldCapabilitiesIndexResponse( String indexName, @Nullable String indexMappingHash, Map responseMap, - boolean canMatch + boolean canMatch, + IndexMode indexMode ) { this.indexName = indexName; this.indexMappingHash = indexMappingHash; this.responseMap = responseMap; this.canMatch = canMatch; this.originVersion = TransportVersion.current(); + this.indexMode = indexMode; } FieldCapabilitiesIndexResponse(StreamInput in) throws IOException { @@ -57,6 +61,11 @@ public FieldCapabilitiesIndexResponse( } else { this.indexMappingHash = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + this.indexMode = IndexMode.readFrom(in); + } else { + this.indexMode = IndexMode.STANDARD; + } } @Override @@ -67,9 +76,12 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(MAPPING_HASH_VERSION)) { out.writeOptionalString(indexMappingHash); } + if (out.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + IndexMode.writeTo(indexMode, out); + } } - private record CompressedGroup(String[] indices, String mappingHash, int[] fields) {} + private record CompressedGroup(String[] indices, IndexMode indexMode, String mappingHash, int[] fields) {} static List readList(StreamInput input) throws IOException { if (input.getTransportVersion().before(MAPPING_HASH_VERSION)) { @@ -92,10 +104,12 @@ static List 
readList(StreamInput input) throws I private static void collectCompressedResponses(StreamInput input, int groups, ArrayList responses) throws IOException { final CompressedGroup[] compressedGroups = new CompressedGroup[groups]; + final boolean readIndexMode = input.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE); for (int i = 0; i < groups; i++) { final String[] indices = input.readStringArray(); + final IndexMode indexMode = readIndexMode ? IndexMode.readFrom(input) : IndexMode.STANDARD; final String mappingHash = input.readString(); - compressedGroups[i] = new CompressedGroup(indices, mappingHash, input.readIntArray()); + compressedGroups[i] = new CompressedGroup(indices, indexMode, mappingHash, input.readIntArray()); } final IndexFieldCapabilities[] ifcLookup = input.readArray(IndexFieldCapabilities::readFrom, IndexFieldCapabilities[]::new); for (CompressedGroup compressedGroup : compressedGroups) { @@ -105,7 +119,7 @@ private static void collectCompressedResponses(StreamInput input, int groups, Ar ifc.put(val.name(), val); } for (String index : compressedGroup.indices) { - responses.add(new FieldCapabilitiesIndexResponse(index, compressedGroup.mappingHash, ifc, true)); + responses.add(new FieldCapabilitiesIndexResponse(index, compressedGroup.mappingHash, ifc, true, compressedGroup.indexMode)); } } } @@ -117,7 +131,7 @@ private static void collectResponsesLegacyFormat(StreamInput input, int groups, final String mappingHash = input.readString(); final Map ifc = input.readMap(IndexFieldCapabilities::readFrom); for (String index : indices) { - responses.add(new FieldCapabilitiesIndexResponse(index, mappingHash, ifc, true)); + responses.add(new FieldCapabilitiesIndexResponse(index, mappingHash, ifc, true, IndexMode.STANDARD)); } } } @@ -164,6 +178,9 @@ private static void writeCompressedResponses(StreamOutput output, Map { o.writeCollection(fieldCapabilitiesIndexResponses, (oo, r) -> oo.writeString(r.indexName)); var first = fieldCapabilitiesIndexResponses.get(0); + if (output.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + IndexMode.writeTo(first.indexMode, o); + } o.writeString(first.indexMappingHash); o.writeVInt(first.responseMap.size()); for (IndexFieldCapabilities ifc : first.responseMap.values()) { @@ -192,6 +209,10 @@ public String getIndexMappingHash() { return indexMappingHash; } + public IndexMode getIndexMode() { + return indexMode; + } + public boolean canMatch() { return canMatch; } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index 41bf42b4e4e9c..bb97b0dc48c42 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.fieldcaps; import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -26,7 +27,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import 
org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -34,6 +34,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.search.SearchService; @@ -173,7 +174,13 @@ private void doExecuteForked( if (resp.canMatch() && resp.getIndexMappingHash() != null) { FieldCapabilitiesIndexResponse curr = indexMappingHashToResponses.putIfAbsent(resp.getIndexMappingHash(), resp); if (curr != null) { - resp = new FieldCapabilitiesIndexResponse(resp.getIndexName(), curr.getIndexMappingHash(), curr.get(), true); + resp = new FieldCapabilitiesIndexResponse( + resp.getIndexName(), + curr.getIndexMappingHash(), + curr.get(), + true, + curr.getIndexMode() + ); } } if (request.includeEmptyFields()) { @@ -185,7 +192,13 @@ private void doExecuteForked( } Map mergedCaps = new HashMap<>(a.get()); mergedCaps.putAll(b.get()); - return new FieldCapabilitiesIndexResponse(a.getIndexName(), a.getIndexMappingHash(), mergedCaps, true); + return new FieldCapabilitiesIndexResponse( + a.getIndexName(), + a.getIndexMappingHash(), + mergedCaps, + true, + a.getIndexMode() + ); }); } if (fieldCapTask.isCancelled()) { @@ -248,7 +261,13 @@ private void doExecuteForked( for (FieldCapabilitiesIndexResponse resp : response.getIndexResponses()) { String indexName = RemoteClusterAware.buildRemoteIndexName(clusterAlias, resp.getIndexName()); handleIndexResponse.accept( - new FieldCapabilitiesIndexResponse(indexName, resp.getIndexMappingHash(), resp.get(), resp.canMatch()) + new FieldCapabilitiesIndexResponse( + indexName, + resp.getIndexMappingHash(), + resp.get(), + resp.canMatch(), + resp.getIndexMode() + ) ); } for (FieldCapabilitiesFailure failure : response.getFailures()) { @@ -557,7 +576,12 @@ public void messageReceived(FieldCapabilitiesNodeRequest request, TransportChann .stream() .collect(Collectors.groupingBy(ShardId::getIndexName)); final FieldCapabilitiesFetcher fetcher = new FieldCapabilitiesFetcher(indicesService, request.includeEmptyFields()); - final Predicate fieldNameFilter = Regex.simpleMatcher(request.fields()); + Predicate fieldNameFilter; + try { + fieldNameFilter = Regex.simpleMatcher(request.fields()); + } catch (TooComplexToDeterminizeException e) { + throw new IllegalArgumentException("The field names are too complex to process. 
" + e.getMessage()); + } for (List shardIds : groupedShardIds.values()) { final Map failures = new HashMap<>(); final Set unmatched = new HashSet<>(); diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index db26da382d3e1..330a97e27f278 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -42,6 +41,7 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java index f84e35e5303d1..0b63bdc0cf980 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java @@ -20,7 +20,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -31,6 +30,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java index fcb10f3deef60..671ba29db242c 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java @@ -19,12 +19,12 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java 
b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index a5433618441c3..07b586713f152 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -43,6 +42,7 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 5463f9fec4d2a..61d610c9eda4e 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.ingest.IngestService; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; +import org.elasticsearch.plugins.internal.XContentParserDecorator; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; @@ -147,6 +147,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement private Object rawTimestamp; private long normalisedBytesParsed = -1; private boolean originatesFromUpdateByScript; + private boolean originatesFromUpdateByDoc; public IndexRequest(StreamInput in) throws IOException { this(null, in); @@ -204,6 +205,12 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } else { originatesFromUpdateByScript = false; } + + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { + originatesFromUpdateByDoc = in.readBoolean(); + } else { + originatesFromUpdateByDoc = false; + } } public IndexRequest() { @@ -407,8 +414,8 @@ public Map sourceAsMap() { return XContentHelper.convertToMap(source, false, contentType).v2(); } - public Map sourceAsMap(DocumentSizeObserver documentSizeObserver) { - return XContentHelper.convertToMap(source, false, contentType, documentSizeObserver).v2(); + public Map sourceAsMap(XContentParserDecorator parserDecorator) { + return XContentHelper.convertToMap(source, false, contentType, parserDecorator).v2(); } /** @@ -768,6 +775,10 @@ private void writeBody(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { out.writeBoolean(originatesFromUpdateByScript); } + + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { + out.writeBoolean(originatesFromUpdateByDoc); + } } @Override @@ -931,15 +942,6 @@ public IndexRequest setNormalisedBytesParsed(long normalisedBytesParsed) { return this; } - /** - * when observing 
document size while parsing, this method indicates that this request should not be recorded. - * @return an index request - */ - public IndexRequest noParsedBytesToReport() { - this.normalisedBytesParsed = 0; - return this; - } - /** * Adds the pipeline to the list of executed pipelines, if listExecutedPipelines is true * @@ -977,6 +979,15 @@ public IndexRequest setOriginatesFromUpdateByScript(boolean originatesFromUpdate } public boolean originatesFromUpdateByScript() { - return this.originatesFromUpdateByScript; + return originatesFromUpdateByScript; + } + + public boolean originatesFromUpdateByDoc() { + return originatesFromUpdateByDoc; + } + + public IndexRequest setOriginatesFromUpdateByDoc(boolean originatesFromUpdateByDoc) { + this.originatesFromUpdateByDoc = originatesFromUpdateByDoc; + return this; } } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 0cb04fbdba1a6..3f3d97e2115cb 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -54,6 +54,7 @@ public IndexRequestBuilder(ElasticsearchClient client) { this(client, null); } + @SuppressWarnings("this-escape") public IndexRequestBuilder(ElasticsearchClient client, @Nullable String index) { super(client, TransportIndexAction.TYPE); setIndex(index); diff --git a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index cc61f40d303d3..304f37401f5fd 100644 --- a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.transport.TransportService; /** diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java index a8eef9f94b884..99a8cf5a38f13 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java @@ -17,9 +17,9 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java index 8ba251e911431..834e6e5991b26 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineTransportAction.java @@ -16,9 +16,9 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index 5a97596aa00ff..46d5ad0017321 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -21,9 +21,9 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java index f38dff3f8c83c..2fc59086b54a6 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java @@ -17,12 +17,12 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 5a891f33480fa..e3f0f5842cfb3 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexingPressure; @@ -30,6 +29,7 @@ import 
org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponseHandler; @@ -73,7 +73,8 @@ public TransportResyncReplicationAction( ExecutorSelector.getWriteExecutorForShard(threadPool), PrimaryActionExecution.Force, /* we should never reject resync because of thread pool capacity on primary */ indexingPressure, - systemIndices + systemIndices, + ReplicaActionExecution.SubjectToCircuitBreaker ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 3ff820c0e0354..1e5b5ebbefe48 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -707,9 +707,11 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At final String scrollId = request.scroll() != null ? TransportSearchHelper.buildScrollId(queryResults) : null; final BytesReference searchContextId; if (buildPointInTimeFromSearchResults()) { - searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion); + searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion, failures); } else { - if (request.source() != null && request.source().pointInTimeBuilder() != null) { + if (request.source() != null + && request.source().pointInTimeBuilder() != null + && request.source().pointInTimeBuilder().singleSession() == false) { searchContextId = request.source().pointInTimeBuilder().getEncodedId(); } else { searchContextId = null; diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java index 04573f72068f3..965b19a69b858 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -166,6 +166,10 @@ public static void closeContexts( final var successes = new AtomicInteger(); try (RefCountingRunnable refs = new RefCountingRunnable(() -> l.onResponse(successes.get()))) { for (SearchContextIdForNode contextId : contextIds) { + if (contextId.getNode() == null) { + // the shard was missing when creating the PIT, ignore. 
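A point in time opened with the new allow_partial_search_results option can encode shards that had no assigned node at creation time, so every consumer of a decoded SearchContextId now has to tolerate null entries, exactly as the check above does. A minimal sketch of that defensive pattern follows; it is illustrative only and not part of the patch, and `decodedContextIds` and `nodeLookup` stand in for whatever collection and node resolver the caller already has:

    for (SearchContextIdForNode contextId : decodedContextIds) {
        if (contextId.getNode() == null || contextId.getSearchContextId() == null) {
            continue; // shard had no assigned node when the PIT was opened; nothing to free or query
        }
        DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode());
        // ... resolve the connection to `node` and free or search the per-shard reader context ...
    }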
+ continue; + } final DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode()); if (node != null) { try { diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 9ddac7f13eb51..7a33eaa59eb03 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -155,7 +155,8 @@ ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { QueryBuilder query = new KnnScoreDocQueryBuilder( scoreDocs.toArray(Lucene.EMPTY_SCORE_DOCS), source.knnSearch().get(i).getField(), - source.knnSearch().get(i).getQueryVector() + source.knnSearch().get(i).getQueryVector(), + source.knnSearch().get(i).getSimilarity() ).boost(source.knnSearch().get(i).boost()).queryName(source.knnSearch().get(i).queryName()); if (nestedPath != null) { query = new NestedQueryBuilder(nestedPath, query, ScoreMode.Max).innerHit(source.knnSearch().get(i).innerHit()); diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 4bf8206861a4d..fa22cb36b70ab 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -206,7 +206,7 @@ public static void readMultiLineFormat( TriFunction extraParamParser ) throws IOException { int from = 0; - byte marker = xContent.streamSeparator(); + byte marker = xContent.bulkSeparator(); while (true) { int nextMarker = findNextMarker(marker, from, data); if (nextMarker == -1) { @@ -343,7 +343,7 @@ public static byte[] writeMultiLineFormat(MultiSearchRequest multiSearchRequest, writeSearchRequestParams(request, xContentBuilder); BytesReference.bytes(xContentBuilder).writeTo(output); } - output.write(xContent.streamSeparator()); + output.write(xContent.bulkSeparator()); try (XContentBuilder xContentBuilder = XContentBuilder.builder(xContent)) { if (request.source() != null) { request.source().toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); @@ -353,7 +353,7 @@ public static byte[] writeMultiLineFormat(MultiSearchRequest multiSearchRequest, } BytesReference.bytes(xContentBuilder).writeTo(output); } - output.write(xContent.streamSeparator()); + output.write(xContent.bulkSeparator()); } return output.toByteArray(); } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index a1cd4df25a25c..146418839f063 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -41,6 +41,8 @@ public final class OpenPointInTimeRequest extends ActionRequest implements Indic private QueryBuilder indexFilter; + private boolean allowPartialSearchResults = false; + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = SearchRequest.DEFAULT_INDICES_OPTIONS; public OpenPointInTimeRequest(String... 
indices) { @@ -60,6 +62,9 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); } + if (in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + this.allowPartialSearchResults = in.readBoolean(); + } } @Override @@ -76,6 +81,11 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalWriteable(indexFilter); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + out.writeBoolean(allowPartialSearchResults); + } else if (allowPartialSearchResults) { + throw new IOException("[allow_partial_search_results] is not supported on nodes with version " + out.getTransportVersion()); + } } @Override @@ -180,6 +190,15 @@ public boolean includeDataStreams() { return true; } + public boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public OpenPointInTimeRequest allowPartialSearchResults(boolean allowPartialSearchResults) { + this.allowPartialSearchResults = allowPartialSearchResults; + return this; + } + @Override public String getDescription() { return "open search context: indices [" + String.join(",", indices) + "] keep_alive [" + keepAlive + "]"; @@ -200,6 +219,8 @@ public String toString() { + ", preference='" + preference + '\'' + + ", allowPartialSearchResults=" + + allowPartialSearchResults + '}'; } @@ -218,12 +239,13 @@ public boolean equals(Object o) { && indicesOptions.equals(that.indicesOptions) && keepAlive.equals(that.keepAlive) && Objects.equals(routing, that.routing) - && Objects.equals(preference, that.preference); + && Objects.equals(preference, that.preference) + && Objects.equals(allowPartialSearchResults, that.allowPartialSearchResults); } @Override public int hashCode() { - int result = Objects.hash(indicesOptions, keepAlive, maxConcurrentShardRequests, routing, preference); + int result = Objects.hash(indicesOptions, keepAlive, maxConcurrentShardRequests, routing, preference, allowPartialSearchResults); result = 31 * result + Arrays.hashCode(indices); return result; } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index dafcee894c9a6..4a4c0252fb109 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,22 +19,46 @@ import java.util.Base64; import java.util.Objects; +import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; + public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject { private final BytesReference pointInTimeId; - public OpenPointInTimeResponse(BytesReference pointInTimeId) { + private final int totalShards; + private final int successfulShards; + private final int failedShards; + private final int skippedShards; + + public OpenPointInTimeResponse( + BytesReference pointInTimeId, + int totalShards, + int successfulShards, + int 
failedShards, + int skippedShards + ) { this.pointInTimeId = Objects.requireNonNull(pointInTimeId, "Point in time parameter must be not null"); + this.totalShards = totalShards; + this.successfulShards = successfulShards; + this.failedShards = failedShards; + this.skippedShards = skippedShards; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeBytesReference(pointInTimeId); + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + out.writeVInt(totalShards); + out.writeVInt(successfulShards); + out.writeVInt(failedShards); + out.writeVInt(skippedShards); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("id", Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(pointInTimeId))); + buildBroadcastShardsHeader(builder, params, totalShards, successfulShards, failedShards, skippedShards, null); builder.endObject(); return builder; } @@ -42,4 +67,19 @@ public BytesReference getPointInTimeId() { return pointInTimeId; } + public int getTotalShards() { + return totalShards; + } + + public int getSuccessfulShards() { + return successfulShards; + } + + public int getFailedShards() { + return failedShards; + } + + public int getSkippedShards() { + return skippedShards; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index 5b42afcb86928..0f7cbd65a63c2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -28,8 +28,8 @@ /** * This search phase is responsible for executing any re-ranking needed for the given search request, iff that is applicable. - * It starts by retrieving {@code num_shards * window_size} results from the query phase and reduces them to a global list of - * the top {@code window_size} results. It then reaches out to the shards to extract the needed feature data, + * It starts by retrieving {@code num_shards * rank_window_size} results from the query phase and reduces them to a global list of + * the top {@code rank_window_size} results. It then reaches out to the shards to extract the needed feature data, * and finally passes all this information to the appropriate {@code RankFeatureRankCoordinatorContext} which is responsible for reranking * the results. If no rank query is specified, it proceeds directly to the next phase (FetchSearchPhase) by first reducing the results. */ @@ -88,7 +88,7 @@ public void onFailure(Exception e) { void innerRun() throws Exception { // if the RankBuilder specifies a QueryPhaseCoordinatorContext, it will be called as part of the reduce call - // to operate on the first `window_size * num_shards` results and merge them appropriately. + // to operate on the first `rank_window_size * num_shards` results and merge them appropriately. 
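Stepping back to the point-in-time changes above: putting the new request flag and the new response shard counters together, a minimal usage sketch against the internal client looks roughly like the following. This is illustrative only; the index pattern and keep-alive are placeholders, and `client` is assumed to be an org.elasticsearch.client.internal.Client:

    OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest("logs-*"); // hypothetical index pattern
    openRequest.keepAlive(TimeValue.timeValueMinutes(5));
    openRequest.allowPartialSearchResults(true); // new flag; defaults to false
    client.execute(TransportOpenPointInTimeAction.TYPE, openRequest, ActionListener.wrap(resp -> {
        if (resp.getFailedShards() > 0) {
            // some shards were unavailable; their entries are still encoded in the PIT id so that
            // later searches can report partial failures instead of being rejected outright
        }
    }, e -> {
        // e.g. BAD_REQUEST if the flag is used while the cluster is still upgrading
    }));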
SearchPhaseController.ReducedQueryPhase reducedQueryPhase = queryPhaseResults.reduce(); RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext = coordinatorContext(context.getRequest().source()); if (rankFeaturePhaseRankCoordinatorContext != null) { diff --git a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java index 0e7f3f9111842..5966a1c924745 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java @@ -47,6 +47,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC openRequest.routing(request.param("routing")); openRequest.preference(request.param("preference")); openRequest.keepAlive(TimeValue.parseTimeValue(request.param("keep_alive"), null, "keep_alive")); + openRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", false)); if (request.hasParam("max_concurrent_shard_requests")) { final int maxConcurrentShardRequests = request.paramAsInt( "max_concurrent_shard_requests", diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index 95d22e8a9034e..2e4dc724413ea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -58,12 +59,30 @@ public boolean contains(ShardSearchContextId contextId) { public static BytesReference encode( List searchPhaseResults, Map aliasFilter, - TransportVersion version + TransportVersion version, + ShardSearchFailure[] shardFailures ) { + assert shardFailures.length == 0 || version.onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT) + : "[allow_partial_search_results] cannot be enabled on a cluster that has not been fully upgraded to version [" + + TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT + + "] or higher."; try (var out = new BytesStreamOutput()) { out.setTransportVersion(version); TransportVersion.writeVersion(version, out); - out.writeCollection(searchPhaseResults, SearchContextId::writeSearchPhaseResult); + boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + int shardSize = searchPhaseResults.size() + (allowNullContextId ? 
shardFailures.length : 0); + out.writeVInt(shardSize); + for (var searchResult : searchPhaseResults) { + final SearchShardTarget target = searchResult.getSearchShardTarget(); + target.getShardId().writeTo(out); + new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchResult.getContextId()).writeTo(out); + } + if (allowNullContextId) { + for (var failure : shardFailures) { + failure.shard().getShardId().writeTo(out); + new SearchContextIdForNode(failure.shard().getClusterAlias(), null, null).writeTo(out); + } + } out.writeMap(aliasFilter, StreamOutput::writeWriteable); return out.bytes(); } catch (IOException e) { @@ -72,12 +91,6 @@ public static BytesReference encode( } } - private static void writeSearchPhaseResult(StreamOutput out, SearchPhaseResult searchPhaseResult) throws IOException { - final SearchShardTarget target = searchPhaseResult.getSearchShardTarget(); - target.getShardId().writeTo(out); - new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId()).writeTo(out); - } - public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, BytesReference id) { try (var in = new NamedWriteableAwareStreamInput(id.streamInput(), namedWriteableRegistry)) { final TransportVersion version = TransportVersion.readVersion(in); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java index 3071362f552ea..a70ddf6ee14b9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -21,25 +22,59 @@ public final class SearchContextIdForNode implements Writeable { private final ShardSearchContextId searchContextId; private final String clusterAlias; - SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { + /** + * Contains the details required to retrieve a {@link ShardSearchContextId} for a shard on a specific node. + * + * @param clusterAlias The alias of the cluster, or {@code null} if the shard is local. + * @param node The target node where the search context ID is defined, or {@code null} if the shard is missing or unavailable. + * @param searchContextId The {@link ShardSearchContextId}, or {@code null} if the shard is missing or unavailable. + */ + SearchContextIdForNode(@Nullable String clusterAlias, @Nullable String node, @Nullable ShardSearchContextId searchContextId) { this.node = node; this.clusterAlias = clusterAlias; this.searchContextId = searchContextId; } SearchContextIdForNode(StreamInput in) throws IOException { - this.node = in.readString(); + boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + this.node = allowNull ? in.readOptionalString() : in.readString(); this.clusterAlias = in.readOptionalString(); - this.searchContextId = new ShardSearchContextId(in); + this.searchContextId = allowNull ? 
in.readOptionalWriteable(ShardSearchContextId::new) : new ShardSearchContextId(in); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(node); + boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + if (allowNull) { + out.writeOptionalString(node); + } else { + if (node == null) { + // We should never set a null node if the cluster is not fully upgraded to a version that can handle it. + throw new IOException( + "Cannot write null node value to a node in version " + + out.getTransportVersion() + + ". The target node must be specified to retrieve the ShardSearchContextId." + ); + } + out.writeString(node); + } out.writeOptionalString(clusterAlias); - searchContextId.writeTo(out); + if (allowNull) { + out.writeOptionalWriteable(searchContextId); + } else { + if (searchContextId == null) { + // We should never set a null search context id if the cluster is not fully upgraded to a version that can handle it. + throw new IOException( + "Cannot write null search context ID to a node in version " + + out.getTransportVersion() + + ". A valid search context ID is required to identify the shard's search context in this version." + ); + } + searchContextId.writeTo(out); + } } + @Nullable public String getNode() { return node; } @@ -49,6 +84,7 @@ public String getClusterAlias() { return clusterAlias; } + @Nullable public ShardSearchContextId getSearchContextId() { return searchContextId; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index 7ad81154691c0..da8479873a4b6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -42,7 +42,13 @@ public void start() { } } - static void doCheckNoMissingShards(String phaseName, SearchRequest request, GroupShardsIterator shardsIts) { + protected String missingShardsErrorMessage(StringBuilder missingShards) { + return "Search rejected due to missing shards [" + + missingShards + + "]. Consider using `allow_partial_search_results` setting to bypass this error."; + } + + protected void doCheckNoMissingShards(String phaseName, SearchRequest request, GroupShardsIterator shardsIts) { assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; if (request.allowPartialSearchResults() == false) { final StringBuilder missingShards = new StringBuilder(); @@ -58,9 +64,7 @@ static void doCheckNoMissingShards(String phaseName, SearchRequest request, Grou } if (missingShards.isEmpty() == false) { // Status red - shard is missing all copies and would produce partial results for an index search - final String msg = "Search rejected due to missing shards [" - + missingShards - + "]. 
Consider using `allow_partial_search_results` setting to bypass this error."; + final String msg = missingShardsErrorMessage(missingShards); throw new SearchPhaseExecutionException(phaseName, msg, null, ShardSearchFailure.EMPTY_ARRAY); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 514e8d10eeca1..415a2ba7da9f8 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -320,9 +320,10 @@ public void writeTo(StreamOutput out) throws IOException { public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; boolean scroll = scroll() != null; + boolean allowPartialSearchResults = allowPartialSearchResults() != null && allowPartialSearchResults(); if (source != null) { - validationException = source.validate(validationException, scroll); + validationException = source.validate(validationException, scroll, allowPartialSearchResults); } if (scroll) { if (requestCache != null && requestCache) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 45cb118691082..8d70e2dd6bb66 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -47,6 +47,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.BiFunction; import java.util.function.Predicate; import java.util.function.Supplier; @@ -701,6 +702,13 @@ public Cluster getCluster(String clusterAlias) { return clusterInfo.get(clusterAlias); } + /** + * @return collection of cluster aliases in the search response (including "(local)" if was searched). + */ + public Set getClusterAliases() { + return clusterInfo.keySet(); + } + /** * Utility to swap a Cluster object. Guidelines for the remapping function: *
    @@ -803,6 +811,7 @@ public boolean hasClusterObjects() { public boolean hasRemoteClusters() { return total > 1 || clusterInfo.keySet().stream().anyMatch(alias -> alias != RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); } + } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTask.java b/server/src/main/java/org/elasticsearch/action/search/SearchTask.java index 3bf72313c4c21..cc5d60ad0b0c0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTask.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTask.java @@ -69,4 +69,11 @@ public Supplier getSearchResponseMergerSupplier() { public void setSearchResponseMergerSupplier(Supplier supplier) { this.searchResponseMergerSupplier = supplier; } + + /** + * Is this async search? + */ + public boolean isAsync() { + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index fb3c49d83cb93..52d4542faaf77 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -24,9 +24,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -60,6 +63,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.concurrent.Executor; import java.util.function.BiFunction; import static org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME; @@ -455,9 +459,10 @@ public static void registerRequestHandler( boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(SearchFreeContextResponse.of(freed)); }; + final Executor freeContextExecutor = buildFreeContextExecutor(transportService); transportService.registerRequestHandler( FREE_CONTEXT_SCROLL_ACTION_NAME, - transportService.getThreadPool().generic(), + freeContextExecutor, ScrollFreeContextRequest::new, instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) ); @@ -470,7 +475,7 @@ public static void registerRequestHandler( transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, - transportService.getThreadPool().generic(), + freeContextExecutor, SearchFreeContextRequest::new, instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) ); @@ -478,7 +483,7 @@ public static void registerRequestHandler( transportService.registerRequestHandler( CLEAR_SCROLL_CONTEXTS_ACTION_NAME, - transportService.getThreadPool().generic(), + freeContextExecutor, ClearScrollContextsRequest::new, instrumentedHandler(CLEAR_SCROLL_CONTEXTS_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { searchService.freeAllScrollContexts(); @@ -626,6 +631,32 @@ public static void 
registerRequestHandler( TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } + private static Executor buildFreeContextExecutor(TransportService transportService) { + final ThrottledTaskRunner throttledTaskRunner = new ThrottledTaskRunner( + "free_context", + 1, + transportService.getThreadPool().generic() + ); + return r -> throttledTaskRunner.enqueueTask(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + try (releasable) { + r.run(); + } + } + + @Override + public void onFailure(Exception e) { + if (r instanceof AbstractRunnable abstractRunnable) { + abstractRunnable.onFailure(e); + } + // should be impossible, GENERIC pool doesn't reject anything + logger.error("unexpected failure running " + r, e); + assert false : new AssertionError("unexpected failure running " + r, e); + } + }); + } + private static TransportRequestHandler instrumentedHandler( String actionQualifier, TransportService transportService, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index 65284d5d55585..cc6223453195f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java index f507b2e1136a8..7fd81a7b8ea29 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportClosePointInTimeAction.java @@ -13,9 +13,9 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index a2c41042871ba..9d2f97e4f051c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -15,14 +15,15 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.client.internal.node.NodeClient; import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -39,7 +40,6 @@ public class TransportMultiSearchAction extends HandledTransportAction TYPE = new ActionType<>(NAME); private static final Logger logger = LogManager.getLogger(TransportMultiSearchAction.class); private final int allocatedProcessors; - private final ThreadPool threadPool; private final ClusterService clusterService; private final LongSupplier relativeTimeProvider; private final NodeClient client; @@ -47,14 +47,12 @@ public class TransportMultiSearchAction extends HandledTransportAction listener, final long relativeStartTime ) { + /* + * The number of times that we poll an item from the queue here is the minimum of the number of requests and the maximum number + * of concurrent requests. At first glance, it appears that we should never poll from the queue and not obtain a request given + * that we only poll here no more times than the number of requests. However, this is not the only consumer of this queue as + * earlier requests that have already completed will poll from the queue too, and they could complete before later polls are + * invoked here. Thus, it can be the case that we poll here and the queue was empty. + */ SearchRequestSlot request = requests.poll(); - if (request == null) { - /* - * The number of times that we poll an item from the queue here is the minimum of the number of requests and the maximum number - * of concurrent requests. At first glance, it appears that we should never poll from the queue and not obtain a request given - * that we only poll here no more times than the number of requests. However, this is not the only consumer of this queue as - * earlier requests that have already completed will poll from the queue too and they could complete before later polls are - * invoked here. Thus, it can be the case that we poll here and the queue was empty. - */ - return; + // If we have another request to execute, we execute it. If the execution forked #doExecuteSearch will return false and will + // recursively call this method again eventually. If it did not fork and was able to execute the search right away #doExecuteSearch + // will return true, in which case we continue and run the next search request here. + while (request != null && doExecuteSearch(requests, responses, responseCounter, relativeStartTime, request, listener)) { + request = requests.poll(); } + } - /* - * With a request in hand, we are now prepared to execute the search request. There are two possibilities, either we go asynchronous - * or we do not (this can happen if the request does not resolve to any shards). If we do not go asynchronous, we are going to come - * back on the same thread that attempted to execute the search request. At this point, or any other point where we come back on the - * same thread as when the request was submitted, we should not recurse lest we might descend into a stack overflow. 
To avoid this, - * when we handle the response rather than going recursive, we fork to another thread, otherwise we recurse. - */ - final Thread thread = Thread.currentThread(); - client.search(request.request, new ActionListener<>() { + private boolean doExecuteSearch( + Queue requests, + AtomicArray responses, + AtomicInteger responseCounter, + long relativeStartTime, + SearchRequestSlot request, + ActionListener listener + ) { + final SubscribableListener subscribeListener = new SubscribableListener<>(); + client.search(request.request, subscribeListener.safeMap(searchResponse -> { + searchResponse.mustIncRef(); // acquire reference on behalf of MultiSearchResponse.Item below + return new MultiSearchResponse.Item(searchResponse, null); + })); + final ActionListener responseListener = new ActionListener<>() { @Override - public void onResponse(final SearchResponse searchResponse) { - searchResponse.mustIncRef(); // acquire reference on behalf of MultiSearchResponse.Item below - handleResponse(request.responseSlot, new MultiSearchResponse.Item(searchResponse, null)); + public void onResponse(final MultiSearchResponse.Item searchResponse) { + handleResponse(request.responseSlot, searchResponse); } @Override @@ -174,15 +178,6 @@ private void handleResponse(final int responseSlot, final MultiSearchResponse.It if (responseCounter.decrementAndGet() == 0) { assert requests.isEmpty(); finish(); - } else { - if (thread == Thread.currentThread()) { - // we are on the same thread, we need to fork to another thread to avoid recursive stack overflow on a single thread - threadPool.generic() - .execute(() -> executeSearch(requests, responses, responseCounter, listener, relativeStartTime)); - } else { - // we are on a different thread (we went asynchronous), it's safe to recurse - executeSearch(requests, responses, responseCounter, listener, relativeStartTime); - } } } @@ -199,7 +194,19 @@ private void finish() { private long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - relativeStartTime); } - }); + }; + if (subscribeListener.isDone()) { + subscribeListener.addListener(responseListener); + return true; + } + // we went forked and have to check if there's more searches to execute after we're done with this search + subscribeListener.addListener( + ActionListener.runAfter( + responseListener, + () -> executeSearch(requests, responses, responseCounter, listener, relativeStartTime) + ) + ); + return false; } record SearchRequestSlot(SearchRequest request, int responseSlot) { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 91784ba331857..717b1805547be 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -10,6 +10,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionType; @@ -21,7 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; -import 
org.elasticsearch.common.inject.Inject; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,6 +31,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; @@ -50,6 +54,8 @@ import java.util.concurrent.Executor; import java.util.function.BiFunction; +import static org.elasticsearch.core.Strings.format; + public class TransportOpenPointInTimeAction extends HandledTransportAction { private static final Logger logger = LogManager.getLogger(TransportOpenPointInTimeAction.class); @@ -62,6 +68,7 @@ public class TransportOpenPointInTimeAction extends HandledTransportAction listener) { + final ClusterState clusterState = clusterService.state(); + // Check if all the nodes in this cluster know about the service + if (request.allowPartialSearchResults() + && clusterState.getMinTransportVersion().before(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + listener.onFailure( + new ElasticsearchStatusException( + format( + "The [allow_partial_search_results] parameter cannot be used while the cluster is still upgrading. " + + "Please wait until the upgrade is fully completed and try again." + ), + RestStatus.BAD_REQUEST + ) + ); + return; + } final SearchRequest searchRequest = new SearchRequest().indices(request.indices()) .indicesOptions(request.indicesOptions()) .preference(request.preference()) .routing(request.routing()) - .allowPartialSearchResults(false) + .allowPartialSearchResults(request.allowPartialSearchResults()) .source(new SearchSourceBuilder().query(request.indexFilter())); searchRequest.setMaxConcurrentShardRequests(request.maxConcurrentShardRequests()); searchRequest.setCcsMinimizeRoundtrips(false); transportSearchAction.executeRequest((SearchTask) task, searchRequest, listener.map(r -> { assert r.pointInTimeId() != null : r; - return new OpenPointInTimeResponse(r.pointInTimeId()); + return new OpenPointInTimeResponse( + r.pointInTimeId(), + r.getTotalShards(), + r.getSuccessfulShards(), + r.getFailedShards(), + r.getSkippedShards() + ); }), searchListener -> new OpenPointInTimePhase(request, searchListener)); } @@ -213,6 +243,13 @@ SearchPhase openPointInTimePhase( searchRequest.getMaxConcurrentShardRequests(), clusters ) { + @Override + protected String missingShardsErrorMessage(StringBuilder missingShards) { + return "[open_point_in_time] action requires all shards to be available. Missing shards: [" + + missingShards + + "]. 
Consider using `allow_partial_search_results` setting to bypass this error."; + } + @Override protected void executePhaseOnShard( SearchShardIterator shardIt, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 84d233ec9710a..32ee9c331295c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -23,9 +23,12 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; +import org.elasticsearch.action.admin.cluster.stats.CCSUsage; +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -42,10 +45,10 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.ArrayUtils; @@ -64,10 +67,12 @@ import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.action.search.SearchResponseMetrics; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.AggregationReduceContext; +import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; @@ -82,6 +87,7 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.usage.UsageService; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentFactory; @@ -154,6 +160,7 @@ public class TransportSearchAction extends HandledTransportAction buildPerIndexOriginalIndices( @@ -303,43 +312,7 @@ public long buildTookInMillis() { @Override protected void doExecute(Task task, SearchRequest searchRequest, ActionListener listener) { - ActionListener loggingAndMetrics = new ActionListener<>() { - @Override - public void onResponse(SearchResponse searchResponse) { - try { - searchResponseMetrics.recordTookTime(searchResponse.getTookInMillis()); - SearchResponseMetrics.ResponseCountTotalStatus 
responseCountTotalStatus = - SearchResponseMetrics.ResponseCountTotalStatus.SUCCESS; - if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { - // Deduplicate failures by exception message and index - ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); - for (ShardOperationFailedException f : groupedFailures) { - boolean causeHas500Status = false; - if (f.getCause() != null) { - causeHas500Status = ExceptionsHelper.status(f.getCause()).getStatus() >= 500; - } - if ((f.status().getStatus() >= 500 || causeHas500Status) - && ExceptionsHelper.isNodeOrShardUnavailableTypeException(f.getCause()) == false) { - logger.warn("TransportSearchAction shard failure (partial results response)", f); - responseCountTotalStatus = SearchResponseMetrics.ResponseCountTotalStatus.PARTIAL_FAILURE; - } - } - } - listener.onResponse(searchResponse); - // increment after the delegated onResponse to ensure we don't - // record both a success and a failure if there is an exception - searchResponseMetrics.incrementResponseCount(responseCountTotalStatus); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - searchResponseMetrics.incrementResponseCount(SearchResponseMetrics.ResponseCountTotalStatus.FAILURE); - listener.onFailure(e); - } - }; + ActionListener loggingAndMetrics = new SearchResponseActionListener((SearchTask) task, listener); executeRequest((SearchTask) task, searchRequest, loggingAndMetrics, AsyncSearchActionProvider::new); } @@ -394,8 +367,32 @@ void executeRequest( searchPhaseProvider.apply(delegate) ); } else { + if ((listener instanceof TelemetryListener tl) && CCS_TELEMETRY_FEATURE_FLAG.isEnabled()) { + tl.setRemotes(resolvedIndices.getRemoteClusterIndices().size()); + if (task.isAsync()) { + tl.setFeature(CCSUsageTelemetry.ASYNC_FEATURE); + } + String client = task.getHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER); + if (client != null) { + tl.setClient(client); + } + // Check if any of the index patterns are wildcard patterns + var localIndices = resolvedIndices.getLocalIndices(); + if (localIndices != null && Arrays.stream(localIndices.indices()).anyMatch(Regex::isSimpleMatchPattern)) { + tl.setFeature(CCSUsageTelemetry.WILDCARD_FEATURE); + } + if (resolvedIndices.getRemoteClusterIndices() + .values() + .stream() + .anyMatch(indices -> Arrays.stream(indices.indices()).anyMatch(Regex::isSimpleMatchPattern))) { + tl.setFeature(CCSUsageTelemetry.WILDCARD_FEATURE); + } + } final TaskId parentTaskId = task.taskInfo(clusterService.localNode().getId(), false).taskId(); if (shouldMinimizeRoundtrips(rewritten)) { + if ((listener instanceof TelemetryListener tl) && CCS_TELEMETRY_FEATURE_FLAG.isEnabled()) { + tl.setFeature(CCSUsageTelemetry.MRT_FEATURE); + } final AggregationReduceContext.Builder aggregationReduceContextBuilder = rewritten.source() != null && rewritten.source().aggregations() != null ? searchService.aggReduceContextBuilder(task::isCancelled, rewritten.source().aggregations()) @@ -495,11 +492,72 @@ void executeRequest( } } }); + final SearchSourceBuilder source = original.source(); + if (shouldOpenPIT(source)) { + openPIT(client, original, searchService.getDefaultKeepAliveInMillis(), listener.delegateFailureAndWrap((delegate, resp) -> { + // We set the keep alive to -1 to indicate that we don't need the pit id in the response. + // This is needed since we delete the pit prior to sending the response so the id doesn't exist anymore. 
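Before the helper methods that follow, it may help to see the decision in isolation: a point in time is opened implicitly only for a request that has a source, no caller-supplied PIT, and a compound retriever. A minimal restatement of that predicate, for orientation only; the patch's own shouldOpenPIT below is authoritative:

    // Returns true when the coordinator should transparently open a PIT for this search.
    static boolean needsImplicitPointInTime(SearchSourceBuilder source) {
        if (source == null || source.pointInTimeBuilder() != null) {
            return false; // no source, or the caller already manages its own PIT
        }
        var retriever = source.retriever();
        return retriever != null && retriever.isCompound();
    }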
+ source.pointInTimeBuilder(new PointInTimeBuilder(resp.getPointInTimeId()).setKeepAlive(TimeValue.MINUS_ONE)); + executeRequest(task, original, new ActionListener<>() { + @Override + public void onResponse(SearchResponse response) { + // we need to close the PIT first so we delay the release of the response to after the closing + response.incRef(); + closePIT( + client, + original.source().pointInTimeBuilder(), + () -> ActionListener.respondAndRelease(listener, response) + ); + } + + @Override + public void onFailure(Exception e) { + closePIT(client, original.source().pointInTimeBuilder(), () -> listener.onFailure(e)); + } + }, searchPhaseProvider); + })); + } else { + Rewriteable.rewriteAndFetch( + original, + searchService.getRewriteContext(timeProvider::absoluteStartMillis, resolvedIndices, original.pointInTimeBuilder()), + rewriteListener + ); + } + } - Rewriteable.rewriteAndFetch( - original, - searchService.getRewriteContext(timeProvider::absoluteStartMillis, resolvedIndices), - rewriteListener + /** + * Returns true if the provided source needs to open a shared point in time prior to executing the request. + */ + private boolean shouldOpenPIT(SearchSourceBuilder source) { + if (source == null) { + return false; + } + if (source.pointInTimeBuilder() != null) { + return false; + } + var retriever = source.retriever(); + return retriever != null && retriever.isCompound(); + } + + static void openPIT(Client client, SearchRequest request, long keepAliveMillis, ActionListener listener) { + OpenPointInTimeRequest pitReq = new OpenPointInTimeRequest(request.indices()).indicesOptions(request.indicesOptions()) + .preference(request.preference()) + .routing(request.routing()) + .keepAlive(TimeValue.timeValueMillis(keepAliveMillis)); + client.execute(TransportOpenPointInTimeAction.TYPE, pitReq, listener); + } + + static void closePIT(Client client, PointInTimeBuilder pit, Runnable next) { + client.execute( + TransportClosePointInTimeAction.TYPE, + new ClosePointInTimeRequest(pit.getEncodedId()), + ActionListener.runAfter(new ActionListener<>() { + @Override + public void onResponse(ClosePointInTimeResponse closePointInTimeResponse) {} + + @Override + public void onFailure(Exception e) {} + }, next) ); } @@ -742,27 +800,26 @@ static void collectSearchShards( for (Map.Entry entry : remoteIndicesByCluster.entrySet()) { final String clusterAlias = entry.getKey(); boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); - TransportSearchAction.CCSActionListener> singleListener = - new TransportSearchAction.CCSActionListener<>( - clusterAlias, - skipUnavailable, - responsesCountDown, - exceptions, - clusters, - listener - ) { - @Override - void innerOnResponse(SearchShardsResponse searchShardsResponse) { - assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); - ccsClusterInfoUpdate(searchShardsResponse, clusters, clusterAlias, timeProvider); - searchShardsResponses.put(clusterAlias, searchShardsResponse); - } + CCSActionListener> singleListener = new CCSActionListener<>( + clusterAlias, + skipUnavailable, + responsesCountDown, + exceptions, + clusters, + listener + ) { + @Override + void innerOnResponse(SearchShardsResponse searchShardsResponse) { + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); + ccsClusterInfoUpdate(searchShardsResponse, clusters, clusterAlias, timeProvider); + searchShardsResponses.put(clusterAlias, searchShardsResponse); + } - @Override - Map createFinalResponse() { - return 
searchShardsResponses; - } - }; + @Override + Map createFinalResponse() { + return searchShardsResponses; + } + }; remoteClusterService.maybeEnsureConnectedAndGetConnection( clusterAlias, skipUnavailable == false, @@ -789,9 +846,10 @@ Map createFinalResponse() { ); } else { // does not do a can-match - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices).indicesOptions( - indicesOptions - ).local(true).preference(preference).routing(routing); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest( + MasterNodeRequest.infiniteMasterNodeTimeout(connection.getTransportVersion()), + indices + ).indicesOptions(indicesOptions).local(true).preference(preference).routing(routing); transportService.sendRequest( connection, TransportClusterSearchShardsAction.TYPE.name(), @@ -1052,11 +1110,16 @@ static List getRemoteShardsIteratorFromPointInTime( final String clusterAlias = entry.getKey(); assert clusterAlias.equals(perNode.getClusterAlias()) : clusterAlias + " != " + perNode.getClusterAlias(); final List targetNodes = new ArrayList<>(group.allocatedNodes().size()); - targetNodes.add(perNode.getNode()); - if (perNode.getSearchContextId().getSearcherId() != null) { - for (String node : group.allocatedNodes()) { - if (node.equals(perNode.getNode()) == false) { - targetNodes.add(node); + if (perNode.getNode() != null) { + // If the shard was available when the PIT was created, it's included. + // Otherwise, we add the shard iterator without a target node, allowing a partial search failure to + // be thrown when a search phase attempts to access it. + targetNodes.add(perNode.getNode()); + if (perNode.getSearchContextId().getSearcherId() != null) { + for (String node : group.allocatedNodes()) { + if (node.equals(perNode.getNode()) == false) { + targetNodes.add(node); + } } } } @@ -1152,7 +1215,7 @@ private void executeSearch( assert searchRequest.pointInTimeBuilder() != null; aliasFilter = resolvedIndices.getSearchContextId().aliasFilter(); concreteLocalIndices = resolvedIndices.getLocalIndices() == null ? new String[0] : resolvedIndices.getLocalIndices().indices(); - localShardIterators = getLocalLocalShardsIteratorFromPointInTime( + localShardIterators = getLocalShardsIteratorFromPointInTime( clusterState, searchRequest.indicesOptions(), searchRequest.getLocalClusterAlias(), @@ -1659,7 +1722,7 @@ private static RemoteTransportException wrapRemoteClusterFailure(String clusterA return new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); } - static List getLocalLocalShardsIteratorFromPointInTime( + static List getLocalShardsIteratorFromPointInTime( ClusterState clusterState, IndicesOptions indicesOptions, String localClusterAlias, @@ -1673,25 +1736,30 @@ static List getLocalLocalShardsIteratorFromPointInTime( if (Strings.isEmpty(perNode.getClusterAlias())) { final ShardId shardId = entry.getKey(); final List targetNodes = new ArrayList<>(2); - try { - final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); - // Prefer executing shard requests on nodes that are part of PIT first. 
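As an illustrative sketch of the behaviour these point-in-time iterator changes enable (client, pitId and listener are placeholder names; pitId is the encoded id returned by a prior open-point-in-time call), a search that reuses a PIT can opt into partial results rather than failing outright when some of the PIT's shards have become unavailable since the PIT was opened:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.internal.Client;
    import org.elasticsearch.common.bytes.BytesReference;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.builder.PointInTimeBuilder;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    static void searchExistingPit(Client client, BytesReference pitId, ActionListener<SearchResponse> listener) {
        SearchRequest request = new SearchRequest()        // a PIT search must not name indices explicitly
            .allowPartialSearchResults(true)                // tolerate shards lost since the PIT was opened
            .source(new SearchSourceBuilder()
                .query(QueryBuilders.matchAllQuery())
                .pointInTimeBuilder(new PointInTimeBuilder(pitId)));
        client.search(request, listener);
    }

With allowPartialSearchResults(false) the same request would instead fail with the "missing shards" error message quoted at the start of this change.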
- if (clusterState.nodes().nodeExists(perNode.getNode())) { - targetNodes.add(perNode.getNode()); - } - if (perNode.getSearchContextId().getSearcherId() != null) { - for (ShardRouting shard : shards) { - if (shard.currentNodeId().equals(perNode.getNode()) == false) { - targetNodes.add(shard.currentNodeId()); + if (perNode.getNode() != null) { + // If the shard was available when the PIT was created, it's included. + // Otherwise, we add the shard iterator without a target node, allowing a partial search failure to + // be thrown when a search phase attempts to access it. + try { + final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); + // Prefer executing shard requests on nodes that are part of PIT first. + if (clusterState.nodes().nodeExists(perNode.getNode())) { + targetNodes.add(perNode.getNode()); + } + if (perNode.getSearchContextId().getSearcherId() != null) { + for (ShardRouting shard : shards) { + if (shard.currentNodeId().equals(perNode.getNode()) == false) { + targetNodes.add(shard.currentNodeId()); + } } } - } - } catch (IndexNotFoundException | ShardNotFoundException e) { - // We can hit these exceptions if the index was deleted after creating PIT or the cluster state on - // this coordinating node is outdated. It's fine to ignore these extra "retry-able" target shards - // when allowPartialSearchResults is false - if (allowPartialSearchResults == false) { - throw e; + } catch (IndexNotFoundException | ShardNotFoundException e) { + // We can hit these exceptions if the index was deleted after creating PIT or the cluster state on + // this coordinating node is outdated. It's fine to ignore these extra "retry-able" target shards + // when allowPartialSearchResults is false + if (allowPartialSearchResults == false) { + throw e; + } } } OriginalIndices finalIndices = new OriginalIndices(new String[] { shardId.getIndexName() }, indicesOptions); @@ -1750,4 +1818,112 @@ List getLocalShardsIterator( // the returned list must support in-place sorting, so this is the most memory efficient we can do here return Arrays.asList(list); } + + private interface TelemetryListener { + void setRemotes(int count); + + void setFeature(String feature); + + void setClient(String client); + } + + private class SearchResponseActionListener implements ActionListener, TelemetryListener { + private final SearchTask task; + private final ActionListener listener; + private final CCSUsage.Builder usageBuilder; + + SearchResponseActionListener(SearchTask task, ActionListener listener) { + this.task = task; + this.listener = listener; + usageBuilder = new CCSUsage.Builder(); + } + + /** + * Should we collect telemetry for this search? 
+ */ + private boolean collectTelemetry() { + return CCS_TELEMETRY_FEATURE_FLAG.isEnabled() && usageBuilder.getRemotesCount() > 0; + } + + public void setRemotes(int count) { + usageBuilder.setRemotesCount(count); + } + + @Override + public void setFeature(String feature) { + usageBuilder.setFeature(feature); + } + + @Override + public void setClient(String client) { + usageBuilder.setClient(client); + } + + @Override + public void onResponse(SearchResponse searchResponse) { + try { + searchResponseMetrics.recordTookTime(searchResponse.getTookInMillis()); + SearchResponseMetrics.ResponseCountTotalStatus responseCountTotalStatus = + SearchResponseMetrics.ResponseCountTotalStatus.SUCCESS; + if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) { + // Deduplicate failures by exception message and index + ShardOperationFailedException[] groupedFailures = ExceptionsHelper.groupBy(searchResponse.getShardFailures()); + for (ShardOperationFailedException f : groupedFailures) { + boolean causeHas500Status = false; + if (f.getCause() != null) { + causeHas500Status = ExceptionsHelper.status(f.getCause()).getStatus() >= 500; + } + if ((f.status().getStatus() >= 500 || causeHas500Status) + && ExceptionsHelper.isNodeOrShardUnavailableTypeException(f.getCause()) == false) { + logger.warn("TransportSearchAction shard failure (partial results response)", f); + responseCountTotalStatus = SearchResponseMetrics.ResponseCountTotalStatus.PARTIAL_FAILURE; + } + } + } + searchResponseMetrics.incrementResponseCount(responseCountTotalStatus); + + if (collectTelemetry()) { + extractCCSTelemetry(searchResponse); + recordTelemetry(); + } + } catch (Exception e) { + onFailure(e); + return; + } + // This is last because we want to collect telemetry before returning the response. + listener.onResponse(searchResponse); + } + + @Override + public void onFailure(Exception e) { + searchResponseMetrics.incrementResponseCount(SearchResponseMetrics.ResponseCountTotalStatus.FAILURE); + if (collectTelemetry()) { + usageBuilder.setFailure(e); + recordTelemetry(); + } + listener.onFailure(e); + } + + private void recordTelemetry() { + usageService.getCcsUsageHolder().updateUsage(usageBuilder.build()); + } + + /** + * Extract telemetry data from the search response. + * @param searchResponse The final response from the search. 
+ */ + private void extractCCSTelemetry(SearchResponse searchResponse) { + usageBuilder.took(searchResponse.getTookInMillis()); + for (String clusterAlias : searchResponse.getClusters().getClusterAliases()) { + SearchResponse.Cluster cluster = searchResponse.getClusters().getCluster(clusterAlias); + if (cluster.getStatus() == SearchResponse.Cluster.Status.SKIPPED) { + usageBuilder.skippedRemote(clusterAlias); + } else { + usageBuilder.perClusterUsage(clusterAlias, cluster.getTook()); + } + } + + } + + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index d60033786abeb..1e2c445b22f3c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -18,8 +18,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.action.search.SearchResponseMetrics; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index c3eea1fe557e7..81b999a1239e5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -18,10 +18,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -87,6 +87,14 @@ public TransportSearchShardsAction( @Override protected void doExecute(Task task, SearchShardsRequest searchShardsRequest, ActionListener listener) { + searchShards(task, searchShardsRequest, listener); + } + + /** + * Notes that this method does not perform authorization for the search shards action. + * Callers must ensure that the request was properly authorized before calling this method. 
+ */ + public void searchShards(Task task, SearchShardsRequest searchShardsRequest, ActionListener listener) { final long relativeStartNanos = System.nanoTime(); SearchRequest original = new SearchRequest(searchShardsRequest.indices()).indicesOptions(searchShardsRequest.indicesOptions()) .routing(searchShardsRequest.routing()) @@ -115,7 +123,7 @@ protected void doExecute(Task task, SearchShardsRequest searchShardsRequest, Act Rewriteable.rewriteAndFetch( original, - searchService.getRewriteContext(timeProvider::absoluteStartMillis, resolvedIndices), + searchService.getRewriteContext(timeProvider::absoluteStartMillis, resolvedIndices, null), listener.delegateFailureAndWrap((delegate, searchRequest) -> { Index[] concreteIndices = resolvedIndices.getConcreteLocalIndices(); final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, searchRequest.indices()); diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index 69bdfdea31ae4..b9200a0e32736 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -40,14 +40,14 @@ protected HandledTransportAction( Writeable.Reader requestReader, Executor executor ) { - super(actionName, actionFilters, transportService.getTaskManager()); + super(actionName, actionFilters, transportService.getTaskManager(), executor); transportService.registerRequestHandler( actionName, executor, false, canTripCircuitBreaker, requestReader, - (request, channel, task) -> execute(task, request, new ChannelActionListener<>(channel)) + (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel)) ); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 33b64a9388c00..7c9dcb608ec84 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -958,7 +958,7 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { if (ignoreUnavailable()) { backwardsCompatibleOptions.add(Option.ALLOW_UNAVAILABLE_CONCRETE_TARGETS); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { if (allowFailureIndices()) { backwardsCompatibleOptions.add(Option.ALLOW_FAILURE_INDICES); } @@ -976,7 +976,7 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { states.add(WildcardStates.HIDDEN); } out.writeEnumSet(states); - if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { failureStoreOptions.writeTo(out); } } @@ -989,7 +989,7 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti options.contains(Option.EXCLUDE_ALIASES) ); boolean allowFailureIndices = true; - if (in.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { allowFailureIndices = options.contains(Option.ALLOW_FAILURE_INDICES); } GatekeeperOptions gatekeeperOptions = GatekeeperOptions.builder() @@ -998,7 +998,7 @@ 
public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti .allowFailureIndices(allowFailureIndices) .ignoreThrottled(options.contains(Option.IGNORE_THROTTLED)) .build(); - FailureStoreOptions failureStoreOptions = in.getTransportVersion().onOrAfter(TransportVersions.ADD_FAILURE_STORE_INDICES_OPTIONS) + FailureStoreOptions failureStoreOptions = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? FailureStoreOptions.read(in) : FailureStoreOptions.DEFAULT; return new IndicesOptions( diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index ee4433369f689..4022ef3873771 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -381,12 +381,12 @@ private boolean assertCompleteAllowed() { } // only used in assertions - boolean allowedExecutors(Thread thread1, Thread thread2) { + boolean allowedExecutors(Thread blockedThread, Thread completingThread) { // this should only be used to validate thread interactions, like not waiting for a future completed on the same // executor, hence calling it with the same thread indicates a bug in the assertion using this. - assert thread1 != thread2 : "only call this for different threads"; - String thread1Name = EsExecutors.executorName(thread1); - String thread2Name = EsExecutors.executorName(thread2); - return thread1Name == null || thread2Name == null || thread1Name.equals(thread2Name) == false; + assert blockedThread != completingThread : "only call this for different threads"; + String blockedThreadName = EsExecutors.executorName(blockedThread); + String completingThreadName = EsExecutors.executorName(completingThread); + return blockedThreadName == null || completingThreadName == null || blockedThreadName.equals(completingThreadName) == false; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java index 41949c7ce3c22..6a2673e3276fc 100644 --- a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java @@ -410,6 +410,32 @@ public void complete(ActionListener listener) { } } + /** + * Creates and returns a new {@link SubscribableListener} {@code L} and subscribes {@code nextStep} to this listener such that if this + * listener is completed successfully then the result is discarded and {@code nextStep} is invoked with argument {@code L}. If this + * listener is completed with exception {@code E} then so is {@code L}. + *

+     * This can be used to construct a sequence of async actions, each ignoring the result of the previous ones:
+     * <pre>
+     * l.andThen(l1 -> forkAction1(args1, l1)).andThen(l2 -> forkAction2(args2, l2)).addListener(finalListener);
+     * </pre>
+     * After creating this chain, completing {@code l} with a successful response will call {@code forkAction1}, which will on completion
+     * call {@code forkAction2}, which will in turn pass its response to {@code finalListener}. A failure of any step will bypass the
+     * remaining steps and ultimately fail {@code finalListener}.
+     * <p>
+     * The threading of the {@code nextStep} callback is the same as for listeners added with {@link #addListener}: if this listener is
+     * already complete then {@code nextStep} is invoked on the thread calling {@link #andThen} and in its thread context, but if this
+     * listener is incomplete then {@code nextStep} is invoked on the completing thread and in its thread context. In other words, if you
+     * want to ensure that {@code nextStep} is invoked using a particular executor, then you must do both of:
+     * <ul>
+     * <li>Ensure that this {@link SubscribableListener} is always completed using that executor, and</li>
+     * <li>Invoke {@link #andThen} using that executor.</li>
+     * </ul>
    + */ + public SubscribableListener andThen(CheckedConsumer, ? extends Exception> nextStep) { + return newForked(l -> addListener(l.delegateFailureIgnoreResponseAndWrap(nextStep))); + } + /** * Creates and returns a new {@link SubscribableListener} {@code L} and subscribes {@code nextStep} to this listener such that if this * listener is completed successfully with result {@code R} then {@code nextStep} is invoked with arguments {@code L} and {@code R}. If diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java index 222941981f05a..65a7e2302b9ae 100644 --- a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -14,11 +14,14 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; public abstract class TransportAction { @@ -26,22 +29,46 @@ public abstract class TransportAction { + void execute(Task task, Request request, ActionListener listener); + } + + protected TransportAction(String actionName, ActionFilters actionFilters, TaskManager taskManager, Executor executor) { this.actionName = actionName; this.filters = actionFilters.filters(); this.taskManager = taskManager; + this.executor = executor; } /** * Use this method when the transport action should continue to run in the context of the current task */ + protected final void executeDirect(Task task, Request request, ActionListener listener) { + handleExecution(task, request, listener, this::doExecute); + } + public final void execute(Task task, Request request, ActionListener listener) { + handleExecution( + task, + request, + listener, + executor == EsExecutors.DIRECT_EXECUTOR_SERVICE ? this::doExecute : this::doExecuteForking + ); + } + + private void handleExecution( + Task task, + Request request, + ActionListener listener, + TransportActionHandler handler + ) { final ActionRequestValidationException validationException; try { validationException = request.validate(); @@ -64,10 +91,14 @@ public final void execute(Task task, Request request, ActionListener l // Releasables#releaseOnce to avoid a double-release. 
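A minimal usage sketch of the SubscribableListener#andThen overload added above; stepOne and stepTwo are hypothetical asynchronous methods that complete the listener they are handed, and a failure in either step bypasses the rest of the chain and reaches finalListener directly:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.SubscribableListener;

    static void runSteps(ActionListener<Void> finalListener) {
        SubscribableListener
            .<Void>newForked(l -> stepOne(l))   // kick off the first async step
            .<Void>andThen(l -> stepTwo(l))     // result of stepOne is discarded, per the javadoc above
            .addListener(finalListener);        // receives stepTwo's result, or the first failure
    }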
request.mustIncRef(); final var releaseRef = Releasables.releaseOnce(request::decRef); - RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger, releaseRef); + RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger, handler, releaseRef); requestFilterChain.proceed(task, actionName, request, ActionListener.runBefore(listener, releaseRef::close)); } + private void doExecuteForking(Task task, Request request, ActionListener listener) { + executor.execute(ActionRunnable.wrap(listener, l -> doExecute(task, request, listener))); + } + protected abstract void doExecute(Task task, Request request, ActionListener listener); private static class RequestFilterChain @@ -75,13 +106,20 @@ private static class RequestFilterChain { private final TransportAction action; + private final TransportActionHandler handler; private final AtomicInteger index = new AtomicInteger(); private final Logger logger; private final Releasable releaseRef; - private RequestFilterChain(TransportAction action, Logger logger, Releasable releaseRef) { + private RequestFilterChain( + TransportAction action, + Logger logger, + TransportActionHandler handler, + Releasable releaseRef + ) { this.action = action; this.logger = logger; + this.handler = handler; this.releaseRef = releaseRef; } @@ -93,7 +131,7 @@ public void proceed(Task task, String actionName, Request request, ActionListene this.action.filters[i].apply(task, actionName, request, listener, this); } else if (i == this.action.filters.length) { try (releaseRef) { - this.action.doExecute(task, request, listener); + handler.execute(task, request, listener); } } else { listener.onFailure(new IllegalStateException("proceed was called too many times")); @@ -103,7 +141,6 @@ public void proceed(Task task, String actionName, Request request, ActionListene listener.onFailure(e); } } - } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java index 8aa6bc4de109a..b76dfe07e18ed 100644 --- a/server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/UnsafePlainActionFuture.java @@ -10,36 +10,33 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; -import java.util.Objects; +import java.util.Set; /** * An unsafe future. You should not need to use this for new code, rather you should be able to convert that code to be async * or use a clear hierarchy of thread pool executors around the future. - * + *

  * This future is unsafe, since it allows notifying the future on the same thread pool executor that it is being waited on. This
  * is a common deadlock scenario, since all threads may be waiting and thus no thread may be able to complete the future.
+ * <p>
    + * Note that the deadlock protection in {@link PlainActionFuture} is very weak. In general there's a risk of deadlock if there's any cycle + * of threads which block/complete on each other's futures, or dispatch work to each other, but this is much harder to detect. */ @Deprecated(forRemoval = true) public class UnsafePlainActionFuture extends PlainActionFuture { - - private final String unsafeExecutor; - private final String unsafeExecutor2; - - public UnsafePlainActionFuture(String unsafeExecutor) { - this(unsafeExecutor, null); - } - - public UnsafePlainActionFuture(String unsafeExecutor, String unsafeExecutor2) { - Objects.requireNonNull(unsafeExecutor); - this.unsafeExecutor = unsafeExecutor; - this.unsafeExecutor2 = unsafeExecutor2; + private final Set unsafeExecutors; + + /** + * Create a future which permits any of the given named executors to be used unsafely (i.e. used for both waiting for the future's + * completion and completing the future). + */ + public UnsafePlainActionFuture(String... unsafeExecutors) { + assert unsafeExecutors.length > 0 : "use PlainActionFuture if there are no executors to use unsafely"; + this.unsafeExecutors = Set.of(unsafeExecutors); } @Override - boolean allowedExecutors(Thread thread1, Thread thread2) { - return super.allowedExecutors(thread1, thread2) - || unsafeExecutor.equals(EsExecutors.executorName(thread1)) - || unsafeExecutor2 == null - || unsafeExecutor2.equals(EsExecutors.executorName(thread1)); + boolean allowedExecutors(Thread blockedThread, Thread completingThread) { + return super.allowedExecutors(blockedThread, completingThread) || unsafeExecutors.contains(EsExecutors.executorName(blockedThread)); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 269ebd80fb36a..d547f26bd2061 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.support.master; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.common.io.stream.StreamInput; @@ -32,7 +33,8 @@ public abstract class MasterNodeRequest * For internally-generated requests, choose an appropriate timeout. Often this will be {@link TimeValue#MAX_VALUE} (or {@link - * TimeValue#MINUS_ONE} which means an infinite timeout in 8.15.0 onwards) since usually we want internal requests to wait for as long + * TimeValue#MINUS_ONE} which means an infinite timeout in 8.14.0 onwards (see #107050) since usually we want internal requests to wait for as long * as necessary to complete. * * @deprecated all requests should specify a timeout, see #107984. @@ -51,15 +53,20 @@ public abstract class MasterNodeRequest - * For requests which originate in the REST layer, use {@link - * org.elasticsearch.rest.RestUtils#getMasterNodeTimeout} to determine the timeout. - *

-     * For internally-generated requests, choose an appropriate timeout. Often this will be {@link
-     * TimeValue#MAX_VALUE} (or {@link TimeValue#MINUS_ONE} which means an infinite timeout in 8.15.0 onwards)
-     * since usually we want internal requests to wait for as long as necessary to complete.
+     * processing other tasks:
+     * <ul>
+     * <li>
+     * For requests which originate in the REST layer, use
+     * {@link org.elasticsearch.rest.RestUtils#getMasterNodeTimeout} to determine the timeout.
+     * </li>
+     * <li>
+     * For internally-generated requests, choose an appropriate timeout. Often this will be an infinite
+     * timeout, see {@link #infiniteMasterNodeTimeout}, since it is reasonable to wait for as long as necessary
+     * for internal requests to complete.
+     * </li>
+     * </ul>
    */ + // TODO forbid TimeValue#MAX_VALUE once support for version prior to 8.14 dropped protected MasterNodeRequest(TimeValue masterNodeTimeout) { this.masterNodeTimeout = Objects.requireNonNull(masterNodeTimeout); this.masterTerm = 0L; @@ -127,4 +134,14 @@ public final TimeValue masterNodeTimeout() { public final long masterTerm() { return masterTerm; } + + /** + * @return a {@link TimeValue} which represents an infinite master-node timeout, suitable for sending using the given transport version. + * Versions prior to 8.14 did not reliably support {@link TimeValue#MINUS_ONE} for this purpose so for these versions we use + * {@link TimeValue#MAX_VALUE} as the best available alternative. + * @see #107050 + */ + public static TimeValue infiniteMasterNodeTimeout(TransportVersion transportVersion) { + return transportVersion.onOrAfter(TransportVersions.V_8_14_0) ? TimeValue.MINUS_ONE : TimeValue.MAX_VALUE; + } } diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index fcd513b175bb1..347edd0916fc5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -78,7 +78,9 @@ protected TransportNodesAction( Writeable.Reader nodeRequest, Executor executor ) { - super(actionName, actionFilters, transportService.getTaskManager()); + // Only part of this action execution needs to be forked off - coordination can run on SAME because it's only O(#nodes) work. + // Hence the separate "finalExecutor", and why we run the whole TransportAction.execute on SAME. + super(actionName, actionFilters, transportService.getTaskManager(), EsExecutors.DIRECT_EXECUTOR_SERVICE); assert executor.equals(EsExecutors.DIRECT_EXECUTOR_SERVICE) == false : "TransportNodesAction must always fork off the transport thread"; this.clusterService = Objects.requireNonNull(clusterService); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index c2d7e173fd0bf..7e3e5bdee206d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -117,6 +117,20 @@ protected enum SyncGlobalCheckpointAfterOperation { AttemptAfterSuccess } + /** + * Execution of the replica action + */ + protected enum ReplicaActionExecution { + /** + * Will only execute when permitted by the configured circuit breakers + */ + SubjectToCircuitBreaker, + /** + * Will bypass the configured circuit breaker checks + */ + BypassCircuitBreaker + } + /** * The timeout for retrying replication requests. */ @@ -170,11 +184,14 @@ protected TransportReplicationAction( Writeable.Reader replicaRequestReader, Executor executor, SyncGlobalCheckpointAfterOperation syncGlobalCheckpointAfterOperation, - PrimaryActionExecution primaryActionExecution + PrimaryActionExecution primaryActionExecution, + ReplicaActionExecution replicaActionExecution ) { - super(actionName, actionFilters, transportService.getTaskManager()); + // TODO: consider passing the executor, investigate doExecute and let InboundHandler/TransportAction handle concurrency. 
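As an illustration of the MasterNodeRequest#infiniteMasterNodeTimeout helper added above (the demo method is hypothetical; the ClusterSearchShardsRequest construction earlier in this change is a real call site), it picks the wire-safe representation of an infinite timeout for the peer's transport version:

    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.action.support.master.MasterNodeRequest;
    import org.elasticsearch.core.TimeValue;

    static void demo() {
        // Peers before 8.14 only understand MAX_VALUE as "effectively infinite"...
        TimeValue legacy = MasterNodeRequest.infiniteMasterNodeTimeout(TransportVersions.V_8_13_0); // TimeValue.MAX_VALUE
        // ...whereas 8.14+ peers accept MINUS_ONE as a true infinite master-node timeout.
        TimeValue modern = MasterNodeRequest.infiniteMasterNodeTimeout(TransportVersions.V_8_14_0); // TimeValue.MINUS_ONE
    }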
+ super(actionName, actionFilters, transportService.getTaskManager(), EsExecutors.DIRECT_EXECUTOR_SERVICE); assert syncGlobalCheckpointAfterOperation != null : "Must specify global checkpoint sync behaviour"; assert primaryActionExecution != null : "Must specify primary action execution behaviour"; + assert replicaActionExecution != null : "Must specify replica action execution behaviour"; this.threadPool = threadPool; this.transportService = transportService; this.clusterService = clusterService; @@ -208,12 +225,15 @@ protected TransportReplicationAction( this::handlePrimaryRequest ); - // we must never reject on because of thread pool capacity on replicas + boolean canTripCircuitBreakerOnReplica = switch (replicaActionExecution) { + case BypassCircuitBreaker -> false; + case SubjectToCircuitBreaker -> true; + }; transportService.registerRequestHandler( transportReplicaAction, executor, - true, - true, + true, // we must never reject because of thread pool capacity on replicas + canTripCircuitBreakerOnReplica, in -> new ConcreteReplicaRequest<>(replicaRequestReader, in), this::handleReplicaRequest ); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index f380710cc0794..90b636ed69e24 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -78,7 +78,8 @@ protected TransportWriteAction( BiFunction executorFunction, PrimaryActionExecution primaryActionExecution, IndexingPressure indexingPressure, - SystemIndices systemIndices + SystemIndices systemIndices, + ReplicaActionExecution replicaActionExecution ) { // We pass ThreadPool.Names.SAME to the super class as we control the dispatching to the // ThreadPool.Names.WRITE/ThreadPool.Names.SYSTEM_WRITE thread pools in this class. @@ -95,7 +96,8 @@ protected TransportWriteAction( replicaRequest, EsExecutors.DIRECT_EXECUTOR_SERVICE, SyncGlobalCheckpointAfterOperation.AttemptAfterSuccess, - primaryActionExecution + primaryActionExecution, + replicaActionExecution ); this.executorFunction = executorFunction; this.indexingPressure = indexingPressure; diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index a6a6b2c332a0a..180aa3b336149 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -72,7 +72,8 @@ protected TransportSingleShardAction( Writeable.Reader request, Executor executor ) { - super(actionName, actionFilters, transportService.getTaskManager()); + // TODO: consider passing the executor, remove it from doExecute and let InboundHandler/TransportAction handle concurrency. 
+ super(actionName, actionFilters, transportService.getTaskManager(), EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.transportService = transportService; @@ -250,7 +251,7 @@ private class TransportHandler implements TransportRequestHandler { @Override public void messageReceived(Request request, final TransportChannel channel, Task task) throws Exception { // if we have a local operation, execute it on a thread since we don't spawn - execute(task, request, new ChannelActionListener<>(channel)); + executeDirect(task, request, new ChannelActionListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymRuleAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymRuleAction.java index 68a6caa4b0550..f0c4e17abb382 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymRuleAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymRuleAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.synonyms.SynonymsManagementAPIService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymsAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymsAction.java index d05ae9c0637b6..c22bb308f42de 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymsAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportDeleteSynonymsAction.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.synonyms.SynonymsManagementAPIService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymRuleAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymRuleAction.java index c5deaf21e6acf..5a574def93e0d 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymRuleAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymRuleAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.synonyms.SynonymsManagementAPIService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsAction.java 
b/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsAction.java index 24b31fd38a8be..09d087e85e9e2 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.synonyms.SynonymsManagementAPIService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsSetsAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsSetsAction.java index a0d74270e0694..13177f475354d 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsSetsAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportGetSynonymsSetsAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.synonyms.SynonymsManagementAPIService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymRuleAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymRuleAction.java index 46a9a35beae4b..eb0b87ae0211a 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymRuleAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymRuleAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.synonyms.SynonymsManagementAPIService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymsAction.java b/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymsAction.java index d4f3d04e47e5d..4f5589bd85ed7 100644 --- a/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymsAction.java +++ b/server/src/main/java/org/elasticsearch/action/synonyms/TransportPutSynonymsAction.java @@ -12,8 +12,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.synonyms.SynonymsManagementAPIService; import org.elasticsearch.tasks.Task; import 
org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java index 89129a1aec6a7..b4095d04baff2 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -17,10 +17,10 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index d0277e8ce8c80..84f6d433af882 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -16,13 +16,13 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.termvectors.TermVectorsService; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java index 1375ae7fb6c3a..75bbbf96136a9 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java @@ -16,13 +16,13 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.termvectors.TermVectorsService; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 
cd899d732e916..00d33ec9d2eca 100644 --- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -34,7 +34,6 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentHelper; @@ -49,6 +48,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 6b54654d7fbe9..056eb70c53269 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -27,7 +27,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.DocumentSizeObserver; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.UpdateCtxMap; @@ -181,14 +181,14 @@ static String calculateRouting(GetResult getResult, @Nullable IndexRequest updat Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResult getResult, boolean detectNoop) { final IndexRequest currentRequest = request.doc(); final String routing = calculateRouting(getResult, currentRequest); - final DocumentSizeObserver documentSizeObserver = documentParsingProvider.newDocumentSizeObserver(request); + final XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(request); final Tuple> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); final XContentType updateSourceContentType = sourceAndContent.v1(); final Map updatedSourceAsMap = sourceAndContent.v2(); final boolean noop = XContentHelper.update( updatedSourceAsMap, - currentRequest.sourceAsMap(documentSizeObserver), + currentRequest.sourceAsMap(meteringParserDecorator), detectNoop ) == false; @@ -226,8 +226,8 @@ Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResu .waitForActiveShards(request.waitForActiveShards()) .timeout(request.timeout()) .setRefreshPolicy(request.getRefreshPolicy()) - .setNormalisedBytesParsed(documentSizeObserver.normalisedBytesParsed()); - + .setOriginatesFromUpdateByDoc(true); + finalIndexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes()); return new Result(finalIndexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); } } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index c1ee0f7b8af37..587ed2ef75eba 100644 --- 
a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -68,6 +68,7 @@ public UpdateRequestBuilder(ElasticsearchClient client) { this(client, null, null); } + @SuppressWarnings("this-escape") public UpdateRequestBuilder(ElasticsearchClient client, String index, String id) { super(client, TransportUpdateAction.TYPE); setIndex(index); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/StartupException.java b/server/src/main/java/org/elasticsearch/bootstrap/StartupException.java index 5d7d764fdae1f..14027a417e2f6 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/StartupException.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/StartupException.java @@ -8,8 +8,8 @@ package org.elasticsearch.bootstrap; -import org.elasticsearch.common.inject.CreationException; -import org.elasticsearch.common.inject.spi.Message; +import org.elasticsearch.injection.guice.CreationException; +import org.elasticsearch.injection.guice.spi.Message; import java.io.PrintStream; import java.util.Objects; @@ -25,7 +25,7 @@ public final class StartupException extends Exception { /** maximum length of a stacktrace, before we truncate it */ static final int STACKTRACE_LIMIT = 30; /** all lines from this package are RLE-compressed */ - static final String GUICE_PACKAGE = "org.elasticsearch.common.inject"; + static final String GUICE_PACKAGE = "org.elasticsearch.injection.guice"; public StartupException(Throwable cause) { super(Objects.requireNonNull(cause)); diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index f14a2f6fb5247..4d5a670925b5b 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -64,10 +64,6 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; -import org.elasticsearch.action.admin.cluster.shards.TransportClusterSearchShardsAction; import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.clone.TransportCloneSnapshotAction; @@ -98,16 +94,6 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; -import 
org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder; import org.elasticsearch.action.ingest.DeletePipelineTransportAction; @@ -122,9 +108,7 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; import org.elasticsearch.action.ingest.SimulatePipelineResponse; -import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.TaskId; @@ -286,14 +270,6 @@ public CancelTasksRequestBuilder prepareCancelTasks(String... nodesIds) { return new CancelTasksRequestBuilder(this).setNodesIds(nodesIds); } - public void searchShards(final ClusterSearchShardsRequest request, final ActionListener listener) { - execute(TransportClusterSearchShardsAction.TYPE, request, listener); - } - - public ClusterSearchShardsRequestBuilder prepareSearchShards(String... indices) { - return new ClusterSearchShardsRequestBuilder(this).setIndices(indices); - } - public void putRepository(PutRepositoryRequest request, ActionListener listener) { execute(TransportPutRepositoryAction.TYPE, request, listener); } @@ -302,15 +278,6 @@ public PutRepositoryRequestBuilder preparePutRepository(TimeValue masterNodeTime return new PutRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, name); } - @Deprecated(forRemoval = true) // temporary compatibility shim - public PutRepositoryRequestBuilder preparePutRepository(String name) { - return preparePutRepository( - MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, - AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, - name - ); - } - public void deleteRepository(DeleteRepositoryRequest request, ActionListener listener) { execute(TransportDeleteRepositoryAction.TYPE, request, listener); } @@ -319,15 +286,6 @@ public DeleteRepositoryRequestBuilder prepareDeleteRepository(TimeValue masterNo return new DeleteRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, name); } - @Deprecated(forRemoval = true) // temporary compatibility shim - public DeleteRepositoryRequestBuilder prepareDeleteRepository(String name) { - return prepareDeleteRepository( - MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, - AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, - name - ); - } - public void getRepositories(GetRepositoriesRequest request, ActionListener listener) { execute(GetRepositoriesAction.INSTANCE, request, listener); } @@ -360,11 +318,6 @@ public void createSnapshot(CreateSnapshotRequest request, ActionListener listener) { execute(TransportDeleteSnapshotAction.TYPE, request, listener); } - @Deprecated(forRemoval = true) // temporary compatibility shim - public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, 
String... names) { - return prepareDeleteSnapshot(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, repository, names); - } - public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(TimeValue masterNodeTimeout, String repository, String... names) { return new DeleteSnapshotRequestBuilder(this, masterNodeTimeout, repository, names); } @@ -411,11 +354,6 @@ public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener simulatePipeline(SimulatePipelineR public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, XContentType xContentType) { return new SimulatePipelineRequestBuilder(this, source, xContentType); } - - public PutStoredScriptRequestBuilder preparePutStoredScript() { - return new PutStoredScriptRequestBuilder(this); - } - - public void deleteStoredScript(DeleteStoredScriptRequest request, ActionListener listener) { - execute(TransportDeleteStoredScriptAction.TYPE, request, listener); - } - - public DeleteStoredScriptRequestBuilder prepareDeleteStoredScript(String id) { - return new DeleteStoredScriptRequestBuilder(client).setId(id); - } - - public void putStoredScript(final PutStoredScriptRequest request, ActionListener listener) { - execute(TransportPutStoredScriptAction.TYPE, request, listener); - - } - - public GetStoredScriptRequestBuilder prepareGetStoredScript(String id) { - return new GetStoredScriptRequestBuilder(this).setId(id); - } - - public void getStoredScript(final GetStoredScriptRequest request, final ActionListener listener) { - execute(GetStoredScriptAction.INSTANCE, request, listener); - } } diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index f4e86c8a4eca6..c2268bfd9dc62 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -93,6 +93,7 @@ public abstract class AbstractClient implements Client { private final ThreadPool threadPool; private final AdminClient admin; + @SuppressWarnings("this-escape") public AbstractClient(Settings settings, ThreadPool threadPool) { this.settings = settings; this.threadPool = threadPool; diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 29933ad20ef10..3fba3a7bdbe13 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -58,7 +58,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.common.io.stream.Writeable.Reader; @@ -72,10 +71,12 @@ import org.elasticsearch.health.node.selection.HealthNodeTaskExecutor; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.ingest.IngestMetadata; +import org.elasticsearch.injection.guice.AbstractModule; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksNodeService; import 
org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.script.ScriptMetadata; +import org.elasticsearch.snapshots.RegisteredPolicySnapshots; import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskResultsService; @@ -234,6 +235,12 @@ public static List getNamedWriteables() { registerMetadataCustom(entries, NodesShutdownMetadata.TYPE, NodesShutdownMetadata::new, NodesShutdownMetadata::readDiffFrom); registerMetadataCustom(entries, FeatureMigrationResults.TYPE, FeatureMigrationResults::new, FeatureMigrationResults::readDiffFrom); registerMetadataCustom(entries, DesiredNodesMetadata.TYPE, DesiredNodesMetadata::new, DesiredNodesMetadata::readDiffFrom); + registerMetadataCustom( + entries, + RegisteredPolicySnapshots.TYPE, + RegisteredPolicySnapshots::new, + RegisteredPolicySnapshots.RegisteredSnapshotsDiff::new + ); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java index 711c2a7fee8e0..dd4194b60e6ac 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -39,7 +39,8 @@ public ClusterName(StreamInput input) throws IOException { } public ClusterName(String value) { - this.value = value.intern(); + // cluster name string is most likely part of a setting so we can speed things up over outright interning here + this.value = Settings.internKeyOrValue(value); } public String value() { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index c54269da68507..30e9a9a3779d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -1081,7 +1081,7 @@ public void writeTo(StreamOutput out) throws IOException { routingTable.writeTo(out); nodes.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - out.writeMap(compatibilityVersions, (streamOutput, versions) -> versions.writeTo(streamOutput)); + out.writeMap(compatibilityVersions, StreamOutput::writeWriteable); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { clusterFeatures.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index cc94137afa322..d765755b5d250 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -17,13 +17,13 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index ee1d29ab778f1..aa8ba17a0f05f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.IndicesAdminClient; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -24,6 +23,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.xcontent.XContentType; /** diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index a01383b3eaa93..ca5fab1087cdc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -36,7 +36,6 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -46,6 +45,7 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; +import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 2f604f1b95974..e922d130d7f83 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -41,6 +41,7 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -223,7 +224,7 @@ public Coordinator( this.onJoinValidators = NodeJoinExecutor.addBuiltInJoinValidators(onJoinValidators); this.singleNodeDiscovery = DiscoveryModule.isSingleNodeDiscovery(settings); this.electionStrategy = electionStrategy; - this.joinReasonService = new JoinReasonService(transportService.getThreadPool()::relativeTimeInMillis); + this.joinReasonService = new JoinReasonService(transportService.getThreadPool().relativeTimeInMillisSupplier()); this.joinHelper = new JoinHelper( 
allocationService, masterService, @@ -831,10 +832,12 @@ public void run() { discover other nodes and form a multi-node cluster via the [{}={}] setting. Fully-formed clusters do \ not attempt to discover other nodes, and nodes with different cluster UUIDs cannot belong to the same \ cluster. The cluster UUID persists across restarts and can only be changed by deleting the contents of \ - the node's data path(s). Remove the discovery configuration to suppress this message.""", + the node's data path(s). Remove the discovery configuration to suppress this message. See [{}] for \ + more information.""", applierState.metadata().clusterUUID(), DISCOVERY_SEED_HOSTS_SETTING.getKey(), - DISCOVERY_SEED_HOSTS_SETTING.get(settings) + DISCOVERY_SEED_HOSTS_SETTING.get(settings), + ReferenceDocs.FORMING_SINGLE_NODE_CLUSTERS ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistory.java b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistory.java index ba7d22fa6a673..720510e50c48d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistory.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistory.java @@ -59,7 +59,7 @@ public class MasterHistory implements ClusterStateListener { @SuppressWarnings("this-escape") public MasterHistory(ThreadPool threadPool, ClusterService clusterService) { this.masterHistory = new ArrayList<>(); - this.currentTimeMillisSupplier = threadPool::relativeTimeInMillis; + this.currentTimeMillisSupplier = threadPool.relativeTimeInMillisSupplier(); this.maxHistoryAge = MAX_HISTORY_AGE_SETTING.get(clusterService.getSettings()); this.clusterService = clusterService; clusterService.addListener(this); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java index 8482eb735b0d9..1b2bf81bc85c3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/MasterHistoryService.java @@ -61,7 +61,7 @@ public class MasterHistoryService { public MasterHistoryService(TransportService transportService, ThreadPool threadPool, ClusterService clusterService) { this.transportService = transportService; this.localMasterHistory = new MasterHistory(threadPool, clusterService); - this.currentTimeMillisSupplier = threadPool::relativeTimeInMillis; + this.currentTimeMillisSupplier = threadPool.relativeTimeInMillisSupplier(); this.acceptableRemoteHistoryAge = REMOTE_HISTORY_TIME_TO_LIVE_SETTING.get(clusterService.getSettings()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index c0102437ad9d0..227e78ed37114 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -230,9 +230,17 @@ private ClusterState deserializeAndApplyDiff(BytesTransportRequest request, Stre private void acceptState(ClusterState incomingState, ActionListener actionListener) { assert incomingState.nodes().isLocalNodeElectedMaster() == false : "should handle local publications locally, but got " + incomingState; - clusterCoordinationExecutor.execute( - ActionRunnable.supply(actionListener, () -> 
handlePublishRequest.apply(new PublishRequest(incomingState))) - ); + clusterCoordinationExecutor.execute(ActionRunnable.supply(actionListener, new CheckedSupplier<>() { + @Override + public PublishWithJoinResponse get() { + return handlePublishRequest.apply(new PublishRequest(incomingState)); + } + + @Override + public String toString() { + return "acceptState[term=" + incomingState.term() + ",version=" + incomingState.version() + "]"; + } + })); } public PublicationContext newPublicationContext(ClusterStatePublicationEvent clusterStatePublicationEvent) { diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java index d025ddab26af6..c53395b5d76c1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java @@ -19,7 +19,7 @@ public enum ClusterHealthStatus implements Writeable { YELLOW((byte) 1), RED((byte) 2); - private byte value; + private final byte value; ClusterHealthStatus(byte value) { this.value = value; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java index a0f4a929dafdb..ff412d629b3b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java @@ -396,6 +396,8 @@ public static AliasMetadata fromXContent(XContentParser parser) throws IOExcepti } else if ("is_hidden".equals(currentFieldName)) { builder.isHidden(parser.booleanValue()); } + } else if (token == null) { + throw new IllegalArgumentException("unexpected null token while parsing alias"); } } return builder.build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 03b23c462ecec..8acee4f6be821 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -69,7 +69,7 @@ public final class DataStream implements SimpleDiffable, ToXContentO public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store"); public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.V_8_12_0; - public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.DATA_STREAM_AUTO_SHARDING_EVENT; + public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.V_8_14_0; public static boolean isFailureStoreFeatureFlagEnabled() { return FAILURE_STORE_FEATURE_FLAG.isEnabled(); @@ -277,6 +277,18 @@ public boolean rolloverOnWrite() { return backingIndices.rolloverOnWrite; } + /** + * We define that a data stream is considered internal either if it is a system index or if + * its name starts with a dot. + * + * Note: Dot-prefixed internal data streams is a naming convention for internal data streams, + * but it's not yet enforced. + * @return true if it's a system index or has a dot-prefixed name. + */ + public boolean isInternal() { + return isSystem() || name.charAt(0) == '.'; + } + /** * @param timestamp The timestamp used to select a backing index based on its start and end time. 
* @param metadata The metadata that is used to fetch the start and end times for backing indices of this data stream. @@ -796,12 +808,12 @@ public List getIndicesPastRetention( ) { if (lifecycle == null || lifecycle.isEnabled() == false - || lifecycle.getEffectiveDataRetention(isSystem() ? null : globalRetention) == null) { + || lifecycle.getEffectiveDataRetention(globalRetention, isInternal()) == null) { return List.of(); } List indicesPastRetention = getNonWriteIndicesOlderThan( - lifecycle.getEffectiveDataRetention(isSystem() ? null : globalRetention), + lifecycle.getEffectiveDataRetention(globalRetention, isInternal()), indexMetadataSupplier, this::isIndexManagedByDataStreamLifecycle, nowSupplier @@ -1202,7 +1214,7 @@ public XContentBuilder toXContent( } if (lifecycle != null) { builder.field(LIFECYCLE.getPreferredName()); - lifecycle.toXContent(builder, params, rolloverConfiguration, isSystem() ? null : globalRetention); + lifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention, isInternal()); } builder.field(ROLLOVER_ON_WRITE_FIELD.getPreferredName(), backingIndices.rolloverOnWrite); if (backingIndices.autoShardingEvent != null) { @@ -1376,6 +1388,25 @@ private static Instant getTimestampFromParser(BytesReference source, XContentTyp } } + /** + * Resolve the index abstraction to a data stream. This handles alias resolution as well as data stream resolution. This does NOT + * resolve a data stream by providing a concrete backing index. + */ + public static DataStream resolveDataStream(IndexAbstraction indexAbstraction, Metadata metadata) { + // We do not consider concrete indices - only data streams and data stream aliases. + if (indexAbstraction == null || indexAbstraction.isDataStreamRelated() == false) { + return null; + } + + // Locate the write index for the abstraction, and check if it has a data stream associated with it. + Index writeIndex = indexAbstraction.getWriteIndex(); + if (writeIndex == null) { + return null; + } + IndexAbstraction writeAbstraction = metadata.getIndicesLookup().get(writeIndex.getName()); + return writeAbstraction.getParentDataStream(); + } + /** * Modifies the passed Instant object to be used as a bound for a timestamp field in TimeSeries. It needs to be called in both backing * index construction (rollover) and index selection for doc insertion. 
Failure to do so may lead to errors due to document timestamps diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAction.java index f260b48cd7b7a..32bf46ce45919 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAction.java @@ -88,7 +88,7 @@ public DataStreamAction(StreamInput in) throws IOException { this.type = Type.fromValue(in.readByte()); this.dataStream = in.readString(); this.index = in.readString(); - this.failureStore = in.getTransportVersion().onOrAfter(TransportVersions.MODIFY_DATA_STREAM_FAILURE_STORES) && in.readBoolean(); + this.failureStore = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) && in.readBoolean(); } private DataStreamAction(Type type, String dataStream, String index, boolean failureStore) { @@ -155,7 +155,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(type.value()); out.writeString(dataStream); out.writeString(index); - if (out.getTransportVersion().onOrAfter(TransportVersions.MODIFY_DATA_STREAM_FAILURE_STORES)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeBoolean(failureStore); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java index 5b96f92193e98..be42916b07956 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java @@ -17,7 +17,9 @@ * Holds the factory retention configuration. Factory retention is the global retention configuration meant to be * used if a user hasn't provided other retention configuration via {@link DataStreamGlobalRetention} metadata in the * cluster state. + * @deprecated This interface is deprecated, please use {@link DataStreamGlobalRetentionSettings}. 
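For illustration only: the settings-based replacement referenced above is defined later in this patch (DataStreamGlobalRetentionSettings) as two dynamic, node-scoped cluster settings. A minimal sketch of configuring them, with hypothetical values:

    // Sketch only; the keys come from the DataStreamGlobalRetentionSettings hunk further down.
    // Each value must be at least 10 seconds, and the default may not exceed the max.
    Settings retentionSettings = Settings.builder()
        .put("data_streams.lifecycle.retention.default", "7d")
        .put("data_streams.lifecycle.retention.max", "90d")
        .build();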
*/ +@Deprecated public interface DataStreamFactoryRetention { @Nullable diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java index f691151eee95e..185f625f6f91f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java @@ -8,50 +8,26 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.cluster.AbstractNamedDiffable; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.NamedDiff; -import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Iterator; -import java.util.Objects; /** - * A cluster state entry that contains global retention settings that are configurable by the user. These settings include: - * - default retention, applied on any data stream managed by DSL that does not have an explicit retention defined - * - max retention, applied on every data stream managed by DSL + * Wrapper class for the {@link DataStreamGlobalRetentionSettings}. */ -public final class DataStreamGlobalRetention extends AbstractNamedDiffable implements ClusterState.Custom { - - public static final String TYPE = "data-stream-global-retention"; +public record DataStreamGlobalRetention(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) implements Writeable { public static final NodeFeature GLOBAL_RETENTION = new NodeFeature("data_stream.lifecycle.global_retention"); - - public static final ParseField DEFAULT_RETENTION_FIELD = new ParseField("default_retention"); - public static final ParseField MAX_RETENTION_FIELD = new ParseField("max_retention"); - - public static final DataStreamGlobalRetention EMPTY = new DataStreamGlobalRetention(null, null); public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10); - @Nullable - private final TimeValue defaultRetention; - @Nullable - private final TimeValue maxRetention; - /** * @param defaultRetention the default retention or null if it's undefined - * @param maxRetention the max retention or null if it's undefined + * @param maxRetention the max retention or null if it's undefined * @throws IllegalArgumentException when the default retention is greater than the max retention. 
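A brief usage sketch of the slimmed-down record and the constructor contract documented above (values are hypothetical):

    DataStreamGlobalRetention retention = new DataStreamGlobalRetention(TimeValue.timeValueDays(7), TimeValue.timeValueDays(30));
    // Per the @throws note above, a default retention greater than the max retention is rejected:
    // new DataStreamGlobalRetention(TimeValue.timeValueDays(90), TimeValue.timeValueDays(30)); // IllegalArgumentException
    // The record also declares MIN_RETENTION_VALUE (10 seconds) as the smallest accepted retention.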
*/ public DataStreamGlobalRetention(TimeValue defaultRetention, TimeValue maxRetention) { @@ -79,78 +55,12 @@ public static DataStreamGlobalRetention read(StreamInput in) throws IOException return new DataStreamGlobalRetention(in.readOptionalTimeValue(), in.readOptionalTimeValue()); } - @Override - public String getWriteableName() { - return TYPE; - } - - @Override - public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ADD_DATA_STREAM_GLOBAL_RETENTION; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalTimeValue(defaultRetention); out.writeOptionalTimeValue(maxRetention); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(ClusterState.Custom.class, TYPE, in); - } - - @Override - public Iterator toXContentChunked(ToXContent.Params ignored) { - return Iterators.single(this::toXContentFragment); - } - - /** - * Adds to the XContentBuilder the two fields when they are not null. - */ - public XContentBuilder toXContentFragment(XContentBuilder builder, ToXContent.Params params) throws IOException { - if (defaultRetention != null) { - builder.field(DEFAULT_RETENTION_FIELD.getPreferredName(), defaultRetention.getStringRep()); - } - if (maxRetention != null) { - builder.field(MAX_RETENTION_FIELD.getPreferredName(), maxRetention.getStringRep()); - } - return builder; - } - - /** - * Returns the metadata found in the cluster state or null. When trying to retrieve the effective global retention, - * prefer to use the {@link DataStreamGlobalRetentionResolver#resolve(ClusterState)} because it takes into account - * the factory retention settings as well. Only use this, if you only want to know the global retention settings - * stored in the cluster metadata. - */ - @Nullable - public static DataStreamGlobalRetention getFromClusterState(ClusterState clusterState) { - return clusterState.custom(DataStreamGlobalRetention.TYPE); - } - - @Nullable - public TimeValue getDefaultRetention() { - return defaultRetention; - } - - @Nullable - public TimeValue getMaxRetention() { - return maxRetention; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - DataStreamGlobalRetention that = (DataStreamGlobalRetention) o; - return Objects.equals(defaultRetention, that.defaultRetention) && Objects.equals(maxRetention, that.maxRetention); - } - - @Override - public int hashCode() { - return Objects.hash(defaultRetention, maxRetention); - } - @Override public String toString() { return "DataStreamGlobalRetention{" diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionResolver.java deleted file mode 100644 index ab44595e37c13..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionResolver.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.core.Nullable; - -/** - * Resolves the global retention configuration for data stream lifecycle taking into consideration the - * metadata in the cluster state and the factory settings. - * Currently, we give precedence to the configuration in the metadata and fallback to the factory settings when it's not present. - */ -public class DataStreamGlobalRetentionResolver { - - private final DataStreamFactoryRetention factoryRetention; - - public DataStreamGlobalRetentionResolver(DataStreamFactoryRetention factoryRetention) { - this.factoryRetention = factoryRetention; - } - - /** - * Return the global retention configuration as found in the metadata. If the metadata is null, then it falls back - * to the factory retention. Returns null if both the cluster metadata for global retention and the factory retention - * are null. - */ - @Nullable - public DataStreamGlobalRetention resolve(ClusterState clusterState) { - DataStreamGlobalRetention globalRetentionFromClusterState = DataStreamGlobalRetention.getFromClusterState(clusterState); - if (globalRetentionFromClusterState != null || factoryRetention.isDefined() == false) { - return globalRetentionFromClusterState; - } - return new DataStreamGlobalRetention(factoryRetention.getDefaultRetention(), factoryRetention.getMaxRetention()); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java new file mode 100644 index 0000000000000..a1fcf56a92726 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * This class holds the data stream global retention settings. It defines, validates and monitors the settings. + *
    + * The global retention settings apply to non-system data streams that are managed by the data stream lifecycle. They consist of: + * - The default retention which applies to data streams that do not have a retention defined. + * - The max retention which applies to all data streams that do not have retention or their retention has exceeded this value. + *
    + * Temporarily, we fall back to {@link DataStreamFactoryRetention} to facilitate a smooth transition to these settings. + */ +public class DataStreamGlobalRetentionSettings { + + private static final Logger logger = LogManager.getLogger(DataStreamGlobalRetentionSettings.class); + public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10); + + public static final Setting DATA_STREAMS_DEFAULT_RETENTION_SETTING = Setting.timeSetting( + "data_streams.lifecycle.retention.default", + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue value) {} + + @Override + public void validate(final TimeValue settingValue, final Map, Object> settings) { + TimeValue defaultRetention = getSettingValueOrNull(settingValue); + TimeValue maxRetention = getSettingValueOrNull((TimeValue) settings.get(DATA_STREAMS_MAX_RETENTION_SETTING)); + validateIsolatedRetentionValue(defaultRetention, DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey()); + validateGlobalRetentionConfiguration(defaultRetention, maxRetention); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(DATA_STREAMS_MAX_RETENTION_SETTING); + return settings.iterator(); + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting DATA_STREAMS_MAX_RETENTION_SETTING = Setting.timeSetting( + "data_streams.lifecycle.retention.max", + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue value) {} + + @Override + public void validate(final TimeValue settingValue, final Map, Object> settings) { + TimeValue defaultRetention = getSettingValueOrNull((TimeValue) settings.get(DATA_STREAMS_DEFAULT_RETENTION_SETTING)); + TimeValue maxRetention = getSettingValueOrNull(settingValue); + validateIsolatedRetentionValue(maxRetention, DATA_STREAMS_MAX_RETENTION_SETTING.getKey()); + validateGlobalRetentionConfiguration(defaultRetention, maxRetention); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(DATA_STREAMS_DEFAULT_RETENTION_SETTING); + return settings.iterator(); + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private final DataStreamFactoryRetention factoryRetention; + + @Nullable + private volatile TimeValue defaultRetention; + @Nullable + private volatile TimeValue maxRetention; + + private DataStreamGlobalRetentionSettings(DataStreamFactoryRetention factoryRetention) { + this.factoryRetention = factoryRetention; + } + + @Nullable + public TimeValue getMaxRetention() { + return shouldFallbackToFactorySettings() ? factoryRetention.getMaxRetention() : maxRetention; + } + + @Nullable + public TimeValue getDefaultRetention() { + return shouldFallbackToFactorySettings() ? 
factoryRetention.getDefaultRetention() : defaultRetention; + } + + public boolean areDefined() { + return getDefaultRetention() != null || getMaxRetention() != null; + } + + private boolean shouldFallbackToFactorySettings() { + return defaultRetention == null && maxRetention == null; + } + + /** + * Creates an instance and initialises the cluster settings listeners + * @param clusterSettings it will register the cluster settings listeners to monitor for changes + * @param factoryRetention for migration purposes, it will be removed shortly + */ + public static DataStreamGlobalRetentionSettings create(ClusterSettings clusterSettings, DataStreamFactoryRetention factoryRetention) { + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = new DataStreamGlobalRetentionSettings(factoryRetention); + clusterSettings.initializeAndWatch(DATA_STREAMS_DEFAULT_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setDefaultRetention); + clusterSettings.initializeAndWatch(DATA_STREAMS_MAX_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setMaxRetention); + return dataStreamGlobalRetentionSettings; + } + + private void setMaxRetention(TimeValue maxRetention) { + this.maxRetention = getSettingValueOrNull(maxRetention); + logger.info("Updated max factory retention to [{}]", this.maxRetention == null ? null : maxRetention.getStringRep()); + } + + private void setDefaultRetention(TimeValue defaultRetention) { + this.defaultRetention = getSettingValueOrNull(defaultRetention); + logger.info("Updated default factory retention to [{}]", this.defaultRetention == null ? null : defaultRetention.getStringRep()); + } + + private static void validateIsolatedRetentionValue(@Nullable TimeValue retention, String settingName) { + if (retention != null && retention.getMillis() < MIN_RETENTION_VALUE.getMillis()) { + throw new IllegalArgumentException( + "Setting '" + settingName + "' should be greater than " + MIN_RETENTION_VALUE.getStringRep() + ); + } + } + + private static void validateGlobalRetentionConfiguration(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) { + if (defaultRetention != null && maxRetention != null && defaultRetention.getMillis() > maxRetention.getMillis()) { + throw new IllegalArgumentException( + "Setting [" + + DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey() + + "=" + + defaultRetention.getStringRep() + + "] cannot be greater than [" + + DATA_STREAMS_MAX_RETENTION_SETTING.getKey() + + "=" + + maxRetention.getStringRep() + + "]." + ); + } + } + + @Nullable + public DataStreamGlobalRetention get() { + if (areDefined() == false) { + return null; + } + return new DataStreamGlobalRetention(getDefaultRetention(), getMaxRetention()); + } + + /** + * Time value settings do not accept null as a value. To represent an undefined retention as a setting we use the value + * of -1 and this method converts this to null. + * + * @param value the retention as parsed from the setting + * @return the value when it is not -1 and null otherwise + */ + @Nullable + private static TimeValue getSettingValueOrNull(TimeValue value) { + return value == null || value.equals(TimeValue.MINUS_ONE) ? 
null : value; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 3fb5e92cb3359..bd9a65735be05 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -24,7 +24,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -55,6 +54,7 @@ public class DataStreamLifecycle implements SimpleDiffable, // Versions over the wire public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_10_X; + public static final String EFFECTIVE_RETENTION_REST_API_CAPABILITY = "data_stream_lifecycle_effective_retention"; public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; // The following XContent params are used to enrich the DataStreamLifecycle json with effective retention information @@ -65,6 +65,7 @@ public class DataStreamLifecycle implements SimpleDiffable, DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, "true" ); + public static final Tuple INFINITE_RETENTION = Tuple.tuple(null, RetentionSource.DATA_STREAM_CONFIGURATION); /** * Check if {@link #DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME} is present and set to {@code true}, indicating that @@ -145,41 +146,46 @@ public boolean isEnabled() { } /** - * The least amount of time data should be kept by elasticsearch. If a caller does not want the global retention considered (for - * example, when evaluating the effective retention for a system data stream or a template) then null should be given for - * globalRetention. - * @param globalRetention The global retention, or null if global retention does not exist or should not be applied + * The least amount of time data should be kept by elasticsearch. The effective retention is a function with three parameters, + * the {@link DataStreamLifecycle#dataRetention}, the global retention and whether this lifecycle is associated with an internal + * data stream. + * @param globalRetention The global retention, or null if global retention does not exist. + * @param isInternalDataStream A flag denoting if this lifecycle is associated with an internal data stream or not * @return the time period or null, null represents that data should never be deleted. */ @Nullable - public TimeValue getEffectiveDataRetention(@Nullable DataStreamGlobalRetention globalRetention) { - return getEffectiveDataRetentionWithSource(globalRetention).v1(); + public TimeValue getEffectiveDataRetention(@Nullable DataStreamGlobalRetention globalRetention, boolean isInternalDataStream) { + return getEffectiveDataRetentionWithSource(globalRetention, isInternalDataStream).v1(); } /** - * The least amount of time data should be kept by elasticsearch. If a caller does not want the global retention considered (for - * example, when evaluating the effective retention for a system data stream or a template) then null should be given for - * globalRetention. 
- * @param globalRetention The global retention, or null if global retention does not exist or should not be applied + * The least amount of time data should be kept by elasticsearch.. The effective retention is a function with three parameters, + * the {@link DataStreamLifecycle#dataRetention}, the global retention and whether this lifecycle is associated with an internal + * data stream. + * @param globalRetention The global retention, or null if global retention does not exist. + * @param isInternalDataStream A flag denoting if this lifecycle is associated with an internal data stream or not * @return A tuple containing the time period or null as v1 (where null represents that data should never be deleted), and the non-null * retention source as v2. */ - public Tuple getEffectiveDataRetentionWithSource(@Nullable DataStreamGlobalRetention globalRetention) { + public Tuple getEffectiveDataRetentionWithSource( + @Nullable DataStreamGlobalRetention globalRetention, + boolean isInternalDataStream + ) { // If lifecycle is disabled there is no effective retention if (enabled == false) { - return Tuple.tuple(null, RetentionSource.DATA_STREAM_CONFIGURATION); + return INFINITE_RETENTION; } var dataStreamRetention = getDataStreamRetention(); - if (globalRetention == null) { + if (globalRetention == null || isInternalDataStream) { return Tuple.tuple(dataStreamRetention, RetentionSource.DATA_STREAM_CONFIGURATION); } if (dataStreamRetention == null) { - return globalRetention.getDefaultRetention() != null - ? Tuple.tuple(globalRetention.getDefaultRetention(), RetentionSource.DEFAULT_GLOBAL_RETENTION) - : Tuple.tuple(globalRetention.getMaxRetention(), RetentionSource.MAX_GLOBAL_RETENTION); + return globalRetention.defaultRetention() != null + ? Tuple.tuple(globalRetention.defaultRetention(), RetentionSource.DEFAULT_GLOBAL_RETENTION) + : Tuple.tuple(globalRetention.maxRetention(), RetentionSource.MAX_GLOBAL_RETENTION); } - if (globalRetention.getMaxRetention() != null && globalRetention.getMaxRetention().getMillis() < dataStreamRetention.getMillis()) { - return Tuple.tuple(globalRetention.getMaxRetention(), RetentionSource.MAX_GLOBAL_RETENTION); + if (globalRetention.maxRetention() != null && globalRetention.maxRetention().getMillis() < dataStreamRetention.getMillis()) { + return Tuple.tuple(globalRetention.maxRetention(), RetentionSource.MAX_GLOBAL_RETENTION); } else { return Tuple.tuple(dataStreamRetention, RetentionSource.DATA_STREAM_CONFIGURATION); } @@ -187,7 +193,7 @@ public Tuple getEffectiveDataRetentionWithSource(@Nu /** * The least amount of time data the data stream is requesting es to keep the data. - * NOTE: this can be overridden by the {@link DataStreamLifecycle#getEffectiveDataRetention(DataStreamGlobalRetention)}. + * NOTE: this can be overridden by the {@link DataStreamLifecycle#getEffectiveDataRetention(DataStreamGlobalRetention,boolean)}. * @return the time period or null, null represents that data should never be deleted. */ @Nullable @@ -199,12 +205,16 @@ public TimeValue getDataStreamRetention() { * This method checks if the effective retention is matching what the user has configured; if the effective retention * does not match then it adds a warning informing the user about the effective retention and the source. 
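A short sketch of the precedence implemented in getEffectiveDataRetentionWithSource, using only calls visible in this hunk; 'lifecycle' stands for any enabled lifecycle whose own data retention is 30 days, and all values are hypothetical:

    DataStreamGlobalRetention global = new DataStreamGlobalRetention(TimeValue.timeValueDays(7), TimeValue.timeValueDays(14));
    TimeValue regular  = lifecycle.getEffectiveDataRetention(global, false); // 14d: the 30d data stream retention is capped by the global max
    TimeValue internal = lifecycle.getEffectiveDataRetention(global, true);  // 30d: global retention is ignored for internal data streams
    // With no data-stream-level retention configured, the global default (7d) applies; absent a default, the global max (14d) is used.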
*/ - public void addWarningHeaderIfDataRetentionNotEffective(@Nullable DataStreamGlobalRetention globalRetention) { - if (globalRetention == null) { + public void addWarningHeaderIfDataRetentionNotEffective( + @Nullable DataStreamGlobalRetention globalRetention, + boolean isInternalDataStream + ) { + if (globalRetention == null || isInternalDataStream) { return; } Tuple effectiveDataRetentionWithSource = getEffectiveDataRetentionWithSource( - globalRetention + globalRetention, + isInternalDataStream ); if (effectiveDataRetentionWithSource.v1() == null) { return; @@ -318,7 +328,7 @@ public String toString() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContent(builder, params, null, null); + return toXContent(builder, params, null, null, false); } /** @@ -326,12 +336,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * and injects the RolloverConditions if they exist. * In order to request the effective retention you need to set {@link #INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME} to true * in the XContent params. + * NOTE: this is used for serialising user output and the result is never deserialised in elasticsearch. */ public XContentBuilder toXContent( XContentBuilder builder, Params params, @Nullable RolloverConfiguration rolloverConfiguration, - @Nullable DataStreamGlobalRetention globalRetention + @Nullable DataStreamGlobalRetention globalRetention, + boolean isInternalDataStream ) throws IOException { builder.startObject(); builder.field(ENABLED_FIELD.getPreferredName(), enabled); @@ -342,11 +354,14 @@ public XContentBuilder toXContent( builder.field(DATA_RETENTION_FIELD.getPreferredName(), dataRetention.value().getStringRep()); } } + Tuple effectiveDataRetentionWithSource = getEffectiveDataRetentionWithSource( + globalRetention, + isInternalDataStream + ); if (params.paramAsBoolean(INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false)) { - Tuple effectiveRetention = getEffectiveDataRetentionWithSource(globalRetention); - if (effectiveRetention.v1() != null) { - builder.field(EFFECTIVE_RETENTION_FIELD.getPreferredName(), effectiveRetention.v1().getStringRep()); - builder.field(RETENTION_SOURCE_FIELD.getPreferredName(), effectiveRetention.v2().displayName()); + if (effectiveDataRetentionWithSource.v1() != null) { + builder.field(EFFECTIVE_RETENTION_FIELD.getPreferredName(), effectiveDataRetentionWithSource.v1().getStringRep()); + builder.field(RETENTION_SOURCE_FIELD.getPreferredName(), effectiveDataRetentionWithSource.v2().displayName()); } } @@ -356,25 +371,29 @@ public XContentBuilder toXContent( } if (rolloverConfiguration != null) { builder.field(ROLLOVER_FIELD.getPreferredName()); - rolloverConfiguration.evaluateAndConvertToXContent(builder, params, getEffectiveDataRetention(globalRetention)); + rolloverConfiguration.evaluateAndConvertToXContent(builder, params, effectiveDataRetentionWithSource.v1()); } builder.endObject(); return builder; } + /** + * This method deserialises XContent format as it was generated ONLY by {@link DataStreamLifecycle#toXContent(XContentBuilder, Params)}. + * It does not support the output of + * {@link DataStreamLifecycle#toXContent(XContentBuilder, Params, RolloverConfiguration, DataStreamGlobalRetention, boolean)} because + * this output is enriched with derived fields we do not handle in this deserialisation. 
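For context, the params helper renamed later in this hunk is a thin wrapper that switches on the include-effective-retention flag for the enriched, user-facing serialisation; a hedged sketch:

    // Sketch only: wraps the given params so the enriched toXContent variant emits the
    // effective retention and the source it was determined by.
    ToXContent.Params enriched = DataStreamLifecycle.addEffectiveRetentionParams(ToXContent.EMPTY_PARAMS);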
+ */ public static DataStreamLifecycle fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } /** - * Adds a retention param to signal that this serialisation should include the effective retention metadata + * Adds a retention param to signal that this serialisation should include the effective retention metadata. + * @param params the XContent params to be extended with the new flag + * @return XContent params with `include_effective_retention` set to true. If the flag exists it will override it. */ - public static ToXContent.Params maybeAddEffectiveRetentionParams(ToXContent.Params params) { - boolean shouldAddEffectiveRetention = Objects.equals(params.param(RestRequest.PATH_RESTRICTED), "serverless"); - return new DelegatingMapParams( - Map.of(INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, Boolean.toString(shouldAddEffectiveRetention)), - params - ); + public static ToXContent.Params addEffectiveRetentionParams(ToXContent.Params params) { + return new DelegatingMapParams(INCLUDE_EFFECTIVE_RETENTION_PARAMS, params); } public static Builder newBuilder(DataStreamLifecycle lifecycle) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 2b65a68e8d43c..742439c9a2484 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamInput; @@ -129,7 +130,8 @@ public class IndexMetadata implements Diffable, ToXContentFragmen ); public static final ClusterBlock INDEX_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock( 12, - "disk usage exceeded flood-stage watermark, index has read-only-allow-delete block", + "disk usage exceeded flood-stage watermark, index has read-only-allow-delete block; for more information, see " + + ReferenceDocs.FLOOD_STAGE_WATERMARK, false, false, true, @@ -1623,7 +1625,7 @@ private static class IndexMetadataDiff implements Diff { } primaryTerms = in.readVLongArray(); mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MAPPING_DIFF_VALUE_READER); - if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_FIELDS_METADATA)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { inferenceFields = DiffableUtils.readImmutableOpenMapDiff( in, DiffableUtils.getStringKeySerializer(), @@ -1691,7 +1693,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeVLongArray(primaryTerms); mappings.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_FIELDS_METADATA)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { inferenceFields.writeTo(out); } aliases.writeTo(out); @@ -1784,7 +1786,7 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function builder.putInferenceField(f)); } @@ -1856,7 +1858,7 @@ public void writeTo(StreamOutput out, boolean mappingsAsHash) throws IOException mapping.writeTo(out); } } - if 
(out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_FIELDS_METADATA)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) {
             out.writeCollection(inferenceFields.values());
         }
         out.writeCollection(aliases.values());
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
index b5ee0ebd7e387..07dcb7baf0777 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
@@ -25,7 +25,6 @@
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
-import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -514,7 +513,6 @@ private ClusterState applyCreateIndexWithTemporaryService(
         ClusterState updated = clusterStateCreateIndex(
             currentState,
-            request.blocks(),
             indexMetadata,
             metadataTransformer,
             allocationService.getShardRoutingRoleStrategy()
@@ -1231,7 +1229,6 @@ public static List resolveAndValidateAliases(
      */
     static ClusterState clusterStateCreateIndex(
         ClusterState currentState,
-        Set clusterBlocks,
         IndexMetadata indexMetadata,
         BiConsumer metadataTransformer,
         ShardRoutingRoleStrategy shardRoutingRoleStrategy
@@ -1245,15 +1242,13 @@ static ClusterState clusterStateCreateIndex(
             newMetadata = currentState.metadata().withAddedIndex(indexMetadata);
         }
 
-        String indexName = indexMetadata.getIndex().getName();
-        ClusterBlocks.Builder blocks = createClusterBlocksBuilder(currentState, indexName, clusterBlocks);
-        blocks.updateBlocks(indexMetadata);
+        var blocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());
+        blocksBuilder.updateBlocks(indexMetadata);
 
-        ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metadata(newMetadata).build();
+        var routingTableBuilder = RoutingTable.builder(shardRoutingRoleStrategy, currentState.routingTable())
+            .addAsNew(newMetadata.index(indexMetadata.getIndex().getName()));
 
-        RoutingTable.Builder routingTableBuilder = RoutingTable.builder(shardRoutingRoleStrategy, updatedState.routingTable())
-            .addAsNew(updatedState.metadata().index(indexName));
-        return ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build();
+        return ClusterState.builder(currentState).blocks(blocksBuilder).metadata(newMetadata).routingTable(routingTableBuilder).build();
     }
 
     static IndexMetadata buildIndexMetadata(
@@ -1326,16 +1321,6 @@ private static IndexMetadata.Builder createIndexMetadataBuilder(
         return builder;
     }
 
-    private static ClusterBlocks.Builder createClusterBlocksBuilder(ClusterState currentState, String index, Set blocks) {
-        ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());
-        if (blocks.isEmpty() == false) {
-            for (ClusterBlock block : blocks) {
-                blocksBuilder.addIndexBlock(index, block);
-            }
-        }
-        return blocksBuilder;
-    }
-
     private static void updateIndexMappingsAndBuildSortOrder(
         IndexService indexService,
         CreateIndexClusterStateUpdateRequest request,
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
index 7363e71d65c72..9e8a99351d84a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
@@ -41,18 +41,18 @@ public class MetadataDataStreamsService {
 
     private final ClusterService clusterService;
     private final IndicesService indicesService;
-    private final DataStreamGlobalRetentionResolver globalRetentionResolver;
+    private final DataStreamGlobalRetentionSettings globalRetentionSettings;
     private final MasterServiceTaskQueue updateLifecycleTaskQueue;
     private final MasterServiceTaskQueue setRolloverOnWriteTaskQueue;
 
     public MetadataDataStreamsService(
         ClusterService clusterService,
         IndicesService indicesService,
-        DataStreamGlobalRetentionResolver globalRetentionResolver
+        DataStreamGlobalRetentionSettings globalRetentionSettings
     ) {
         this.clusterService = clusterService;
         this.indicesService = indicesService;
-        this.globalRetentionResolver = globalRetentionResolver;
+        this.globalRetentionSettings = globalRetentionSettings;
         ClusterStateTaskExecutor updateLifecycleExecutor = new SimpleBatchedAckListenerTaskExecutor<>() {
             @Override
@@ -214,17 +214,15 @@ static ClusterState modifyDataStream(
     ClusterState updateDataLifecycle(ClusterState currentState, List dataStreamNames, @Nullable DataStreamLifecycle lifecycle) {
         Metadata metadata = currentState.metadata();
         Metadata.Builder builder = Metadata.builder(metadata);
-        boolean atLeastOneDataStreamIsNotSystem = false;
+        boolean onlyInternalDataStreams = true;
         for (var dataStreamName : dataStreamNames) {
             var dataStream = validateDataStream(metadata, dataStreamName);
             builder.put(dataStream.copy().setLifecycle(lifecycle).build());
-            atLeastOneDataStreamIsNotSystem = atLeastOneDataStreamIsNotSystem || dataStream.isSystem() == false;
+            onlyInternalDataStreams = onlyInternalDataStreams && dataStream.isInternal();
         }
         if (lifecycle != null) {
-            if (atLeastOneDataStreamIsNotSystem) {
-                // We don't issue any warnings if all data streams are system data streams
-                lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetentionResolver.resolve(currentState));
-            }
+            // We don't issue any warnings if all data streams are internal data streams
+            lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetentionSettings.get(), onlyInternalDataStreams);
         }
         return ClusterState.builder(currentState).metadata(builder.build()).build();
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java
index 9aebc9a2b810d..a87912f3ffc8d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java
@@ -23,11 +23,11 @@
 import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.snapshots.RestoreService;
 import org.elasticsearch.snapshots.SnapshotInProgressException;
 import org.elasticsearch.snapshots.SnapshotsService;
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java
index 26a968d1b201f..41ef8ed6aa470 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java
@@ -23,7 +23,6 @@
 import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.CloseUtils;
@@ -33,6 +32,7 @@
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 
 import java.io.IOException;
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java
index be6d6f3ef1e53..be12198cbaaaa 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java
@@ -50,7 +50,6 @@
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
@@ -67,6 +66,7 @@
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.ShardLimitValidator;
+import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.snapshots.RestoreService;
 import org.elasticsearch.snapshots.SnapshotInProgressException;
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
index e9658e71f895e..ff23f50ef7afe 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
@@ -26,7 +26,6 @@
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.logging.DeprecationCategory;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.HeaderWarning;
@@ -55,6 +54,7 @@
 import org.elasticsearch.indices.SystemIndices;
 import org.elasticsearch.ingest.IngestMetadata;
 import org.elasticsearch.ingest.PipelineConfiguration;
+import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 
 import java.io.IOException;
@@ -137,7 +137,7 @@ public class MetadataIndexTemplateService {
     private final NamedXContentRegistry xContentRegistry;
     private final SystemIndices systemIndices;
     private final Set indexSettingProviders;
-    private final DataStreamGlobalRetentionResolver globalRetentionResolver;
+    private final DataStreamGlobalRetentionSettings globalRetentionSettings;
 
     /**
      * This is the cluster state task executor for all template-based actions.
@@ -183,7 +183,7 @@ public MetadataIndexTemplateService(
         NamedXContentRegistry xContentRegistry,
         SystemIndices systemIndices,
         IndexSettingProviders indexSettingProviders,
-        DataStreamGlobalRetentionResolver globalRetentionResolver
+        DataStreamGlobalRetentionSettings globalRetentionSettings
     ) {
         this.clusterService = clusterService;
         this.taskQueue = clusterService.createTaskQueue("index-templates", Priority.URGENT, TEMPLATE_TASK_EXECUTOR);
@@ -193,7 +193,7 @@ public MetadataIndexTemplateService(
         this.xContentRegistry = xContentRegistry;
         this.systemIndices = systemIndices;
         this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders();
-        this.globalRetentionResolver = globalRetentionResolver;
+        this.globalRetentionSettings = globalRetentionSettings;
     }
 
     public void removeTemplates(
@@ -345,7 +345,7 @@ public ClusterState addComponentTemplate(
                     tempStateWithComponentTemplateAdded.metadata(),
                     composableTemplateName,
                     composableTemplate,
-                    globalRetentionResolver.resolve(currentState)
+                    globalRetentionSettings.get()
                 );
                 validateIndexTemplateV2(composableTemplateName, composableTemplate, tempStateWithComponentTemplateAdded);
             } catch (Exception e) {
@@ -369,9 +369,8 @@ public ClusterState addComponentTemplate(
         }
 
         if (finalComponentTemplate.template().lifecycle() != null) {
-            finalComponentTemplate.template()
-                .lifecycle()
-                .addWarningHeaderIfDataRetentionNotEffective(globalRetentionResolver.resolve(currentState));
+            // We do not know if this lifecycle will belong to an internal data stream, so we fall back to a non internal.
+            finalComponentTemplate.template().lifecycle().addWarningHeaderIfDataRetentionNotEffective(globalRetentionSettings.get(), false);
         }
 
         logger.info("{} component template [{}]", existing == null ? "adding" : "updating", name);
@@ -732,7 +731,7 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT
         validate(name, templateToValidate);
         validateDataStreamsStillReferenced(currentState, name, templateToValidate);
-        validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionResolver.resolve(currentState));
+        validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionSettings.get());
 
         if (templateToValidate.isDeprecated() == false) {
             validateUseOfDeprecatedComponentTemplates(name, templateToValidate, currentState.metadata().componentTemplates());
@@ -817,7 +816,12 @@ static void validateLifecycle(
                         + "] specifies lifecycle configuration that can only be used in combination with a data stream"
                 );
             }
-            lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetention);
+            if (globalRetention != null) {
+                // We cannot know for sure if the template will apply to internal data streams, so we use a simpler heuristic:
+                // If all the index patterns start with a dot, we consider that all the connected data streams are internal.
+                boolean isInternalDataStream = template.indexPatterns().stream().allMatch(indexPattern -> indexPattern.charAt(0) == '.');
+                lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetention, isInternalDataStream);
+            }
         }
     }
@@ -1309,7 +1313,12 @@ static List> findV2CandidateTemplates(Met
         for (Map.Entry entry : metadata.templatesV2().entrySet()) {
             final String name = entry.getKey();
             final ComposableIndexTemplate template = entry.getValue();
-            if (isHidden == false) {
+            /*
+             * We do not ordinarily return match-all templates for hidden indices. But all backing indices for data streams are hidden,
+             * and we do want to return even match-all templates for those. Not doing so can result in a situation where a data stream is
+             * built with a template that none of its indices match.
+             */
+            if (isHidden == false || template.getDataStreamTemplate() != null) {
                 final boolean matched = template.indexPatterns().stream().anyMatch(patternMatchPredicate);
                 if (matched) {
                     candidates.add(Tuple.tuple(name, template));
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java
index 4ed18489c44b0..1c956c7f78f32 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java
@@ -23,7 +23,6 @@
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.Index;
@@ -33,6 +32,7 @@
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.index.mapper.Mapping;
 import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.injection.guice.Inject;
 
 import java.util.ArrayList;
 import java.util.HashMap;
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java
index 08b0d56da782a..38eefa4085527 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java
@@ -125,6 +125,14 @@ public boolean contains(String nodeId, SingleNodeShutdownMetadata.Type type) {
         return get(nodeId, type) != null;
     }
 
+    /**
+     * Checks if the provided node is scheduled for being permanently removed from the cluster.
+     */
+    public boolean isNodeMarkedForRemoval(String nodeId) {
+        var singleNodeShutdownMetadata = get(nodeId);
+        return singleNodeShutdownMetadata != null && singleNodeShutdownMetadata.getType().isRemovalType();
+    }
+
     /**
      * Add or update the shutdown metadata for a single node.
      * @param nodeShutdownMetadata The single node shutdown metadata to add or update.
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
index 70440adc4ebbe..0a045261e07b8 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
@@ -70,7 +70,11 @@ public class Template implements SimpleDiffable