diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 4446952fec2bb..720d6a7c2efb6 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -20,6 +20,7 @@ import org.elasticsearch.gradle.test.GradleTestPolicySetupPlugin; import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import org.gradle.api.Action; +import org.gradle.api.JavaVersion; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; @@ -112,7 +113,6 @@ public void execute(Task t) { test.jvmArgs( "-Xmx" + System.getProperty("tests.heap.size", "512m"), "-Xms" + System.getProperty("tests.heap.size", "512m"), - "-Djava.security.manager=allow", "-Dtests.testfeatures.enabled=true", "--add-opens=java.base/java.util=ALL-UNNAMED", // TODO: only open these for mockito when it is modularized @@ -127,6 +127,13 @@ public void execute(Task t) { ); test.getJvmArgumentProviders().add(new SimpleCommandLineArgumentProvider("-XX:HeapDumpPath=" + heapdumpDir)); + test.getJvmArgumentProviders().add(() -> { + if (test.getJavaVersion().compareTo(JavaVersion.VERSION_23) <= 0) { + return List.of("-Djava.security.manager=allow"); + } else { + return List.of(); + } + }); String argline = System.getProperty("tests.jvm.argline"); if (argline != null) { diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java index 2068ee4447971..2107156902487 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/test/GradleTestPolicySetupPlugin.java @@ -9,11 +9,14 @@ package org.elasticsearch.gradle.test; +import org.gradle.api.JavaVersion; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.invocation.Gradle; import org.gradle.api.tasks.testing.Test; +import java.util.List; + public class GradleTestPolicySetupPlugin implements Plugin { @Override @@ -23,8 +26,13 @@ public void apply(Project project) { test.systemProperty("tests.gradle", true); test.systemProperty("tests.task", test.getPath()); - // Flag is required for later Java versions since our tests use a custom security manager - test.jvmArgs("-Djava.security.manager=allow"); + test.getJvmArgumentProviders().add(() -> { + if (test.getJavaVersion().compareTo(JavaVersion.VERSION_23) <= 0) { + return List.of("-Djava.security.manager=allow"); + } else { + return List.of(); + } + }); SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider(); // don't track these as inputs since they contain absolute paths and break cache relocatability diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index b17ad7c87e3ff..fe0f82560894c 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -11,6 +11,8 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.jdk.RuntimeVersionFeature; import java.io.IOException; import java.nio.file.Files; @@ -137,9 +139,13 @@ private static Stream<String> maybeWorkaroundG1Bug() { return Stream.of(); } + @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) private static Stream<String> maybeAllowSecurityManager() { - // Will become conditional on useEntitlements once entitlements can run without SM - return Stream.of("-Djava.security.manager=allow"); + if (RuntimeVersionFeature.isSecurityManagerAvailable()) { + // Will become conditional on useEntitlements once entitlements can run without SM + return Stream.of("-Djava.security.manager=allow"); + } + return Stream.of(); } private static Stream<String> maybeAttachEntitlementAgent(boolean useEntitlements) { diff --git a/docs/changelog/114618.yaml b/docs/changelog/114618.yaml new file mode 100644 index 0000000000000..ada402fe35742 --- /dev/null +++ b/docs/changelog/114618.yaml @@ -0,0 +1,5 @@ +pr: 114618 +summary: Add a new index setting to skip recovery source when synthetic source is enabled +area: Logs +type: enhancement +issues: [] diff --git a/docs/changelog/117469.yaml b/docs/changelog/117469.yaml new file mode 100644 index 0000000000000..cfb14f78cb578 --- /dev/null +++ b/docs/changelog/117469.yaml @@ -0,0 +1,6 @@ +pr: 117469 +summary: Handle exceptions in query phase can match +area: Search +type: bug +issues: + - 104994 diff --git a/docs/changelog/118025.yaml b/docs/changelog/118025.yaml new file mode 100644 index 0000000000000..9b615f4d5e621 --- /dev/null +++ b/docs/changelog/118025.yaml @@ -0,0 +1,5 @@ +pr: 118025 +summary: Update sparse text embeddings API route for Inference Service +area: Inference +type: enhancement +issues: [] diff --git a/docs/changelog/118104.yaml b/docs/changelog/118104.yaml new file mode 100644 index 0000000000000..eb8ac661e9f93 --- /dev/null +++ b/docs/changelog/118104.yaml @@ -0,0 +1,12 @@ +pr: 118104 +summary: Remove old `_knn_search` tech preview API in v9 +area: Vector Search +type: breaking +issues: [] +breaking: + title: Remove old `_knn_search` tech preview API in v9 + area: REST API + details: The original, tech-preview API for vector search, `_knn_search`, has been removed in v9. For all vector search + operations, use the `_search` endpoint. + impact: The `_knn_search` API is now inaccessible without providing a compatible-with flag for v8. 
+ notable: false diff --git a/docs/changelog/118177.yaml b/docs/changelog/118177.yaml new file mode 100644 index 0000000000000..5201fec3db306 --- /dev/null +++ b/docs/changelog/118177.yaml @@ -0,0 +1,6 @@ +pr: 118177 +summary: Fixing bedrock event executor terminated cache issue +area: Machine Learning +type: bug +issues: + - 117916 diff --git a/docs/changelog/118267.yaml b/docs/changelog/118267.yaml new file mode 100644 index 0000000000000..3e3920caeb0f9 --- /dev/null +++ b/docs/changelog/118267.yaml @@ -0,0 +1,5 @@ +pr: 118267 +summary: Adding get migration reindex status +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/118354.yaml b/docs/changelog/118354.yaml new file mode 100644 index 0000000000000..e2d72db121276 --- /dev/null +++ b/docs/changelog/118354.yaml @@ -0,0 +1,5 @@ +pr: 118354 +summary: Fix log message format bugs +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/118378.yaml b/docs/changelog/118378.yaml new file mode 100644 index 0000000000000..d6c388b671968 --- /dev/null +++ b/docs/changelog/118378.yaml @@ -0,0 +1,5 @@ +pr: 118378 +summary: Opt into extra data stream resolution +area: ES|QL +type: bug +issues: [] diff --git a/docs/plugins/analysis-nori.asciidoc b/docs/plugins/analysis-nori.asciidoc index 0d3e76f71d238..9eb3bf07fbd30 100644 --- a/docs/plugins/analysis-nori.asciidoc +++ b/docs/plugins/analysis-nori.asciidoc @@ -475,7 +475,7 @@ The input is untokenized text and the result is the single term attribute emitte - 영영칠 -> 7 - 일영영영 -> 1000 - 삼천2백2십삼 -> 3223 -- 조육백만오천일 -> 1000006005001 +- 일조육백만오천일 -> 1000006005001 - 3.2천 -> 3200 - 1.2만345.67 -> 12345.67 - 4,647.100 -> 4647.1 diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index e6e11d6dd539f..199a59a5b143c 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -232,8 +232,8 @@ it will be set to the length of the first vector added to the field. `index`:: (Optional, Boolean) -If `true`, you can search this field using the <>. Defaults to `true`. +If `true`, you can search this field using the <> +or <>. Defaults to `true`. [[dense-vector-similarity]] `similarity`:: diff --git a/docs/reference/migration/migrate_9_0.asciidoc b/docs/reference/migration/migrate_9_0.asciidoc index 5048220966bba..8f0b16e31b56e 100644 --- a/docs/reference/migration/migrate_9_0.asciidoc +++ b/docs/reference/migration/migrate_9_0.asciidoc @@ -244,6 +244,25 @@ The deprecated highlighting `force_source` parameter is no longer supported. Users should remove usages of the `force_source` parameter from their search requests. ==== +[discrete] +[[breaking_90_transforms_changes]] +==== {transforms-cap} changes + +[[updating_deprecated_transform_roles]] +.Updating deprecated {transform} roles (`data_frame_transforms_admin` and `data_frame_transforms_user`) +[%collapsible] +==== +*Details* + +The `data_frame_transforms_admin` and `data_frame_transforms_user` {transform} roles have been deprecated. + +*Impact* + +Users must update any existing {transforms} that use deprecated {transform} roles (`data_frame_transforms_admin` or `data_frame_transforms_user`) to use the new equivalent {transform} roles (`transform_admin` or `transform_user`). +To update the {transform} roles: + +1. Switch to a user with the `transform_admin` role (to replace `data_frame_transforms_admin`) or the `transform_user` role (to replace `data_frame_transforms_user`). +2. Call the <> with that user. 
+==== + [discrete] [[deprecated-9.0]] diff --git a/docs/reference/migration/migrate_9_0/transforms-migration-guide.asciidoc b/docs/reference/migration/migrate_9_0/transforms-migration-guide.asciidoc deleted file mode 100644 index d41c524d68d5c..0000000000000 --- a/docs/reference/migration/migrate_9_0/transforms-migration-guide.asciidoc +++ /dev/null @@ -1,9 +0,0 @@ -[[transforms-migration-guide]] -== {transforms-cap} migration guide -This migration guide helps you upgrade your {transforms} to work with the 9.0 release. Each section outlines a breaking change and any manual steps needed to upgrade your {transforms} to be compatible with 9.0. - - -=== Updating deprecated {transform} roles (`data_frame_transforms_admin` and `data_frame_transforms_user`) -If you have existing {transforms} that use deprecated {transform} roles (`data_frame_transforms_admin` or `data_frame_transforms_user`) you must update them to use the new equivalent {transform} roles (`transform_admin` or `transform_user`). To update your {transform} roles: -1. Switch to a user with the `transform_admin` role (to replace `data_frame_transforms_admin`) or the `transform_user` role (to replace `data_frame_transforms_user`). -2. Call the <> with that user. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 506dff7891ad2..c3bf84fa600d2 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1942,3 +1942,8 @@ Refer to <>. === Delete geoip database configuration API Refer to <>. + +[role="exclude",id="knn-search-api"] +=== Delete _knn_search API + +Refer to <>. diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index b39afff876eed..70ffe02e44d95 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -50,8 +50,6 @@ include::search/async-search.asciidoc[] include::search/point-in-time-api.asciidoc[] -include::search/knn-search.asciidoc[] - include::search/retriever.asciidoc[] include::search/rrf.asciidoc[] diff --git a/docs/reference/search/knn-search.asciidoc b/docs/reference/search/knn-search.asciidoc deleted file mode 100644 index 78e3e13b09fee..0000000000000 --- a/docs/reference/search/knn-search.asciidoc +++ /dev/null @@ -1,146 +0,0 @@ -[[knn-search-api]] -=== kNN search API -++++ -kNN search -++++ - -deprecated::[8.4.0,"The kNN search API has been replaced by the <> in the search API."] - -Performs a k-nearest neighbor (kNN) search and returns the matching documents. - -//// -[source,console] ----- -PUT my-index -{ - "mappings": { - "properties": { - "image_vector": { - "type": "dense_vector", - "dims": 3, - "index": true, - "similarity": "l2_norm" - } - } - } -} - -PUT my-index/_doc/1?refresh -{ - "image_vector" : [0.5, 10, 6] -} ----- -//// - -[source,console] ----- -GET my-index/_knn_search -{ - "knn": { - "field": "image_vector", - "query_vector": [0.3, 0.1, 1.2], - "k": 10, - "num_candidates": 100 - }, - "_source": ["name", "file_type"] -} ----- -// TEST[continued] -// TEST[warning:The kNN search API has been replaced by the `knn` option in the search API.] - -[[knn-search-api-request]] -==== {api-request-title} - -`GET /_knn_search` - -`POST /_knn_search` - -[[knn-search-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the `read` -<> for the target data stream, index, -or alias. - -[[knn-search-api-desc]] -==== {api-description-title} - -The kNN search API performs a k-nearest neighbor (kNN) search on a -<> field. 
Given a query vector, it finds the _k_ -closest vectors and returns those documents as search hits. - -//tag::hnsw-algorithm[] -{es} uses the https://arxiv.org/abs/1603.09320[HNSW algorithm] to support -efficient kNN search. Like most kNN algorithms, HNSW is an approximate method -that sacrifices result accuracy for improved search speed. This means the -results returned are not always the true _k_ closest neighbors. -//end::hnsw-algorithm[] - -The kNN search API supports restricting the search using a filter. The search -will return the top `k` documents that also match the filter query. - -[[knn-search-api-path-params]] -==== {api-path-parms-title} - -``:: -(Optional, string) Comma-separated list of data streams, indices, and aliases -to search. Supports wildcards (`*`). To search all data streams and indices, -use `*` or `_all`. - -[role="child_attributes"] -[[knn-search-api-query-params]] -==== {api-query-parms-title} - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=routing] - -[role="child_attributes"] -[[knn-search-api-request-body]] -==== {api-request-body-title} - -`filter`:: -(Optional, <>) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-filter] - -`knn`:: -(Required, object) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn] -+ -.Properties of `knn` object -[%collapsible%open] -==== -`field`:: -(Required, string) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-field] - -`k`:: -(Optional, integer) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-k] - -`num_candidates`:: -(Optional, integer) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-num-candidates] - -`query_vector`:: -(Required, array of floats or string) -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector] -==== - -include::{es-ref-dir}/search/search.asciidoc[tag=docvalue-fields-def] -include::{es-ref-dir}/search/search.asciidoc[tag=fields-param-def] -include::{es-ref-dir}/search/search.asciidoc[tag=source-filtering-def] -include::{es-ref-dir}/search/search.asciidoc[tag=stored-fields-def] - -[role="child_attributes"] -[[knn-search-api-response-body]] -==== {api-response-body-title} - -A kNN search response has the exact same structure as a -<>. However, certain sections -have a meaning specific to kNN search: - -* The <> is determined by -the similarity between the query and document vector. See -<>. -* The `hits.total` object contains the total number of nearest neighbor -candidates considered, which is `num_candidates * num_shards`. The -`hits.total.relation` will always be `eq`, indicating an exact value. diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index 6fb7f1747051f..59a903b95e4f8 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -1058,8 +1058,10 @@ PUT image-index * When using kNN search in <>, the <> option is not supported. -* {blank} -include::{es-ref-dir}/search/knn-search.asciidoc[tag=hnsw-algorithm] +* {es} uses the https://arxiv.org/abs/1603.09320[HNSW algorithm] to support +efficient kNN search. Like most kNN algorithms, HNSW is an approximate method +that sacrifices result accuracy for improved search speed. This means the +results returned are not always the true _k_ closest neighbors. 
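To make the accuracy trade-off concrete, the following sketch (illustrative only, not part of this change) shows the exact brute-force scan that HNSW approximates: it scores every indexed vector with the `l2_norm` distance, which is precisely the linear pass the HNSW graph traversal avoids at the risk of occasionally missing a true neighbor.

[source,java]
----
import java.util.Comparator;
import java.util.List;
import java.util.stream.IntStream;

public class ExactKnnSketch {

    // l2_norm distance, as used by the dense_vector mapping above: smaller is closer.
    static double l2(float[] a, float[] b) {
        double sum = 0;
        for (int i = 0; i < a.length; i++) {
            double d = a[i] - b[i];
            sum += d * d;
        }
        return Math.sqrt(sum);
    }

    // Exact kNN: rank every document vector against the query and keep the top k.
    static List<Integer> topK(float[][] docs, float[] query, int k) {
        return IntStream.range(0, docs.length)
            .boxed()
            .sorted(Comparator.comparingDouble(i -> l2(docs[i], query)))
            .limit(k)
            .toList();
    }

    public static void main(String[] args) {
        float[][] docs = { { 0.5f, 10f, 6f }, { -0.5f, 10f, 10f }, { 0.3f, 0.1f, 1.2f } };
        // Prints the indices of the two nearest vectors, e.g. [2, 0].
        System.out.println(topK(docs, new float[] { 0.3f, 0.1f, 1.2f }, 2));
    }
}
----

An HNSW index answers the same query by walking a layered proximity graph instead of scoring all of `docs`, trading a small recall loss for sub-linear search time.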
NOTE: Approximate kNN search always uses the <> search type in order to gather diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 58feb55f32e2f..8694d7f5b46c6 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -39,7 +39,7 @@ adjust memory usage in Docker Desktop by going to **Settings > Resources**. ---- docker network create elastic ---- - +// REVIEWED[DEC.10.24] . Pull the {es} Docker image. + -- @@ -52,10 +52,11 @@ endif::[] ---- docker pull {docker-image} ---- +// REVIEWED[DEC.10.24] -- . Optional: Install -https://docs.sigstore.dev/system_config/installation/[Cosign] for your +https://docs.sigstore.dev/cosign/system_config/installation/[Cosign] for your environment. Then use Cosign to verify the {es} image's signature. + [[docker-verify-signature]] @@ -64,6 +65,7 @@ environment. Then use Cosign to verify the {es} image's signature. wget https://artifacts.elastic.co/cosign.pub cosign verify --key cosign.pub {docker-image} ---- +// REVIEWED[DEC.10.24] + The `cosign` command prints the check results and the signature payload in JSON format: + @@ -75,6 +77,7 @@ The following checks were performed on each of these signatures: - Existence of the claims in the transparency log was verified offline - The signatures were verified against the specified public key ---- +// REVIEWED[DEC.10.24] . Start an {es} container. + @@ -82,6 +85,7 @@ The following checks were performed on each of these signatures: ---- docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB {docker-image} ---- +// REVIEWED[DEC.10.24] + TIP: Use the `-m` flag to set a memory limit for the container. This removes the need to <>. @@ -95,6 +99,7 @@ If you intend to use the {ml} capabilities, then start the container with this c ---- docker run --name es01 --net elastic -p 9200:9200 -it -m 6GB -e "xpack.ml.use_auto_machine_memory_percent=true" {docker-image} ---- +// REVIEWED[DEC.10.24] The command prints the `elastic` user password and an enrollment token for {kib}. . Copy the generated `elastic` password and enrollment token. These credentials @@ -106,6 +111,7 @@ credentials using the following commands. docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana ---- +// REVIEWED[DEC.10.24] + We recommend storing the `elastic` password as an environment variable in your shell. Example: + @@ -113,6 +119,7 @@ We recommend storing the `elastic` password as an environment variable in your s ---- export ELASTIC_PASSWORD="your_password" ---- +// REVIEWED[DEC.10.24] . Copy the `http_ca.crt` SSL certificate from the container to your local machine. + @@ -120,6 +127,7 @@ export ELASTIC_PASSWORD="your_password" ---- docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt . ---- +// REVIEWED[DEC.10.24] . Make a REST API call to {es} to ensure the {es} container is running. + @@ -128,6 +136,7 @@ docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt . curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 ---- // NOTCONSOLE +// REVIEWED[DEC.10.24] ===== Add more nodes @@ -137,6 +146,7 @@ curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200 ---- docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node ---- +// REVIEWED[DEC.10.24] + The enrollment token is valid for 30 minutes. 
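As a companion to the `curl` verification step above, here is a hedged Java sketch (illustrative, not part of this change; it assumes the `http_ca.crt` file and `ELASTIC_PASSWORD` variable created in the preceding steps) that makes the same authenticated HTTPS request using only the JDK:

[source,java]
----
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import java.io.FileInputStream;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.security.KeyStore;
import java.security.cert.CertificateFactory;
import java.util.Base64;

public class EsContainerCheck {
    public static void main(String[] args) throws Exception {
        // Trust only the CA certificate copied out of the es01 container.
        var ca = CertificateFactory.getInstance("X.509")
            .generateCertificate(new FileInputStream("http_ca.crt"));
        var keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
        keyStore.load(null, null);
        keyStore.setCertificateEntry("es-http-ca", ca);
        var tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(keyStore);
        var ssl = SSLContext.getInstance("TLS");
        ssl.init(null, tmf.getTrustManagers(), null);

        // Basic auth for the elastic user, mirroring `curl -u elastic:$ELASTIC_PASSWORD`.
        String credentials = "elastic:" + System.getenv("ELASTIC_PASSWORD");
        String basic = Base64.getEncoder().encodeToString(credentials.getBytes(StandardCharsets.UTF_8));

        HttpResponse<String> response = HttpClient.newBuilder().sslContext(ssl).build()
            .send(
                HttpRequest.newBuilder(URI.create("https://localhost:9200"))
                    .header("Authorization", "Basic " + basic)
                    .build(),
                HttpResponse.BodyHandlers.ofString()
            );
        System.out.println(response.body()); // cluster name, version, and tagline on success
    }
}
----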
@@ -146,6 +156,7 @@ The enrollment token is valid for 30 minutes. ---- docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB {docker-image} ---- +// REVIEWED[DEC.10.24] . Call the <> to verify the node was added to the cluster. + @@ -154,6 +165,7 @@ docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB {d curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cat/nodes ---- // NOTCONSOLE +// REVIEWED[DEC.10.24] [[run-kibana-docker]] ===== Run {kib} @@ -170,6 +182,7 @@ endif::[] ---- docker pull {kib-docker-image} ---- +// REVIEWED[DEC.10.24] -- . Optional: Verify the {kib} image's signature. @@ -179,6 +192,7 @@ docker pull {kib-docker-image} wget https://artifacts.elastic.co/cosign.pub cosign verify --key cosign.pub {kib-docker-image} ---- +// REVIEWED[DEC.10.24] . Start a {kib} container. + @@ -186,6 +200,7 @@ cosign verify --key cosign.pub {kib-docker-image} ---- docker run --name kib01 --net elastic -p 5601:5601 {kib-docker-image} ---- +// REVIEWED[DEC.10.24] . When {kib} starts, it outputs a unique generated link to the terminal. To access {kib}, open this link in a web browser. @@ -198,6 +213,7 @@ To regenerate the token, run: ---- docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana ---- +// REVIEWED[DEC.10.24] . Log in to {kib} as the `elastic` user with the password that was generated when you started {es}. @@ -208,6 +224,7 @@ To regenerate the password, run: ---- docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic ---- +// REVIEWED[DEC.10.24] [[remove-containers-docker]] ===== Remove containers @@ -226,6 +243,7 @@ docker rm es02 # Remove the {kib} container docker rm kib01 ---- +// REVIEWED[DEC.10.24] ===== Next steps @@ -306,6 +324,7 @@ ES_PORT=127.0.0.1:9200 ---- docker-compose up -d ---- +// REVIEWED[DEC.10.24] . After the cluster has started, open http://localhost:5601 in a web browser to access {kib}. @@ -321,6 +340,7 @@ is preserved and loaded when you restart the cluster with `docker-compose up`. ---- docker-compose down ---- +// REVIEWED[DEC.10.24] To delete the network, containers, and volumes when you stop the cluster, specify the `-v` option: @@ -329,6 +349,7 @@ specify the `-v` option: ---- docker-compose down -v ---- +// REVIEWED[DEC.10.24] ===== Next steps @@ -377,6 +398,7 @@ The `vm.max_map_count` setting must be set within the xhyve virtual machine: -------------------------------------------- screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty -------------------------------------------- +// REVIEWED[DEC.10.24] . Press enter and use `sysctl` to configure `vm.max_map_count`: + @@ -494,6 +516,7 @@ To check the Docker daemon defaults for ulimits, run: -------------------------------------------- docker run --rm {docker-image} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' -------------------------------------------- +// REVIEWED[DEC.10.24] If needed, adjust them in the Daemon or override them per container. For example, when using `docker run`, set: @@ -502,6 +525,7 @@ For example, when using `docker run`, set: -------------------------------------------- --ulimit nofile=65535:65535 -------------------------------------------- +// REVIEWED[DEC.10.24] ===== Disable swapping @@ -518,6 +542,7 @@ When using `docker run`, you can specify: ---- -e "bootstrap.memory_lock=true" --ulimit memlock=-1:-1 ---- +// REVIEWED[DEC.10.24] ===== Randomize published ports @@ -545,6 +570,7 @@ environment variable. 
For example, to use 1GB, use the following command. ---- docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es01 -p 9200:9200 --net elastic -it {docker-image} ---- +// REVIEWED[DEC.10.24] The `ES_JAVA_OPTS` variable overrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in production. @@ -616,6 +642,7 @@ If you mount the password file to `/run/secrets/bootstrapPassword.txt`, specify: -------------------------------------------- -e ELASTIC_PASSWORD_FILE=/run/secrets/bootstrapPassword.txt -------------------------------------------- +// REVIEWED[DEC.10.24] You can override the default command for the image to pass {es} configuration parameters as command line options. For example: diff --git a/libs/core/src/main/java/org/elasticsearch/jdk/RuntimeVersionFeature.java b/libs/core/src/main/java/org/elasticsearch/jdk/RuntimeVersionFeature.java new file mode 100644 index 0000000000000..fe6e73271599f --- /dev/null +++ b/libs/core/src/main/java/org/elasticsearch/jdk/RuntimeVersionFeature.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.jdk; + +import org.elasticsearch.core.UpdateForV9; + +public class RuntimeVersionFeature { + private RuntimeVersionFeature() {} + + @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // Remove once we removed all references to SecurityManager in code + public static boolean isSecurityManagerAvailable() { + return Runtime.version().feature() < 24; + } +} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java index 2fccb27fa6e6c..3277cb8f8e6c7 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java @@ -9,6 +9,8 @@ package org.elasticsearch.nativeaccess.jdk; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.nativeaccess.VectorSimilarityFunctions; import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.VectorLibrary; @@ -25,6 +27,8 @@ public final class JdkVectorLibrary implements VectorLibrary { + static final Logger logger = LogManager.getLogger(JdkVectorLibrary.class); + static final MethodHandle dot7u$mh; static final MethodHandle sqr7u$mh; @@ -36,6 +40,7 @@ public final class JdkVectorLibrary implements VectorLibrary { try { int caps = (int) vecCaps$mh.invokeExact(); + logger.info("vec_caps=" + caps); if (caps != 0) { if (caps == 2) { dot7u$mh = downcallHandle( diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index 473a86215e91e..d93afcf84afed 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -28,4 +28,5 @@ tasks.named('forbiddenApisMain').configure { tasks.named("jarHell").configure { enabled = false } tasks.named("testTestingConventions").configure { baseClass 'junit.framework.TestCase' + baseClass 'org.junit.Assert' } diff --git 
a/libs/secure-sm/src/test/java/org/elasticsearch/secure_sm/SecureSMTests.java b/libs/secure-sm/src/test/java/org/elasticsearch/secure_sm/SecureSMTests.java index 69c6973f57cdf..965696d13613f 100644 --- a/libs/secure-sm/src/test/java/org/elasticsearch/secure_sm/SecureSMTests.java +++ b/libs/secure-sm/src/test/java/org/elasticsearch/secure_sm/SecureSMTests.java @@ -9,27 +9,43 @@ package org.elasticsearch.secure_sm; -import junit.framework.TestCase; +import com.carrotsearch.randomizedtesting.JUnit3MethodProvider; +import com.carrotsearch.randomizedtesting.RandomizedRunner; +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; + +import org.elasticsearch.jdk.RuntimeVersionFeature; +import org.junit.BeforeClass; +import org.junit.runner.RunWith; import java.security.Permission; import java.security.Policy; import java.security.ProtectionDomain; import java.util.ArrayList; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; /** Simple tests for SecureSM */ -public class SecureSMTests extends TestCase { - static { +@TestMethodProviders({ JUnit3MethodProvider.class }) +@RunWith(RandomizedRunner.class) +public class SecureSMTests extends org.junit.Assert { + + @BeforeClass + public static void initialize() { + RandomizedTest.assumeFalse( + "SecurityManager has been permanently removed in JDK 24", + RuntimeVersionFeature.isSecurityManagerAvailable() == false + ); // install a mock security policy: // AllPermission to source code // ThreadPermission not granted anywhere else - final ProtectionDomain sourceCode = SecureSM.class.getProtectionDomain(); + final var sourceCode = Set.of(SecureSM.class.getProtectionDomain(), RandomizedRunner.class.getProtectionDomain()); Policy.setPolicy(new Policy() { @Override public boolean implies(ProtectionDomain domain, Permission permission) { - if (domain == sourceCode) { + if (sourceCode.contains(domain)) { return true; } else if (permission instanceof ThreadPermission) { return false; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 342dc14ec2bf3..71aee00405621 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -124,17 +124,13 @@ public AutoDateHistogramAggregationBuilder(String name) { public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in); numBuckets = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - minimumIntervalExpression = in.readOptionalString(); - } + minimumIntervalExpression = in.readOptionalString(); } @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(numBuckets); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - out.writeOptionalString(minimumIntervalExpression); - } + out.writeOptionalString(minimumIntervalExpression); } protected AutoDateHistogramAggregationBuilder( diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java 
b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java index 7f894d21bab1a..637efb9d91df7 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java @@ -259,6 +259,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index f527dcd42814c..bf693222a4b72 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -1025,7 +1025,7 @@ public void onResponse(Void unused) { // should be no other processes interacting with the repository. logger.warn( Strings.format( - "failed to clean up multipart upload [{}] of blob [{}][{}][{}]", + "failed to clean up multipart upload [%s] of blob [%s][%s][%s]", abortMultipartUploadRequest.getUploadId(), blobStore.getRepositoryMetadata().name(), abortMultipartUploadRequest.getBucketName(), diff --git a/muted-tests.yml b/muted-tests.yml index 12b7ea964969d..1b84d4dbe33de 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -274,32 +274,53 @@ tests: - class: org.elasticsearch.datastreams.DataStreamsClientYamlTestSuiteIT method: test {p0=data_stream/120_data_streams_stats/Multiple data stream} issue: https://github.com/elastic/elasticsearch/issues/118217 -- class: org.elasticsearch.xpack.security.operator.OperatorPrivilegesIT - method: testEveryActionIsEitherOperatorOnlyOrNonOperator - issue: https://github.com/elastic/elasticsearch/issues/118220 - class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/118224 - class: org.elasticsearch.packaging.test.ArchiveTests method: test60StartAndStop issue: https://github.com/elastic/elasticsearch/issues/118216 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=migrate/10_reindex/Test Reindex With Bad Data Stream Name} - issue: https://github.com/elastic/elasticsearch/issues/118272 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=migrate/10_reindex/Test Reindex With Unsupported Mode} - issue: https://github.com/elastic/elasticsearch/issues/118273 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=migrate/10_reindex/Test Reindex With Nonexistent Data Stream} - issue: https://github.com/elastic/elasticsearch/issues/118274 -- class: org.elasticsearch.index.codec.vectors.es818.ES818HnswBinaryQuantizedVectorsFormatTests - method: testSingleVectorCase - issue: https://github.com/elastic/elasticsearch/issues/118306 - class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests method: testBottomFieldSort issue: https://github.com/elastic/elasticsearch/issues/118214 - class: org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT method: testTopNThenEnrichRemote issue: https://github.com/elastic/elasticsearch/issues/118307 +- class: org.elasticsearch.xpack.remotecluster.CrossClusterEsqlRCS1UnavailableRemotesIT + method: testEsqlRcs1UnavailableRemoteScenarios + issue: 
https://github.com/elastic/elasticsearch/issues/118350 +- class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests + method: testSearcherId + issue: https://github.com/elastic/elasticsearch/issues/118374 +- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT + method: test {p0=/10_info/Info} + issue: https://github.com/elastic/elasticsearch/issues/118394 +- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT + method: test {p0=/11_nodes/Additional disk information} + issue: https://github.com/elastic/elasticsearch/issues/118395 +- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT + method: test {p0=/11_nodes/Test cat nodes output with full_id set} + issue: https://github.com/elastic/elasticsearch/issues/118396 +- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT + method: test {p0=/11_nodes/Test cat nodes output} + issue: https://github.com/elastic/elasticsearch/issues/118397 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/20_reindex_status/Test get reindex status with nonexistent task id} + issue: https://github.com/elastic/elasticsearch/issues/118401 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/10_reindex/Test Reindex With Nonexistent Data Stream} + issue: https://github.com/elastic/elasticsearch/issues/118274 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/10_reindex/Test Reindex With Bad Data Stream Name} + issue: https://github.com/elastic/elasticsearch/issues/118272 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=migrate/10_reindex/Test Reindex With Unsupported Mode} + issue: https://github.com/elastic/elasticsearch/issues/118273 +- class: org.elasticsearch.xpack.inference.InferenceCrudIT + method: testUnifiedCompletionInference + issue: https://github.com/elastic/elasticsearch/issues/118405 +- class: org.elasticsearch.xpack.security.operator.OperatorPrivilegesIT + method: testEveryActionIsEitherOperatorOnlyOrNonOperator + issue: https://github.com/elastic/elasticsearch/issues/118220 # Examples: # diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java index d98d53baf9015..f907870fc8254 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java @@ -263,7 +263,7 @@ private String getRollupIndexName() throws IOException { if (asMap.size() == 1) { return (String) asMap.keySet().toArray()[0]; } - logger.warn("--> No matching rollup name for path [%s]", endpoint); + logger.warn("--> No matching rollup name for path [{}]", endpoint); return null; } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java index bca0c26ad2c32..b1212913b7fb0 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java @@ -238,7 +238,7 @@ private String getRollupIndexName() throws IOException { if (asMap.size() == 1) { return (String) asMap.keySet().toArray()[0]; } - logger.warn("--> No matching rollup name for path [%s]", 
endpoint); + logger.warn("--> No matching rollup name for path [{}]", endpoint); return null; } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.get_reindex_status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.get_reindex_status.json new file mode 100644 index 0000000000000..057269598a7d8 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.get_reindex_status.json @@ -0,0 +1,31 @@ +{ + "migrate.get_reindex_status":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html", + "description":"This API returns the status of a migration reindex attempt for a data stream or index" + }, + "stability":"experimental", + "visibility":"private", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_migration/reindex/{index}/_status", + "methods":[ + "GET" + ], + "parts":{ + "index":{ + "type":"string", + "description":"The index or data stream name" + } + } + } + ] + } + } +} + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index b3d86a066550e..78a86e4026b30 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -215,8 +215,11 @@ setup: --- "kNN search in _knn_search endpoint": - skip: - features: [ "allowed_warnings" ] + features: [ "allowed_warnings", "headers" ] - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: @@ -240,8 +243,11 @@ setup: - requires: cluster_features: "gte_v8.2.0" reason: 'kNN with filtering added in 8.2' - test_runner_features: [ "allowed_warnings" ] + test_runner_features: [ "allowed_warnings", "headers" ] - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: @@ -262,6 +268,9 @@ setup: - match: { hits.hits.0.fields.name.0: "rabbit.jpg" } - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." 
knn_search: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml index db0437637fc20..81e6a9f91c101 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/50_dense_vector_field_usage.yml @@ -55,6 +55,9 @@ setup: reason: 'dense_vector field usage was added in 8.1' test_runner_features: ["allowed_warnings"] - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "The kNN search API has been replaced by the `knn` option in the search API." knn_search: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java index b6930d06c11ec..47f96aebacd7d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.test.ESIntegTestCase; @@ -26,6 +27,7 @@ import static org.elasticsearch.action.admin.indices.create.ShrinkIndexIT.assertNoResizeSourceIndexSettings; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -143,6 +145,51 @@ public void testResizeChangeSyntheticSource() { assertThat(error.getMessage(), containsString("can't change setting [index.mapping.source.mode] during resize")); } + public void testResizeChangeRecoveryUseSyntheticSource() { + prepareCreate("source").setSettings( + indexSettings(between(1, 5), 0).put("index.mode", "logsdb") + .put( + "index.version.created", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY, + IndexVersion.current() + ) + ) + ).setMapping("@timestamp", "type=date", "host.name", "type=keyword").get(); + updateIndexSettings(Settings.builder().put("index.blocks.write", true), "source"); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> { + indicesAdmin().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setSettings( + Settings.builder() + .put( + "index.version.created", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY, + IndexVersion.current() + ) + ) + .put("index.recovery.use_synthetic_source", true) + .put("index.mode", "logsdb") + .putNull("index.blocks.write") + .build() + ) + .get(); + }); + // The index.recovery.use_synthetic_source setting requires either index.mode or index.mapping.source.mode + // to be present in the settings. 
Since these are all unmodifiable settings with a non-deterministic evaluation + // order, any of them may trigger a failure first. + assertThat( + error.getMessage(), + anyOf( + containsString("can't change setting [index.mode] during resize"), + containsString("can't change setting [index.recovery.use_synthetic_source] during resize") + ) + ); + } + public void testResizeChangeIndexSorts() { prepareCreate("source").setSettings(indexSettings(between(1, 5), 0)) .setMapping("@timestamp", "type=date", "host.name", "type=keyword") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index 1c5d67d1fa40a..70689dc689673 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -336,7 +336,7 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); final int length = randomIntBetween(1, 8); final Map currentRetentionLeases = new LinkedHashMap<>(); - logger.info("adding retention [{}}] leases", length); + logger.info("adding retention [{}] leases", length); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index 6ffd5808cea73..870947db5bd85 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -715,7 +715,15 @@ public void testShardChangesWithDefaultDocType() throws Exception { } IndexShard shard = indexService.getShard(0); try ( - Translog.Snapshot luceneSnapshot = shard.newChangesSnapshot("test", 0, numOps - 1, true, randomBoolean(), randomBoolean()); + Translog.Snapshot luceneSnapshot = shard.newChangesSnapshot( + "test", + 0, + numOps - 1, + true, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ); Translog.Snapshot translogSnapshot = getTranslog(shard).newSnapshot() ) { List opsFromLucene = TestTranslog.drainSnapshot(luceneSnapshot, true); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 73359078908e7..7d4269550bb88 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -156,7 +156,6 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.indices.IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING; import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; -import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static 
org.hamcrest.Matchers.empty; @@ -257,7 +256,7 @@ private void assertOnGoingRecoveryState( public Settings.Builder createRecoverySettingsChunkPerSecond(long chunkSizeBytes) { return Settings.builder() // Set the chunk size in bytes - .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES)) + .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES)) // Set one chunk of bytes per second. .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSizeBytes, ByteSizeUnit.BYTES); } @@ -280,7 +279,7 @@ private void unthrottleRecovery() { Settings.builder() // 200mb is an arbitrary number intended to be large enough to avoid more throttling. .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "200mb") - .put(CHUNK_SIZE_SETTING.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE) + .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 38eef4f720623..ca2ff69ac9b17 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -24,7 +24,7 @@ import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.RecoveryFilesInfoRequest; -import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; +import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -41,7 +41,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; -import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -52,7 +51,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class, RecoverySettingsChunkSizePlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class); } /** @@ -63,7 +62,11 @@ protected Collection> nodePlugins() { */ public void testCancelRecoveryAndResume() throws Exception { updateClusterSettings( - Settings.builder().put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)) + Settings.builder() + .put( + RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), + new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES) + ) ); NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index b490c7efd52cd..4ba06a34ca3a7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -809,6 +809,24 @@ public void testRestoreChangeSyntheticSource() { assertThat(error.getMessage(), containsString("cannot modify setting [index.mapping.source.mode] on restore")); } + public void testRestoreChangeRecoveryUseSyntheticSource() { + Client client = client(); + createRepository("test-repo", "fs"); + String indexName = "test-idx"; + assertAcked(client.admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(indexSettings()))); + createSnapshot("test-repo", "test-snap", Collections.singletonList(indexName)); + cluster().wipeIndices(indexName); + var error = expectThrows(SnapshotRestoreException.class, () -> { + client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setIndexSettings(Settings.builder().put("index.recovery.use_synthetic_source", true)) + .setWaitForCompletion(true) + .get(); + }); + assertThat(error.getMessage(), containsString("cannot modify setting [index.recovery.use_synthetic_source] on restore")); + } + public void testRestoreChangeIndexSorts() { Client client = client(); createRepository("test-repo", "fs"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index e5e641bfdda21..755ee960be73e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -524,6 +524,15 @@ public void testSnapshotShutdownProgressTracker() throws Exception { "Pause signals have been set for all shard snapshots on data node [" + nodeForRemovalId + "]" ) ); + mockLog.addExpectation( + new MockLog.SeenEventExpectation( + "SnapshotShutdownProgressTracker index shard snapshot status messages", + SnapshotShutdownProgressTracker.class.getCanonicalName(), + Level.INFO, + // Expect the shard snapshot to stall in data file upload, since we've blocked the data node file upload to the blob store. 
+ "statusDescription='enqueued file snapshot tasks: threads running concurrent file uploads'" + ) + ); putShutdownForRemovalMetadata(nodeForRemoval, clusterService); @@ -583,6 +592,14 @@ public void testSnapshotShutdownProgressTracker() throws Exception { "Current active shard snapshot stats on data node [" + nodeForRemovalId + "]*Paused [" + numShards + "]" ) ); + mockLog.addExpectation( + new MockLog.SeenEventExpectation( + "SnapshotShutdownProgressTracker index shard snapshot messages", + SnapshotShutdownProgressTracker.class.getCanonicalName(), + Level.INFO, + "statusDescription='finished: master notification attempt complete'" + ) + ); // Release the master node to respond snapshotStatusUpdateLatch.countDown(); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 6a881163914e4..7b2f0c2c894be 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -21,6 +21,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.jdk.RuntimeVersionFeature; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.nativeaccess.NativeAccess; @@ -722,6 +723,9 @@ public final BootstrapCheckResult check(BootstrapContext context) { } boolean isAllPermissionGranted() { + if (RuntimeVersionFeature.isSecurityManagerAvailable() == false) { + return false; + } final SecurityManager sm = System.getSecurityManager(); assert sm != null; try { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 27cbb39c05d38..ae59f6578f03a 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -35,6 +35,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.jdk.JarHell; +import org.elasticsearch.jdk.RuntimeVersionFeature; import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.os.OsProbe; @@ -43,6 +44,8 @@ import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.plugins.PluginsLoader; +import org.elasticsearch.rest.MethodHandlers; +import org.elasticsearch.transport.RequestHandlerRegistry; import java.io.IOException; import java.io.InputStream; @@ -113,12 +116,14 @@ private static Bootstrap initPhase1() { * the presence of a security manager or lack thereof act as if there is a security manager present (e.g., DNS cache policy). * This forces such policies to take effect immediately. 
*/ - org.elasticsearch.bootstrap.Security.setSecurityManager(new SecurityManager() { - @Override - public void checkPermission(Permission perm) { - // grant all permissions so that we can later set the security manager to the one that we want - } - }); + if (RuntimeVersionFeature.isSecurityManagerAvailable()) { + org.elasticsearch.bootstrap.Security.setSecurityManager(new SecurityManager() { + @Override + public void checkPermission(Permission perm) { + // grant all permissions so that we can later set the security manager to the one that we want + } + }); + } LogConfigurator.registerErrorListener(); BootstrapInfo.init(); @@ -198,7 +203,11 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { SubscribableListener.class, RunOnce.class, // We eagerly initialize to work around log4j permissions & JDK-8309727 - VectorUtil.class + VectorUtil.class, + // RequestHandlerRegistry and MethodHandlers classes do nontrivial static initialization which should always succeed but load + // it now (before SM) to be sure + RequestHandlerRegistry.class, + MethodHandlers.class ); // load the plugin Java modules and layers now for use in entitlements @@ -215,7 +224,7 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { .toList(); EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName); - } else { + } else if (RuntimeVersionFeature.isSecurityManagerAvailable()) { // install SM after natives, shutdown hooks, etc. LogManager.getLogger(Elasticsearch.class).info("Bootstrapping java SecurityManager"); org.elasticsearch.bootstrap.Security.configure( @@ -223,6 +232,8 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(args.nodeSettings()), args.pidFile() ); + } else { + LogManager.getLogger(Elasticsearch.class).warn("Bootstrapping without any protection"); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java index 06ec8abf60ff4..172fa34e14ecb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java @@ -93,7 +93,7 @@ public String toString() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } public record Entry(String repository, long repositoryStateId) implements Writeable, RepositoryOperation { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 52e4d75ac5116..75c2c06f36c8e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -1591,6 +1591,7 @@ static void validateCloneIndex( private static final Set UNMODIFIABLE_SETTINGS_DURING_RESIZE = Set.of( IndexSettings.MODE.getKey(), SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), + IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java 
b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 16af7ca2915d4..a01571b8c237d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -257,6 +257,7 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_USE_SNAPSHOTS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE, + RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE, RecoverySettings.NODE_BANDWIDTH_RECOVERY_FACTOR_READ_SETTING, RecoverySettings.NODE_BANDWIDTH_RECOVERY_FACTOR_WRITE_SETTING, RecoverySettings.NODE_BANDWIDTH_RECOVERY_OPERATOR_FACTOR_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 884ce38fba391..fc8f128e92f32 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -188,6 +188,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING, IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING, + IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 5bea838f9d70c..8f0373d951319 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -38,6 +38,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -51,6 +52,7 @@ import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING; +import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING; /** * This class encapsulates all index level settings and handles settings updates. 
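For orientation, a minimal sketch (not part of this change) of index settings that satisfy the validator introduced in the hunk below; the class name and printed output are illustrative only:

import org.elasticsearch.common.settings.Settings;

public class RecoverySyntheticSourceSettingExample {
    public static void main(String[] args) {
        // Keys mirror INDEX_MAPPER_SOURCE_MODE_SETTING and RECOVERY_USE_SYNTHETIC_SOURCE_SETTING below.
        Settings indexSettings = Settings.builder()
            .put("index.mapping.source.mode", "synthetic") // prerequisite unless the index mode already defaults to synthetic source
            .put("index.recovery.use_synthetic_source", true) // the new final, index-scoped setting
            .build();
        System.out.println(indexSettings);
        // With stored source instead of synthetic, index creation fails with:
        // "The setting [index.recovery.use_synthetic_source] is only permitted when
        // [index.mapping.source.mode] is set to [SYNTHETIC]. Current mode: [STORED]."
    }
}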
@@ -653,6 +655,62 @@ public Iterator<Setting<?>> settings() { Property.Final ); + public static final Setting<Boolean> RECOVERY_USE_SYNTHETIC_SOURCE_SETTING = Setting.boolSetting( + "index.recovery.use_synthetic_source", + false, + new Setting.Validator<>() { + @Override + public void validate(Boolean value) {} + + @Override + public void validate(Boolean enabled, Map<Setting<?>, Object> settings) { + if (enabled == false) { + return; + } + + // Verify if synthetic source is enabled on the index; fail if it is not + var indexMode = (IndexMode) settings.get(MODE); + if (indexMode.defaultSourceMode() != SourceFieldMapper.Mode.SYNTHETIC) { + var sourceMode = (SourceFieldMapper.Mode) settings.get(INDEX_MAPPER_SOURCE_MODE_SETTING); + if (sourceMode != SourceFieldMapper.Mode.SYNTHETIC) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "The setting [%s] is only permitted when [%s] is set to [%s]. Current mode: [%s].", + RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), + INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), + SourceFieldMapper.Mode.SYNTHETIC.name(), + sourceMode.name() + ) + ); + } + } + + // Verify that all nodes can handle this setting + var version = (IndexVersion) settings.get(SETTING_INDEX_VERSION_CREATED); + if (version.before(IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY)) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "The setting [%s] is unavailable on this cluster because some nodes are running older " + + "versions that do not support it. Please upgrade all nodes to the latest version " + + "and try again.", + RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey() + ) + ); + } + } + + @Override + public Iterator<Setting<?>> settings() { + List<Setting<?>> res = List.of(INDEX_MAPPER_SOURCE_MODE_SETTING, SETTING_INDEX_VERSION_CREATED, MODE); + return res.iterator(); + } + }, + Property.IndexScope, + Property.Final + ); + /** * Returns true if TSDB encoding is enabled. The default is true */ @@ -824,6 +882,7 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private volatile boolean skipIgnoredSourceRead; private final SourceFieldMapper.Mode indexMappingSourceMode; private final boolean recoverySourceEnabled; + private final boolean recoverySourceSyntheticEnabled; /** * The maximum number of refresh listeners allowed on this shard. @@ -984,8 +1043,9 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti es87TSDBCodecEnabled = scopedSettings.get(TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING); skipIgnoredSourceWrite = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING); skipIgnoredSourceRead = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING); - indexMappingSourceMode = scopedSettings.get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING); + indexMappingSourceMode = scopedSettings.get(INDEX_MAPPER_SOURCE_MODE_SETTING); recoverySourceEnabled = RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING.get(nodeSettings); + recoverySourceSyntheticEnabled = scopedSettings.get(RECOVERY_USE_SYNTHETIC_SOURCE_SETTING); scopedSettings.addSettingsUpdateConsumer( MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, @@ -1677,6 +1737,13 @@ public boolean isRecoverySourceEnabled() { return recoverySourceEnabled; } + /** + * @return Whether recovery source should always be bypassed in favor of using synthetic source.
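+ * (This reflects the final {@code index.recovery.use_synthetic_source} index setting.)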
+ */ + public boolean isRecoverySourceSyntheticEnabled() { + return recoverySourceSyntheticEnabled; + } + /** * The bounds for {@code @timestamp} on this index or * {@code null} if there are no bounds. diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 6344aa2a72ca9..96a70c3cc432b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -136,6 +136,7 @@ private static Version parseUnchecked(String version) { public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0); public static final IndexVersion DEPRECATE_SOURCE_MODE_MAPPER = def(9_003_00_0, Version.LUCENE_10_0_0); + public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY = def(9_004_00_0, Version.LUCENE_10_0_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDocValues.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDocValues.java index 48fc76063f815..190a1ed8b457a 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDocValues.java @@ -24,6 +24,7 @@ final class CombinedDocValues { private final NumericDocValues primaryTermDV; private final NumericDocValues tombstoneDV; private final NumericDocValues recoverySource; + private final NumericDocValues recoverySourceSize; CombinedDocValues(LeafReader leafReader) throws IOException { this.versionDV = Objects.requireNonNull(leafReader.getNumericDocValues(VersionFieldMapper.NAME), "VersionDV is missing"); @@ -34,6 +35,7 @@ final class CombinedDocValues { ); this.tombstoneDV = leafReader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME); this.recoverySource = leafReader.getNumericDocValues(SourceFieldMapper.RECOVERY_SOURCE_NAME); + this.recoverySourceSize = leafReader.getNumericDocValues(SourceFieldMapper.RECOVERY_SOURCE_SIZE_NAME); } long docVersion(int segmentDocId) throws IOException { @@ -79,4 +81,12 @@ boolean hasRecoverySource(int segmentDocId) throws IOException { assert recoverySource.docID() < segmentDocId; return recoverySource.advanceExact(segmentDocId); } + + long recoverySourceSize(int segmentDocId) throws IOException { + if (recoverySourceSize == null) { + return -1; + } + assert recoverySourceSize.docID() < segmentDocId; + return recoverySourceSize.advanceExact(segmentDocId) ? 
recoverySourceSize.longValue() : -1; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index edafa1ca922fb..dcdff09191667 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -937,14 +937,15 @@ public boolean allowSearchIdleOptimization() { * @param source the source of the request * @param fromSeqNo the start sequence number (inclusive) * @param toSeqNo the end sequence number (inclusive) - * @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean) + * @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean, long) */ public abstract int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException; /** - * Creates a new history snapshot from Lucene for reading operations whose seqno in the requesting seqno range (both inclusive). - * This feature requires soft-deletes enabled. If soft-deletes are disabled, this method will throw an {@link IllegalStateException}. + * @deprecated This method is deprecated and will be removed once #114618 is applied to the serverless repository. + * @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean, long) */ + @Deprecated public abstract Translog.Snapshot newChangesSnapshot( String source, long fromSeqNo, @@ -954,6 +955,23 @@ public abstract Translog.Snapshot newChangesSnapshot( boolean accessStats ) throws IOException; + /** + * Creates a new history snapshot from Lucene for reading operations whose seqno is in the requested seqno range (both inclusive). + * This feature requires soft-deletes enabled. If soft-deletes are disabled, this method will throw an {@link IllegalStateException}. + */ + public Translog.Snapshot newChangesSnapshot( + String source, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean singleConsumer, + boolean accessStats, + long maxChunkSize + ) throws IOException { + // TODO: Remove this default implementation once the deprecated newChangesSnapshot is removed + return newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats); + } + /** * Checks if this engine has every operation since {@code startingSeqNo} (inclusive) in its history (either Lucene or translog) */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 8d43252d178ee..fe310dc45c94c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -2709,7 +2709,10 @@ private IndexWriterConfig getIndexWriterConfig() { // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes. iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); mergePolicy = new RecoverySourcePruneMergePolicy( - SourceFieldMapper.RECOVERY_SOURCE_NAME, + engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled() ? null : SourceFieldMapper.RECOVERY_SOURCE_NAME, + engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled() + ?
SourceFieldMapper.RECOVERY_SOURCE_SIZE_NAME + : SourceFieldMapper.RECOVERY_SOURCE_NAME, engineConfig.getIndexSettings().getMode() == IndexMode.TIME_SERIES, softDeletesPolicy::getRetentionQuery, new SoftDeletesRetentionMergePolicy( @@ -3141,6 +3144,19 @@ public Translog.Snapshot newChangesSnapshot( boolean requiredFullRange, boolean singleConsumer, boolean accessStats + ) throws IOException { + return newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats, -1); + } + + @Override + public Translog.Snapshot newChangesSnapshot( + String source, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean singleConsumer, + boolean accessStats, + long maxChunkSize ) throws IOException { if (enableRecoverySource == false) { throw new IllegalStateException( @@ -3153,16 +3169,31 @@ public Translog.Snapshot newChangesSnapshot( refreshIfNeeded(source, toSeqNo); Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL); try { - LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot( - searcher, - LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, - fromSeqNo, - toSeqNo, - requiredFullRange, - singleConsumer, - accessStats, - config().getIndexSettings().getIndexVersionCreated() - ); + final Translog.Snapshot snapshot; + if (engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) { + snapshot = new LuceneSyntheticSourceChangesSnapshot( + engineConfig.getMapperService().mappingLookup(), + searcher, + SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE, + maxChunkSize, + fromSeqNo, + toSeqNo, + requiredFullRange, + accessStats, + config().getIndexSettings().getIndexVersionCreated() + ); + } else { + snapshot = new LuceneChangesSnapshot( + searcher, + SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE, + fromSeqNo, + toSeqNo, + requiredFullRange, + singleConsumer, + accessStats, + config().getIndexSettings().getIndexVersionCreated() + ); + } searcher = null; return snapshot; } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index e44b344d3b283..d4466cbc17c54 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -10,61 +10,33 @@ package org.elasticsearch.index.engine; import org.apache.lucene.codecs.StoredFieldsReader; -import org.apache.lucene.document.LongPoint; -import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Assertions; import org.elasticsearch.index.IndexVersion; import 
org.elasticsearch.index.fieldvisitor.FieldsVisitor; -import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.transport.Transports; -import java.io.Closeable; import java.io.IOException; import java.util.Comparator; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; /** * A {@link Translog.Snapshot} from changes in a Lucene index */ -final class LuceneChangesSnapshot implements Translog.Snapshot { - static final int DEFAULT_BATCH_SIZE = 1024; - - private final int searchBatchSize; - private final long fromSeqNo, toSeqNo; +public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot { private long lastSeenSeqNo; private int skippedOperations; - private final boolean requiredFullRange; private final boolean singleConsumer; - private final IndexSearcher indexSearcher; private int docIndex = 0; - private final boolean accessStats; - private final int totalHits; - private ScoreDoc[] scoreDocs; + private int maxDocIndex; private final ParallelArray parallelArray; - private final Closeable onClose; - - private final IndexVersion indexVersionCreated; private int storedFieldsReaderOrd = -1; private StoredFieldsReader storedFieldsReader = null; @@ -83,7 +55,7 @@ final class LuceneChangesSnapshot implements Translog.Snapshot { * @param accessStats true if the stats of the snapshot can be accessed via {@link #totalOperations()} * @param indexVersionCreated the version on which this index was created */ - LuceneChangesSnapshot( + public LuceneChangesSnapshot( Engine.Searcher engineSearcher, int searchBatchSize, long fromSeqNo, @@ -93,50 +65,26 @@ final class LuceneChangesSnapshot implements Translog.Snapshot { boolean accessStats, IndexVersion indexVersionCreated ) throws IOException { - if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) { - throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]"); - } - if (searchBatchSize <= 0) { - throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]"); - } - final AtomicBoolean closed = new AtomicBoolean(); - this.onClose = () -> { - if (closed.compareAndSet(false, true)) { - IOUtils.close(engineSearcher); - } - }; - final long requestingSize = (toSeqNo - fromSeqNo) == Long.MAX_VALUE ? Long.MAX_VALUE : (toSeqNo - fromSeqNo + 1L); - this.creationThread = Thread.currentThread(); - this.searchBatchSize = requestingSize < searchBatchSize ? Math.toIntExact(requestingSize) : searchBatchSize; - this.fromSeqNo = fromSeqNo; - this.toSeqNo = toSeqNo; - this.lastSeenSeqNo = fromSeqNo - 1; - this.requiredFullRange = requiredFullRange; + super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated); + this.creationThread = Assertions.ENABLED ? 
Thread.currentThread() : null; this.singleConsumer = singleConsumer; - this.indexSearcher = newIndexSearcher(engineSearcher); - this.indexSearcher.setQueryCache(null); - this.accessStats = accessStats; this.parallelArray = new ParallelArray(this.searchBatchSize); - this.indexVersionCreated = indexVersionCreated; - final TopDocs topDocs = searchOperations(null, accessStats); - this.totalHits = Math.toIntExact(topDocs.totalHits.value()); - this.scoreDocs = topDocs.scoreDocs; - fillParallelArray(scoreDocs, parallelArray); + this.lastSeenSeqNo = fromSeqNo - 1; + final TopDocs topDocs = nextTopDocs(); + this.maxDocIndex = topDocs.scoreDocs.length; + fillParallelArray(topDocs.scoreDocs, parallelArray); } @Override public void close() throws IOException { assert assertAccessingThread(); - onClose.close(); + super.close(); } @Override public int totalOperations() { assert assertAccessingThread(); - if (accessStats == false) { - throw new IllegalStateException("Access stats of a snapshot created with [access_stats] is false"); - } - return totalHits; + return super.totalOperations(); } @Override @@ -146,7 +94,7 @@ public int skippedOperations() { } @Override - public Translog.Operation next() throws IOException { + protected Translog.Operation nextOperation() throws IOException { assert assertAccessingThread(); Translog.Operation op = null; for (int idx = nextDocIndex(); idx != -1; idx = nextDocIndex()) { @@ -155,12 +103,6 @@ public Translog.Operation next() throws IOException { break; } } - if (requiredFullRange) { - rangeCheck(op); - } - if (op != null) { - lastSeenSeqNo = op.seqNo(); - } return op; } @@ -171,48 +113,15 @@ private boolean assertAccessingThread() { return true; } - private void rangeCheck(Translog.Operation op) { - if (op == null) { - if (lastSeenSeqNo < toSeqNo) { - throw new MissingHistoryOperationsException( - "Not all operations between from_seqno [" - + fromSeqNo - + "] " - + "and to_seqno [" - + toSeqNo - + "] found; prematurely terminated last_seen_seqno [" - + lastSeenSeqNo - + "]" - ); - } - } else { - final long expectedSeqNo = lastSeenSeqNo + 1; - if (op.seqNo() != expectedSeqNo) { - throw new MissingHistoryOperationsException( - "Not all operations between from_seqno [" - + fromSeqNo - + "] " - + "and to_seqno [" - + toSeqNo - + "] found; expected seqno [" - + expectedSeqNo - + "]; found [" - + op - + "]" - ); - } - } - } - private int nextDocIndex() throws IOException { // we have processed all docs in the current search - fetch the next batch - if (docIndex == scoreDocs.length && docIndex > 0) { - final ScoreDoc prev = scoreDocs[scoreDocs.length - 1]; - scoreDocs = searchOperations((FieldDoc) prev, false).scoreDocs; + if (docIndex == maxDocIndex && docIndex > 0) { + var scoreDocs = nextTopDocs().scoreDocs; fillParallelArray(scoreDocs, parallelArray); docIndex = 0; + maxDocIndex = scoreDocs.length; } - if (docIndex < scoreDocs.length) { + if (docIndex < maxDocIndex) { int idx = docIndex; docIndex++; return idx; @@ -237,14 +146,13 @@ private void fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray } int docBase = -1; int maxDoc = 0; - List leaves = indexSearcher.getIndexReader().leaves(); int readerIndex = 0; CombinedDocValues combinedDocValues = null; LeafReaderContext leaf = null; for (ScoreDoc scoreDoc : scoreDocs) { if (scoreDoc.doc >= docBase + maxDoc) { do { - leaf = leaves.get(readerIndex++); + leaf = leaves().get(readerIndex++); docBase = leaf.docBase; maxDoc = leaf.reader().maxDoc(); } while (scoreDoc.doc >= docBase + maxDoc); @@ -253,6 +161,7 
@@ private void fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray final int segmentDocID = scoreDoc.doc - docBase; final int index = scoreDoc.shardIndex; parallelArray.leafReaderContexts[index] = leaf; + parallelArray.docID[index] = scoreDoc.doc; parallelArray.seqNo[index] = combinedDocValues.docSeqNo(segmentDocID); parallelArray.primaryTerm[index] = combinedDocValues.docPrimaryTerm(segmentDocID); parallelArray.version[index] = combinedDocValues.docVersion(segmentDocID); @@ -275,16 +184,6 @@ private static boolean hasSequentialAccess(ScoreDoc[] scoreDocs) { return true; } - private static IndexSearcher newIndexSearcher(Engine.Searcher engineSearcher) throws IOException { - return new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader())); - } - - private static Query rangeQuery(long fromSeqNo, long toSeqNo, IndexVersion indexVersionCreated) { - return new BooleanQuery.Builder().add(LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, fromSeqNo, toSeqNo), BooleanClause.Occur.MUST) - .add(Queries.newNonNestedFilter(indexVersionCreated), BooleanClause.Occur.MUST) // exclude non-root nested documents - .build(); - } - static int countOperations(Engine.Searcher engineSearcher, long fromSeqNo, long toSeqNo, IndexVersion indexVersionCreated) throws IOException { if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) { @@ -293,23 +192,9 @@ static int countOperations(Engine.Searcher engineSearcher, long fromSeqNo, long return newIndexSearcher(engineSearcher).count(rangeQuery(fromSeqNo, toSeqNo, indexVersionCreated)); } - private TopDocs searchOperations(FieldDoc after, boolean accurateTotalHits) throws IOException { - final Query rangeQuery = rangeQuery(Math.max(fromSeqNo, lastSeenSeqNo), toSeqNo, indexVersionCreated); - assert accurateTotalHits == false || after == null : "accurate total hits is required by the first batch only"; - final SortField sortBySeqNo = new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG); - TopFieldCollectorManager topFieldCollectorManager = new TopFieldCollectorManager( - new Sort(sortBySeqNo), - searchBatchSize, - after, - accurateTotalHits ? 
Integer.MAX_VALUE : 0, - false - ); - return indexSearcher.search(rangeQuery, topFieldCollectorManager); - } - private Translog.Operation readDocAsOp(int docIndex) throws IOException { final LeafReaderContext leaf = parallelArray.leafReaderContexts[docIndex]; - final int segmentDocID = scoreDocs[docIndex].doc - leaf.docBase; + final int segmentDocID = parallelArray.docID[docIndex] - leaf.docBase; final long primaryTerm = parallelArray.primaryTerm[docIndex]; assert primaryTerm > 0 : "nested child document must be excluded"; final long seqNo = parallelArray.seqNo[docIndex]; @@ -385,19 +270,13 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException { + "], op [" + op + "]"; + lastSeenSeqNo = op.seqNo(); return op; } - private static boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException { - final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); - if (ndv == null || ndv.advanceExact(segmentDocId) == false) { - throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found"); - } - return ndv.longValue() == 1; - } - private static final class ParallelArray { final LeafReaderContext[] leafReaderContexts; + final int[] docID; final long[] version; final long[] seqNo; final long[] primaryTerm; @@ -406,6 +285,7 @@ private static final class ParallelArray { boolean useSequentialStoredFieldsReader = false; ParallelArray(int size) { + docID = new int[size]; version = new long[size]; seqNo = new long[size]; primaryTerm = new long[size]; diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java new file mode 100644 index 0000000000000..3d3d2f6f66d56 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.util.ArrayUtil; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; +import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.SourceFieldMetrics; +import org.elasticsearch.index.mapper.SourceLoader; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Deque; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +/** + * A {@link SearchBasedChangesSnapshot} that utilizes a synthetic field loader to rebuild the recovery source. 
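+ * Rather than replaying a stored {@code _recovery_source}, it reconstructs each operation's source on the fly from stored fields and doc values via the mapper's synthetic {@link SourceLoader}.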
+ * This snapshot is activated when {@link IndexSettings#RECOVERY_USE_SYNTHETIC_SOURCE_SETTING} + * is enabled on the underlying index. + * + * The {@code maxMemorySizeInBytes} parameter limits the total size of uncompressed _sources + * loaded into memory during batch retrieval. + */ +public class LuceneSyntheticSourceChangesSnapshot extends SearchBasedChangesSnapshot { + private final long maxMemorySizeInBytes; + private final StoredFieldLoader storedFieldLoader; + private final SourceLoader sourceLoader; + + private int skippedOperations; + private long lastSeenSeqNo; + + private record SearchRecord(FieldDoc doc, boolean isTombstone, long seqNo, long primaryTerm, long version, long size) { + int index() { + return doc.shardIndex; + } + + int docID() { + return doc.doc; + } + + boolean hasRecoverySourceSize() { + return size != -1; + } + } + + private final Deque<SearchRecord> pendingDocs = new LinkedList<>(); + private final Deque<Translog.Operation> operationQueue = new LinkedList<>(); + + public LuceneSyntheticSourceChangesSnapshot( + MappingLookup mappingLookup, + Engine.Searcher engineSearcher, + int searchBatchSize, + long maxMemorySizeInBytes, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean accessStats, + IndexVersion indexVersionCreated + ) throws IOException { + super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated); + assert mappingLookup.isSourceSynthetic(); + // ensure we can buffer at least one document + this.maxMemorySizeInBytes = maxMemorySizeInBytes > 0 ? maxMemorySizeInBytes : 1; + this.sourceLoader = mappingLookup.newSourceLoader(SourceFieldMetrics.NOOP); + Set<String> storedFields = sourceLoader.requiredStoredFields(); + assert mappingLookup.isSourceSynthetic() : "synthetic source must be enabled for proper functionality."; + this.storedFieldLoader = StoredFieldLoader.create(false, storedFields); + this.lastSeenSeqNo = fromSeqNo - 1; + } + + @Override + public int skippedOperations() { + return skippedOperations; + } + + @Override + protected Translog.Operation nextOperation() throws IOException { + while (true) { + if (operationQueue.isEmpty()) { + loadNextBatch(); + } + if (operationQueue.isEmpty()) { + return null; + } + var op = operationQueue.pollFirst(); + if (op.seqNo() == lastSeenSeqNo) { + skippedOperations++; + continue; + } + lastSeenSeqNo = op.seqNo(); + return op; + } + } + + private void loadNextBatch() throws IOException { + List<SearchRecord> documentsToLoad = new ArrayList<>(); + long accumulatedSize = 0; + while (accumulatedSize < maxMemorySizeInBytes) { + if (pendingDocs.isEmpty()) { + ScoreDoc[] topDocs = nextTopDocs().scoreDocs; + if (topDocs.length == 0) { + break; + } + pendingDocs.addAll(Arrays.asList(transformScoreDocsToRecords(topDocs))); + } + SearchRecord document = pendingDocs.pollFirst(); + document.doc().shardIndex = documentsToLoad.size(); + documentsToLoad.add(document); + accumulatedSize += document.size(); + } + + for (var op : loadDocuments(documentsToLoad)) { + if (op == null) { + skippedOperations++; + continue; + } + operationQueue.add(op); + } + } + + private SearchRecord[] transformScoreDocsToRecords(ScoreDoc[] scoreDocs) throws IOException { + ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(doc -> doc.doc)); + SearchRecord[] documentRecords = new SearchRecord[scoreDocs.length]; + CombinedDocValues combinedDocValues = null; + int docBase = -1; + int maxDoc = 0; + int readerIndex = 0; + LeafReaderContext leafReaderContext; + + for (int i = 0; i < scoreDocs.length; i++) { + ScoreDoc scoreDoc =
scoreDocs[i]; + if (scoreDoc.doc >= docBase + maxDoc) { + do { + leafReaderContext = leaves().get(readerIndex++); + docBase = leafReaderContext.docBase; + maxDoc = leafReaderContext.reader().maxDoc(); + } while (scoreDoc.doc >= docBase + maxDoc); + combinedDocValues = new CombinedDocValues(leafReaderContext.reader()); + } + int segmentDocID = scoreDoc.doc - docBase; + int index = scoreDoc.shardIndex; + var primaryTerm = combinedDocValues.docPrimaryTerm(segmentDocID); + assert primaryTerm > 0 : "nested child document must be excluded"; + documentRecords[index] = new SearchRecord( + (FieldDoc) scoreDoc, + combinedDocValues.isTombstone(segmentDocID), + combinedDocValues.docSeqNo(segmentDocID), + primaryTerm, + combinedDocValues.docVersion(segmentDocID), + combinedDocValues.recoverySourceSize(segmentDocID) + ); + } + return documentRecords; + } + + private Translog.Operation[] loadDocuments(List<SearchRecord> documentRecords) throws IOException { + documentRecords.sort(Comparator.comparingInt(doc -> doc.docID())); + Translog.Operation[] operations = new Translog.Operation[documentRecords.size()]; + + int docBase = -1; + int maxDoc = 0; + int readerIndex = 0; + LeafReaderContext leafReaderContext = null; + LeafStoredFieldLoader leafFieldLoader = null; + SourceLoader.Leaf leafSourceLoader = null; + for (int i = 0; i < documentRecords.size(); i++) { + SearchRecord docRecord = documentRecords.get(i); + if (docRecord.docID() >= docBase + maxDoc) { + do { + leafReaderContext = leaves().get(readerIndex++); + docBase = leafReaderContext.docBase; + maxDoc = leafReaderContext.reader().maxDoc(); + } while (docRecord.docID() >= docBase + maxDoc); + + leafFieldLoader = storedFieldLoader.getLoader(leafReaderContext, null); + leafSourceLoader = sourceLoader.leaf(leafReaderContext.reader(), null); + } + int segmentDocID = docRecord.docID() - docBase; + leafFieldLoader.advanceTo(segmentDocID); + operations[docRecord.index()] = createOperation(docRecord, leafFieldLoader, leafSourceLoader, segmentDocID, leafReaderContext); + } + return operations; + } + + private Translog.Operation createOperation( + SearchRecord docRecord, + LeafStoredFieldLoader fieldLoader, + SourceLoader.Leaf sourceLoader, + int segmentDocID, + LeafReaderContext context + ) throws IOException { + if (docRecord.isTombstone() && fieldLoader.id() == null) { + assert docRecord.version() == 1L : "Noop tombstone should have version 1L; actual version [" + docRecord.version() + "]"; + assert assertDocSoftDeleted(context.reader(), segmentDocID) : "Noop but soft_deletes field is not set [" + docRecord + "]"; + return new Translog.NoOp(docRecord.seqNo(), docRecord.primaryTerm(), "null"); + } else if (docRecord.isTombstone()) { + assert assertDocSoftDeleted(context.reader(), segmentDocID) : "Delete op but soft_deletes field is not set [" + docRecord + "]"; + return new Translog.Delete(fieldLoader.id(), docRecord.seqNo(), docRecord.primaryTerm(), docRecord.version()); + } else { + if (docRecord.hasRecoverySourceSize() == false) { + // TODO: Callers should ask for the range that source should be retained. Thus we should always + // check for the existence of the source once we make peer-recovery send ops after the local checkpoint.
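+ // A missing recovery-source size doc value means this operation's source was already pruned (see + // RecoverySourcePruneMergePolicy): fail when the caller required the full range, otherwise count the op as skipped.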
+ if (requiredFullRange) { + throw new MissingHistoryOperationsException( + "source not found for seqno=" + docRecord.seqNo() + " from_seqno=" + fromSeqNo + " to_seqno=" + toSeqNo + ); + } else { + skippedOperations++; + return null; + } + } + BytesReference source = sourceLoader.source(fieldLoader, segmentDocID).internalSourceRef(); + return new Translog.Index( + fieldLoader.id(), + docRecord.seqNo(), + docRecord.primaryTerm(), + docRecord.version(), + source, + fieldLoader.routing(), + -1 // autogenerated timestamp + ); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index d4a2fe1b57903..1cca1ed5df6ea 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -356,7 +356,7 @@ public Closeable acquireHistoryRetentionLock() { @Override public int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException { - try (Translog.Snapshot snapshot = newChangesSnapshot(source, fromSeqNo, toSeqNo, false, true, true)) { + try (Translog.Snapshot snapshot = newChangesSnapshot(source, fromSeqNo, toSeqNo, false, true, true, -1)) { return snapshot.totalOperations(); } } @@ -369,6 +369,19 @@ public Translog.Snapshot newChangesSnapshot( boolean requiredFullRange, boolean singleConsumer, boolean accessStats + ) throws IOException { + return Translog.Snapshot.EMPTY; + } + + @Override + public Translog.Snapshot newChangesSnapshot( + String source, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean singleConsumer, + boolean accessStats, + long maxChunkSize ) { return Translog.Snapshot.EMPTY; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java index 3e99818d1827b..35a2d0b438fe5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -33,17 +33,18 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSetIterator; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.search.internal.FilterStoredFieldVisitor; import java.io.IOException; import java.util.Arrays; -import java.util.Objects; import java.util.function.Supplier; final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy { RecoverySourcePruneMergePolicy( - String recoverySourceField, + @Nullable String pruneStoredFieldName, + String pruneNumericDVFieldName, boolean pruneIdField, Supplier retainSourceQuerySupplier, MergePolicy in @@ -52,18 +53,19 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy { @Override public CodecReader wrapForMerge(CodecReader reader) throws IOException { CodecReader wrapped = toWrap.wrapForMerge(reader); - return wrapReader(recoverySourceField, pruneIdField, wrapped, retainSourceQuerySupplier); + return wrapReader(pruneStoredFieldName, pruneNumericDVFieldName, pruneIdField, wrapped, retainSourceQuerySupplier); } }); } private static CodecReader wrapReader( - String recoverySourceField, + String pruneStoredFieldName, + String pruneNumericDVFieldName, boolean pruneIdField, CodecReader reader, Supplier 
retainSourceQuerySupplier ) throws IOException { - NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField); + NumericDocValues recoverySource = reader.getNumericDocValues(pruneNumericDVFieldName); if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { return reader; // early terminate - nothing to do here since none of the docs has a recovery source anymore. } @@ -78,21 +80,35 @@ private static CodecReader wrapReader( if (recoverySourceToKeep.cardinality() == reader.maxDoc()) { return reader; // keep all source } - return new SourcePruningFilterCodecReader(recoverySourceField, pruneIdField, reader, recoverySourceToKeep); + return new SourcePruningFilterCodecReader( + pruneStoredFieldName, + pruneNumericDVFieldName, + pruneIdField, + reader, + recoverySourceToKeep + ); } else { - return new SourcePruningFilterCodecReader(recoverySourceField, pruneIdField, reader, null); + return new SourcePruningFilterCodecReader(pruneStoredFieldName, pruneNumericDVFieldName, pruneIdField, reader, null); } } private static class SourcePruningFilterCodecReader extends FilterCodecReader { private final BitSet recoverySourceToKeep; - private final String recoverySourceField; + private final String pruneStoredFieldName; + private final String pruneNumericDVFieldName; private final boolean pruneIdField; - SourcePruningFilterCodecReader(String recoverySourceField, boolean pruneIdField, CodecReader reader, BitSet recoverySourceToKeep) { + SourcePruningFilterCodecReader( + @Nullable String pruneStoredFieldName, + String pruneNumericDVFieldName, + boolean pruneIdField, + CodecReader reader, + BitSet recoverySourceToKeep + ) { super(reader); - this.recoverySourceField = recoverySourceField; + this.pruneStoredFieldName = pruneStoredFieldName; this.recoverySourceToKeep = recoverySourceToKeep; + this.pruneNumericDVFieldName = pruneNumericDVFieldName; this.pruneIdField = pruneIdField; } @@ -103,8 +119,8 @@ public DocValuesProducer getDocValuesReader() { @Override public NumericDocValues getNumeric(FieldInfo field) throws IOException { NumericDocValues numeric = super.getNumeric(field); - if (recoverySourceField.equals(field.name)) { - assert numeric != null : recoverySourceField + " must have numeric DV but was null"; + if (field.name.equals(pruneNumericDVFieldName)) { + assert numeric != null : pruneNumericDVFieldName + " must have numeric DV but was null"; final DocIdSetIterator intersection; if (recoverySourceToKeep == null) { // we can't return null here; Lucene's DocIdMerger expects an instance @@ -139,10 +155,14 @@ public boolean advanceExact(int target) { @Override public StoredFieldsReader getFieldsReader() { + if (pruneStoredFieldName == null && pruneIdField == false) { + // nothing to prune, we can use the original fields reader + return super.getFieldsReader(); + } return new RecoverySourcePruningStoredFieldsReader( super.getFieldsReader(), recoverySourceToKeep, - recoverySourceField, + pruneStoredFieldName, pruneIdField ); } @@ -241,12 +261,13 @@ private static class RecoverySourcePruningStoredFieldsReader extends FilterStore RecoverySourcePruningStoredFieldsReader( StoredFieldsReader in, BitSet recoverySourceToKeep, - String recoverySourceField, + @Nullable String recoverySourceField, boolean pruneIdField ) { super(in); + assert recoverySourceField != null || pruneIdField : "nothing to prune"; this.recoverySourceToKeep = recoverySourceToKeep; - this.recoverySourceField = Objects.requireNonNull(recoverySourceField); + this.recoverySourceField =
recoverySourceField; this.pruneIdField = pruneIdField; } @@ -258,7 +279,7 @@ public void document(int docID, StoredFieldVisitor visitor) throws IOException { super.document(docID, new FilterStoredFieldVisitor(visitor) { @Override public Status needsField(FieldInfo fieldInfo) throws IOException { - if (recoverySourceField.equals(fieldInfo.name)) { + if (fieldInfo.name.equals(recoverySourceField)) { return Status.NO; } if (pruneIdField && IdFieldMapper.NAME.equals(fieldInfo.name)) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java new file mode 100644 index 0000000000000..191125c59705e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java @@ -0,0 +1,233 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldCollectorManager; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.translog.Translog; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Abstract class that provides a snapshot mechanism to retrieve operations from a live Lucene index + * within a specified range of sequence numbers. Subclasses are expected to define the + * method to fetch the next batch of operations. + */ +public abstract class SearchBasedChangesSnapshot implements Translog.Snapshot, Closeable { + public static final int DEFAULT_BATCH_SIZE = 1024; + + private final IndexVersion indexVersionCreated; + private final IndexSearcher indexSearcher; + private final Closeable onClose; + + protected final long fromSeqNo, toSeqNo; + protected final boolean requiredFullRange; + protected final int searchBatchSize; + + private final boolean accessStats; + private final int totalHits; + private FieldDoc afterDoc; + private long lastSeenSeqNo; + + /** + * Constructs a new snapshot for fetching changes within a sequence number range. + * + * @param engineSearcher Engine searcher instance. + * @param searchBatchSize Number of documents to retrieve per batch. + * @param fromSeqNo Starting sequence number. + * @param toSeqNo Ending sequence number. 
+ * @param requiredFullRange Whether the full range of operations must be present (missing operations cause the snapshot to fail). + * @param accessStats If true, enable access statistics for counting total operations. + * @param indexVersionCreated Version of the index when it was created. + */ + protected SearchBasedChangesSnapshot( + Engine.Searcher engineSearcher, + int searchBatchSize, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean accessStats, + IndexVersion indexVersionCreated + ) throws IOException { + + if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) { + throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]"); + } + if (searchBatchSize <= 0) { + throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]"); + } + + final AtomicBoolean closed = new AtomicBoolean(); + this.onClose = () -> { + if (closed.compareAndSet(false, true)) { + IOUtils.close(engineSearcher); + } + }; + + this.indexVersionCreated = indexVersionCreated; + this.fromSeqNo = fromSeqNo; + this.toSeqNo = toSeqNo; + this.lastSeenSeqNo = fromSeqNo - 1; + this.requiredFullRange = requiredFullRange; + this.indexSearcher = newIndexSearcher(engineSearcher); + this.indexSearcher.setQueryCache(null); + + long requestingSize = (toSeqNo - fromSeqNo == Long.MAX_VALUE) ? Long.MAX_VALUE : (toSeqNo - fromSeqNo + 1L); + this.searchBatchSize = (int) Math.min(requestingSize, searchBatchSize); + + this.accessStats = accessStats; + this.totalHits = accessStats ? indexSearcher.count(rangeQuery(fromSeqNo, toSeqNo, indexVersionCreated)) : -1; + } + + /** + * Abstract method for retrieving the next operation. Should be implemented by subclasses. + * + * @return The next Translog.Operation in the snapshot. + * @throws IOException If an I/O error occurs. + */ + protected abstract Translog.Operation nextOperation() throws IOException; + + /** + * Returns the list of index leaf reader contexts. + * + * @return List of LeafReaderContext. + */ + public List<LeafReaderContext> leaves() { + return indexSearcher.getIndexReader().leaves(); + } + + @Override + public int totalOperations() { + if (accessStats == false) { + throw new IllegalStateException("Access stats of a snapshot created with [access_stats] is false"); + } + return totalHits; + } + + @Override + public final Translog.Operation next() throws IOException { + Translog.Operation op = nextOperation(); + if (requiredFullRange) { + verifyRange(op); + } + if (op != null) { + assert fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo && lastSeenSeqNo < op.seqNo() + : "Unexpected operation; last_seen_seqno [" + + lastSeenSeqNo + + "], from_seqno [" + + fromSeqNo + + "], to_seqno [" + + toSeqNo + + "], op [" + + op + + "]"; + lastSeenSeqNo = op.seqNo(); + } + return op; + } + + @Override + public void close() throws IOException { + onClose.close(); + } + + /** + * Retrieves the next batch of top documents based on the sequence range. + * + * @return TopDocs instance containing the documents in the current batch.
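+ * Subsequent calls resume after the last document of the previous batch (search-after via the internal {@code afterDoc}).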
+ */ + protected TopDocs nextTopDocs() throws IOException { + Query rangeQuery = rangeQuery(Math.max(fromSeqNo, lastSeenSeqNo), toSeqNo, indexVersionCreated); + SortField sortBySeqNo = new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG); + + TopFieldCollectorManager collectorManager = new TopFieldCollectorManager( + new Sort(sortBySeqNo), + searchBatchSize, + afterDoc, + 0, + false + ); + TopDocs results = indexSearcher.search(rangeQuery, collectorManager); + + if (results.scoreDocs.length > 0) { + afterDoc = (FieldDoc) results.scoreDocs[results.scoreDocs.length - 1]; + } + for (int i = 0; i < results.scoreDocs.length; i++) { + results.scoreDocs[i].shardIndex = i; + } + return results; + } + + static IndexSearcher newIndexSearcher(Engine.Searcher engineSearcher) throws IOException { + return new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader())); + } + + static Query rangeQuery(long fromSeqNo, long toSeqNo, IndexVersion indexVersionCreated) { + return new BooleanQuery.Builder().add(LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, fromSeqNo, toSeqNo), BooleanClause.Occur.MUST) + .add(Queries.newNonNestedFilter(indexVersionCreated), BooleanClause.Occur.MUST) + .build(); + } + + private void verifyRange(Translog.Operation op) { + if (op == null && lastSeenSeqNo < toSeqNo) { + throw new MissingHistoryOperationsException( + "Not all operations between from_seqno [" + + fromSeqNo + + "] " + + "and to_seqno [" + + toSeqNo + + "] found; prematurely terminated last_seen_seqno [" + + lastSeenSeqNo + + "]" + ); + } else if (op != null && op.seqNo() != lastSeenSeqNo + 1) { + throw new MissingHistoryOperationsException( + "Not all operations between from_seqno [" + + fromSeqNo + + "] " + + "and to_seqno [" + + toSeqNo + + "] found; expected seqno [" + + lastSeenSeqNo + + 1 + + "]; found [" + + op + + "]" + ); + } + } + + protected static boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException { + NumericDocValues docValues = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); + if (docValues == null || docValues.advanceExact(segmentDocId) == false) { + throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found"); + } + return docValues.longValue() == 1; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/LeafStoredFieldLoader.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/LeafStoredFieldLoader.java index c8709d3422213..3ed4c856ccc71 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/LeafStoredFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/LeafStoredFieldLoader.java @@ -47,5 +47,4 @@ public interface LeafStoredFieldLoader { * @return stored fields for the current document */ Map> storedFields(); - } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 1c3fb1907a6ea..ecc4b92f369d6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -30,6 +30,7 @@ public class DocumentMapper { private final MapperMetrics mapperMetrics; private final IndexVersion indexVersion; private final Logger logger; + private final String indexName; /** * Create a new {@link DocumentMapper} that holds empty mappings. 
@@ -67,6 +68,7 @@ public static DocumentMapper createEmpty(MapperService mapperService) { this.mapperMetrics = mapperMetrics; this.indexVersion = version; this.logger = Loggers.getLogger(getClass(), indexName); + this.indexName = indexName; assert mapping.toCompressedXContent().equals(source) || isSyntheticSourceMalformed(source, version) : "provided source [" + source + "] differs from mapping [" + mapping.toCompressedXContent() + "]"; @@ -74,9 +76,9 @@ public static DocumentMapper createEmpty(MapperService mapperService) { private void maybeLog(Exception ex) { if (logger.isDebugEnabled()) { - logger.debug("Error while parsing document: " + ex.getMessage(), ex); + logger.debug("Error while parsing document for index [" + indexName + "]: " + ex.getMessage(), ex); } else if (IntervalThrottler.DOCUMENT_PARSING_FAILURE.accept()) { - logger.info("Error while parsing document: " + ex.getMessage(), ex); + logger.info("Error while parsing document for index [" + indexName + "]: " + ex.getMessage(), ex); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 1cea8154aad43..d491eb9de5886 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -60,6 +60,8 @@ public class SourceFieldMapper extends MetadataFieldMapper { public static final String NAME = "_source"; public static final String RECOVERY_SOURCE_NAME = "_recovery_source"; + public static final String RECOVERY_SOURCE_SIZE_NAME = "_recovery_source_size"; + public static final String CONTENT_TYPE = "_source"; public static final String LOSSY_PARAMETERS_ALLOWED_SETTING_NAME = "index.lossy.source-mapping-parameters"; @@ -413,8 +415,19 @@ public void preParse(DocumentParserContext context) throws IOException { if (enableRecoverySource && originalSource != null && adaptedSource != originalSource) { // if we omitted source or modified it we add the _recovery_source to ensure we have it for ops based recovery BytesRef ref = originalSource.toBytesRef(); - context.doc().add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); - context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1)); + if (context.indexSettings().isRecoverySourceSyntheticEnabled()) { + assert isSynthetic() : "recovery source should not be disabled on non-synthetic source"; + /** + * We use synthetic source for recovery, so we omit the recovery source. + * Instead, we record only the size of the uncompressed source. + * This size is used in {@link LuceneSyntheticSourceChangesSnapshot} to control memory + * usage during the recovery process when loading a batch of synthetic sources. 
+ */ + context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_SIZE_NAME, ref.length)); + } else { + context.doc().add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); + context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1)); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f84ac22cd78e4..a76feff84e61b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2600,7 +2600,7 @@ public long getMinRetainedSeqNo() { * @param source the source of the request * @param fromSeqNo the start sequence number (inclusive) * @param toSeqNo the end sequence number (inclusive) - * @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean) + * @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean, long) */ public int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException { return getEngine().countChanges(source, fromSeqNo, toSeqNo); @@ -2619,6 +2619,7 @@ public int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOEx * @param singleConsumer true if the snapshot is accessed by only the thread that creates the snapshot. In this case, the * snapshot can enable some optimizations to improve the performance. * @param accessStats true if the stats of the snapshot is accessed via {@link Translog.Snapshot#totalOperations()} + * @param maxChunkSize The maximum allowable size, in bytes, for buffering source documents during recovery. */ public Translog.Snapshot newChangesSnapshot( String source, @@ -2626,9 +2627,10 @@ public Translog.Snapshot newChangesSnapshot( long toSeqNo, boolean requiredFullRange, boolean singleConsumer, - boolean accessStats + boolean accessStats, + long maxChunkSize ) throws IOException { - return getEngine().newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats); + return getEngine().newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats, maxChunkSize); } public List segments() { diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index f843357e056c4..1143da30c2952 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -81,7 +81,7 @@ public void resync(final IndexShard indexShard, final ActionListener // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. 
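// The new trailing argument (chunkSize.getBytes()) bounds how much uncompressed source a synthetic-source snapshot may buffer per batch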
// Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible // Also fail the resync early if the shard is shutting down - snapshot = indexShard.newChangesSnapshot("resync", startingSeqNo, Long.MAX_VALUE, false, false, true); + snapshot = indexShard.newChangesSnapshot("resync", startingSeqNo, Long.MAX_VALUE, false, false, true, chunkSize.getBytes()); final Translog.Snapshot originalSnapshot = snapshot; final Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() { @Override diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index d8bd460f6f819..6aa6a5e498789 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -98,6 +98,7 @@ public enum AbortStatus { private long processedSize; private String failure; private final SubscribableListener abortListeners = new SubscribableListener<>(); + private volatile String statusDescription; private IndexShardSnapshotStatus( final Stage stage, @@ -110,7 +111,8 @@ private IndexShardSnapshotStatus( final long totalSize, final long processedSize, final String failure, - final ShardGeneration generation + final ShardGeneration generation, + final String statusDescription ) { this.stage = new AtomicReference<>(Objects.requireNonNull(stage)); this.generation = new AtomicReference<>(generation); @@ -124,6 +126,7 @@ private IndexShardSnapshotStatus( this.processedSize = processedSize; this.incrementalSize = incrementalSize; this.failure = failure; + updateStatusDescription(statusDescription); } public synchronized Copy moveToStarted( @@ -272,6 +275,15 @@ public synchronized void addProcessedFiles(int count, long totalSize) { processedSize += totalSize; } + /** + * Updates the string explanation for what the snapshot is actively doing right now. + */ + public void updateStatusDescription(String statusString) { + assert statusString != null; + assert statusString.isEmpty() == false; + this.statusDescription = statusString; + } + /** * Returns a copy of the current {@link IndexShardSnapshotStatus}. This method is * intended to be used when a coherent state of {@link IndexShardSnapshotStatus} is needed. 
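The statusDescription plumbing above is deliberately lock-free: each phase of the shard snapshot simply overwrites one volatile string, so a concurrent reader (such as the shutdown progress logger wired up further down in this change) always observes the most recent phase without taking a lock. A minimal, self-contained sketch of the pattern follows; the class and phase names are illustrative, not part of the Elasticsearch API.

public final class PhaseTracker {
    // Volatile so a monitoring thread always reads the latest phase without locking.
    private volatile String statusDescription = "initializing";

    public void updateStatusDescription(String status) {
        assert status != null && status.isEmpty() == false;
        this.statusDescription = status;
    }

    public String statusDescription() {
        return statusDescription;
    }

    public static void main(String[] args) throws InterruptedException {
        PhaseTracker tracker = new PhaseTracker();
        Thread worker = new Thread(() -> {
            // Each phase overwrites the previous description; no history is kept.
            tracker.updateStatusDescription("listing blob prefixes");
            tracker.updateStatusDescription("uploading files");
            tracker.updateStatusDescription("finalizing");
        });
        worker.start();
        worker.join();
        // A periodic progress logger would read whatever phase is current.
        System.out.println("last observed phase: " + tracker.statusDescription());
    }
}

The trade-off is that only the latest phase survives, which is why the periodic report in SnapshotShutdownProgressTracker below is what turns these point-in-time strings into a usable timeline.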
@@ -289,12 +301,13 @@ public synchronized IndexShardSnapshotStatus.Copy asCopy() { incrementalSize, totalSize, processedSize, - failure + failure, + statusDescription ); } public static IndexShardSnapshotStatus newInitializing(ShardGeneration generation) { - return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, 0, null, generation); + return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, 0, null, generation, "initializing"); } public static IndexShardSnapshotStatus.Copy newFailed(final String failure) { @@ -302,7 +315,7 @@ public static IndexShardSnapshotStatus.Copy newFailed(final String failure) { if (failure == null) { throw new IllegalArgumentException("A failure description is required for a failed IndexShardSnapshotStatus"); } - return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, 0, failure, null).asCopy(); + return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, 0, failure, null, "initialized as failed").asCopy(); } public static IndexShardSnapshotStatus.Copy newDone( @@ -326,7 +339,8 @@ public static IndexShardSnapshotStatus.Copy newDone( size, incrementalSize, null, - generation + generation, + "initialized as done" ).asCopy(); } @@ -345,6 +359,7 @@ public static class Copy { private final long processedSize; private final long incrementalSize; private final String failure; + private final String statusDescription; public Copy( final Stage stage, @@ -356,7 +371,8 @@ public Copy( final long incrementalSize, final long totalSize, final long processedSize, - final String failure + final String failure, + final String statusDescription ) { this.stage = stage; this.startTime = startTime; @@ -368,6 +384,7 @@ public Copy( this.processedSize = processedSize; this.incrementalSize = incrementalSize; this.failure = failure; + this.statusDescription = statusDescription; } public Stage getStage() { @@ -410,6 +427,10 @@ public String getFailure() { return failure; } + public String getStatusDescription() { + return statusDescription; + } + @Override public String toString() { return "index shard snapshot status (" @@ -433,6 +454,8 @@ public String toString() { + processedSize + ", failure='" + failure + + "', statusDescription='" + + statusDescription + '\'' + ')'; } @@ -461,6 +484,8 @@ public String toString() { + processedSize + ", failure='" + failure + + "', statusDescription='" + + statusDescription + '\'' + ')'; } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 1ec187ea4a34b..475f83de9cae3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -399,6 +399,18 @@ public Iterator> settings() { public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); + /** + * The maximum allowable size, in bytes, for buffering source documents during recovery. 
+ */ + public static final Setting INDICES_RECOVERY_CHUNK_SIZE = Setting.byteSizeSetting( + "indices.recovery.chunk_size", + DEFAULT_CHUNK_SIZE, + ByteSizeValue.ZERO, + ByteSizeValue.ofBytes(Integer.MAX_VALUE), + Property.NodeScope, + Property.Dynamic + ); + private volatile ByteSizeValue maxBytesPerSec; private volatile int maxConcurrentFileChunks; private volatile int maxConcurrentOperations; @@ -417,7 +429,7 @@ public Iterator> settings() { private final AdjustableSemaphore maxSnapshotFileDownloadsPerNodeSemaphore; - private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; + private volatile ByteSizeValue chunkSize; private final ByteSizeValue availableNetworkBandwidth; private final ByteSizeValue availableDiskReadBandwidth; @@ -444,6 +456,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.availableNetworkBandwidth = NODE_BANDWIDTH_RECOVERY_NETWORK_SETTING.get(settings); this.availableDiskReadBandwidth = NODE_BANDWIDTH_RECOVERY_DISK_READ_SETTING.get(settings); this.availableDiskWriteBandwidth = NODE_BANDWIDTH_RECOVERY_DISK_WRITE_SETTING.get(settings); + this.chunkSize = INDICES_RECOVERY_CHUNK_SIZE.get(settings); validateNodeBandwidthRecoverySettings(settings); this.nodeBandwidthSettingsExist = hasNodeBandwidthRecoverySettings(settings); computeMaxBytesPerSec(settings); @@ -493,6 +506,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, this::setMaxConcurrentIncomingRecoveries ); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CHUNK_SIZE, this::setChunkSize); } private void computeMaxBytesPerSec(Settings settings) { @@ -597,7 +611,7 @@ public ByteSizeValue getChunkSize() { return chunkSize; } - public void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests + public void setChunkSize(ByteSizeValue chunkSize) { if (chunkSize.bytesAsInt() <= 0) { throw new IllegalArgumentException("chunkSize must be > 0"); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 3603b984fb148..622e56f596e19 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -324,7 +324,8 @@ && isTargetSameHistory() Long.MAX_VALUE, false, false, - true + true, + chunkSizeInBytes ); resources.add(phase2Snapshot); retentionLock.close(); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index f1c3d82b74cab..11386eba10196 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -3186,6 +3186,7 @@ private void writeAtomic( @Override public void snapshotShard(SnapshotShardContext context) { + context.status().updateStatusDescription("queued in snapshot task runner"); shardSnapshotTaskRunner.enqueueShardSnapshot(context); } @@ -3198,6 +3199,7 @@ private void doSnapshotShard(SnapshotShardContext context) { final ShardId shardId = store.shardId(); final SnapshotId snapshotId = context.snapshotId(); final IndexShardSnapshotStatus snapshotStatus = context.status(); + snapshotStatus.updateStatusDescription("snapshot task runner: 
setting up shard snapshot"); final long startTime = threadPool.absoluteTimeInMillis(); try { final ShardGeneration generation = snapshotStatus.generation(); @@ -3206,6 +3208,7 @@ private void doSnapshotShard(SnapshotShardContext context) { final Set blobs; if (generation == null) { snapshotStatus.ensureNotAborted(); + snapshotStatus.updateStatusDescription("snapshot task runner: listing blob prefixes"); try { blobs = shardContainer.listBlobsByPrefix(OperationPurpose.SNAPSHOT_METADATA, SNAPSHOT_INDEX_PREFIX).keySet(); } catch (IOException e) { @@ -3216,6 +3219,7 @@ private void doSnapshotShard(SnapshotShardContext context) { } snapshotStatus.ensureNotAborted(); + snapshotStatus.updateStatusDescription("snapshot task runner: loading snapshot blobs"); Tuple tuple = buildBlobStoreIndexShardSnapshots( context.indexId(), shardId.id(), @@ -3316,6 +3320,7 @@ private void doSnapshotShard(SnapshotShardContext context) { indexCommitPointFiles = filesFromSegmentInfos; } + snapshotStatus.updateStatusDescription("snapshot task runner: starting shard snapshot"); snapshotStatus.moveToStarted( startTime, indexIncrementalFileCount, @@ -3342,6 +3347,7 @@ private void doSnapshotShard(SnapshotShardContext context) { BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, Boolean.toString(writeFileInfoWriterUUID) ); + snapshotStatus.updateStatusDescription("snapshot task runner: updating blob store with new shard generation"); INDEX_SHARD_SNAPSHOTS_FORMAT.write( updatedBlobStoreIndexShardSnapshots, shardContainer, @@ -3387,6 +3393,7 @@ private void doSnapshotShard(SnapshotShardContext context) { BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, Boolean.toString(writeFileInfoWriterUUID) ); + snapshotStatus.updateStatusDescription("no shard generations: writing new index-${N} file"); writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots, serializationParams); } catch (IOException e) { throw new IndexShardSnapshotFailedException( @@ -3401,6 +3408,7 @@ private void doSnapshotShard(SnapshotShardContext context) { } snapshotStatus.addProcessedFiles(finalFilesInShardMetadataCount, finalFilesInShardMetadataSize); try { + snapshotStatus.updateStatusDescription("no shard generations: deleting blobs"); deleteFromContainer(OperationPurpose.SNAPSHOT_METADATA, shardContainer, blobsToDelete.iterator()); } catch (IOException e) { logger.warn( @@ -3414,6 +3422,7 @@ private void doSnapshotShard(SnapshotShardContext context) { // filesToSnapshot will be emptied while snapshotting the file. We make a copy here for cleanup purpose in case of failure. 
final AtomicReference> fileToCleanUp = new AtomicReference<>(List.copyOf(filesToSnapshot)); final ActionListener> allFilesUploadedListener = ActionListener.assertOnce(ActionListener.wrap(ignore -> { + snapshotStatus.updateStatusDescription("all files uploaded: finalizing"); final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(); // now create and write the commit point @@ -3435,6 +3444,7 @@ private void doSnapshotShard(SnapshotShardContext context) { BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, Boolean.toString(writeFileInfoWriterUUID) ); + snapshotStatus.updateStatusDescription("all files uploaded: writing to index shard file"); INDEX_SHARD_SNAPSHOT_FORMAT.write( blobStoreIndexShardSnapshot, shardContainer, @@ -3451,10 +3461,12 @@ private void doSnapshotShard(SnapshotShardContext context) { ByteSizeValue.ofBytes(blobStoreIndexShardSnapshot.totalSize()), getSegmentInfoFileCount(blobStoreIndexShardSnapshot.indexFiles()) ); + snapshotStatus.updateStatusDescription("all files uploaded: done"); snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), shardSnapshotResult); context.onResponse(shardSnapshotResult); }, e -> { try { + snapshotStatus.updateStatusDescription("all files uploaded: cleaning up data files, exception while finalizing: " + e); shardContainer.deleteBlobsIgnoringIfNotExists( OperationPurpose.SNAPSHOT_DATA, Iterators.flatMap(fileToCleanUp.get().iterator(), f -> Iterators.forRange(0, f.numberOfParts(), f::partName)) @@ -3484,12 +3496,10 @@ private static void ensureNotAborted(ShardId shardId, SnapshotId snapshotId, Ind // A normally running shard snapshot should be in stage INIT or STARTED. And we know it's not in PAUSING or ABORTED because // the ensureNotAborted() call above did not throw. The remaining options don't make sense, if they ever happen. logger.error( - () -> Strings.format( - "Shard snapshot found an unexpected state. ShardId [{}], SnapshotID [{}], Stage [{}]", - shardId, - snapshotId, - shardSnapshotStage - ) + "Shard snapshot found an unexpected state. ShardId [{}], SnapshotID [{}], Stage [{}]", + shardId, + snapshotId, + shardSnapshotStage ); assert false; } @@ -3519,6 +3529,7 @@ protected void snapshotFiles( ) { final int noOfFilesToSnapshot = filesToSnapshot.size(); final ActionListener filesListener = fileQueueListener(filesToSnapshot, noOfFilesToSnapshot, allFilesUploadedListener); + context.status().updateStatusDescription("enqueued file snapshot tasks: threads running concurrent file uploads"); for (int i = 0; i < noOfFilesToSnapshot; i++) { shardSnapshotTaskRunner.enqueueFileSnapshot(context, filesToSnapshot::poll, filesListener); } diff --git a/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java b/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java index 2f53f48f9ae5b..bfb8ca018fca2 100644 --- a/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java @@ -22,13 +22,13 @@ /** * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs and versions. 
*/ -final class MethodHandlers { +public final class MethodHandlers { private final String path; private final Map> methodHandlers; @SuppressWarnings("unused") // only accessed via #STATS_TRACKER_HANDLE, lazy initialized because instances consume non-trivial heap - private volatile HttpRouteStatsTracker statsTracker; + private HttpRouteStatsTracker statsTracker; private static final VarHandle STATS_TRACKER_HANDLE; diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java index e552f4058a879..902dd79eb547e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java @@ -11,7 +11,8 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestCancellableNodeClient; @@ -34,19 +35,12 @@ public class RestKnnSearchAction extends BaseRestHandler { public RestKnnSearchAction() {} - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_RELEVANCE) - // these routes were ".deprecated" in RestApiVersion.V_8 which will require use of REST API compatibility headers to access - // this API in v9. It is unclear if this was intentional for v9, and the code has been updated to ".deprecateAndKeep" which will - // continue to emit deprecations warnings but will not require any special headers to access the API in v9. - // Please review and update the code and tests as needed. The original code remains commented out below for reference. @Override + @UpdateForV10(owner = UpdateForV10.Owner.SEARCH_RELEVANCE) public List routes() { return List.of( - // Route.builder(GET, "{index}/_knn_search").deprecated(DEPRECATION_MESSAGE, RestApiVersion.V_8).build(), - // Route.builder(POST, "{index}/_knn_search").deprecated(DEPRECATION_MESSAGE, RestApiVersion.V_8).build() - Route.builder(GET, "{index}/_knn_search").deprecateAndKeep(DEPRECATION_MESSAGE).build(), - Route.builder(POST, "{index}/_knn_search").deprecateAndKeep(DEPRECATION_MESSAGE).build() + Route.builder(GET, "{index}/_knn_search").deprecatedForRemoval(DEPRECATION_MESSAGE, RestApiVersion.V_8).build(), + Route.builder(POST, "{index}/_knn_search").deprecatedForRemoval(DEPRECATION_MESSAGE, RestApiVersion.V_8).build() ); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index cec1fa8712ec1..b9bd398500c71 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -147,6 +147,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiFunction; +import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -549,16 +551,17 @@ public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, // check if we can shortcut the query phase entirely. 
if (orig.canReturnNullResponseIfMatchNoDocs()) { assert orig.scroll() == null; - final CanMatchShardResponse canMatchResp; - try { - ShardSearchRequest clone = new ShardSearchRequest(orig); - canMatchResp = canMatch(clone, false); - } catch (Exception exc) { - l.onFailure(exc); - return; - } + ShardSearchRequest clone = new ShardSearchRequest(orig); + CanMatchContext canMatchContext = new CanMatchContext( + clone, + indicesService::indexServiceSafe, + this::findReaderContext, + defaultKeepAlive, + maxKeepAlive + ); + CanMatchShardResponse canMatchResp = canMatch(canMatchContext, false); if (canMatchResp.canMatch() == false) { - l.onResponse(QuerySearchResult.nullInstance()); + listener.onResponse(QuerySearchResult.nullInstance()); return; } } @@ -1191,10 +1194,14 @@ public void freeAllScrollContexts() { } private long getKeepAlive(ShardSearchRequest request) { + return getKeepAlive(request, defaultKeepAlive, maxKeepAlive); + } + + private static long getKeepAlive(ShardSearchRequest request, long defaultKeepAlive, long maxKeepAlive) { if (request.scroll() != null) { - return getScrollKeepAlive(request.scroll()); + return getScrollKeepAlive(request.scroll(), defaultKeepAlive, maxKeepAlive); } else if (request.keepAlive() != null) { - checkKeepAliveLimit(request.keepAlive().millis()); + checkKeepAliveLimit(request.keepAlive().millis(), maxKeepAlive); return request.keepAlive().getMillis(); } else { return request.readerId() == null ? defaultKeepAlive : -1; @@ -1202,14 +1209,22 @@ private long getKeepAlive(ShardSearchRequest request) { } private long getScrollKeepAlive(Scroll scroll) { + return getScrollKeepAlive(scroll, defaultKeepAlive, maxKeepAlive); + } + + private static long getScrollKeepAlive(Scroll scroll, long defaultKeepAlive, long maxKeepAlive) { if (scroll != null && scroll.keepAlive() != null) { - checkKeepAliveLimit(scroll.keepAlive().millis()); + checkKeepAliveLimit(scroll.keepAlive().millis(), maxKeepAlive); return scroll.keepAlive().getMillis(); } return defaultKeepAlive; } private void checkKeepAliveLimit(long keepAlive) { + checkKeepAliveLimit(keepAlive, maxKeepAlive); + } + + private static void checkKeepAliveLimit(long keepAlive, long maxKeepAlive) { if (keepAlive > maxKeepAlive) { throw new IllegalArgumentException( "Keep alive for request (" @@ -1620,6 +1635,7 @@ public void canMatch(CanMatchNodeRequest request, ActionListener responses = new ArrayList<>(shardLevelRequests.size()); for (var shardLevelRequest : shardLevelRequests) { try { + // TODO remove the exception handling as it's now in canMatch itself responses.add(new CanMatchNodeResponse.ResponseOrFailure(canMatch(request.createShardSearchRequest(shardLevelRequest)))); } catch (Exception e) { responses.add(new CanMatchNodeResponse.ResponseOrFailure(e)); @@ -1631,82 +1647,145 @@ public void canMatch(CanMatchNodeRequest request, ActionListener indexServiceLookup; + private final BiFunction findReaderContext; + private final long defaultKeepAlive; + private final long maxKeepAlive; + + private IndexService indexService; + + CanMatchContext( + ShardSearchRequest request, + Function indexServiceLookup, + BiFunction findReaderContext, + long defaultKeepAlive, + long maxKeepAlive + ) { + this.request = request; + this.indexServiceLookup = indexServiceLookup; + this.findReaderContext = findReaderContext; + this.defaultKeepAlive = defaultKeepAlive; + this.maxKeepAlive = maxKeepAlive; + } + + long getKeepAlive() { + return SearchService.getKeepAlive(request, defaultKeepAlive, maxKeepAlive); + } + + ReaderContext 
findReaderContext() { + return findReaderContext.apply(request.readerId(), request); + } + + QueryRewriteContext getQueryRewriteContext(IndexService indexService) { + return indexService.newQueryRewriteContext(request::nowInMillis, request.getRuntimeMappings(), request.getClusterAlias()); + } + + SearchExecutionContext getSearchExecutionContext(Engine.Searcher searcher) { + return getIndexService().newSearchExecutionContext( + request.shardId().id(), + 0, + searcher, + request::nowInMillis, + request.getClusterAlias(), + request.getRuntimeMappings() + ); + } + + IndexShard getShard() { + return getIndexService().getShard(request.shardId().getId()); + } + + IndexService getIndexService() { + if (this.indexService == null) { + this.indexService = indexServiceLookup.apply(request.shardId().getIndex()); + } + return this.indexService; + } + } + + static CanMatchShardResponse canMatch(CanMatchContext canMatchContext, boolean checkRefreshPending) { + assert canMatchContext.request.searchType() == SearchType.QUERY_THEN_FETCH + : "unexpected search type: " + canMatchContext.request.searchType(); Releasable releasable = null; try { IndexService indexService; final boolean hasRefreshPending; final Engine.Searcher canMatchSearcher; - if (request.readerId() != null) { + if (canMatchContext.request.readerId() != null) { hasRefreshPending = false; ReaderContext readerContext; Engine.Searcher searcher; try { - readerContext = findReaderContext(request.readerId(), request); - releasable = readerContext.markAsUsed(getKeepAlive(request)); + readerContext = canMatchContext.findReaderContext(); + releasable = readerContext.markAsUsed(canMatchContext.getKeepAlive()); indexService = readerContext.indexService(); - if (canMatchAfterRewrite(request, indexService) == false) { + QueryRewriteContext queryRewriteContext = canMatchContext.getQueryRewriteContext(indexService); + if (queryStillMatchesAfterRewrite(canMatchContext.request, queryRewriteContext) == false) { return new CanMatchShardResponse(false, null); } searcher = readerContext.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); } catch (SearchContextMissingException e) { - final String searcherId = request.readerId().getSearcherId(); + final String searcherId = canMatchContext.request.readerId().getSearcherId(); if (searcherId == null) { - throw e; + return new CanMatchShardResponse(true, null); } - indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - if (canMatchAfterRewrite(request, indexService) == false) { + if (queryStillMatchesAfterRewrite( + canMatchContext.request, + canMatchContext.getQueryRewriteContext(canMatchContext.getIndexService()) + ) == false) { return new CanMatchShardResponse(false, null); } - IndexShard indexShard = indexService.getShard(request.shardId().getId()); - final Engine.SearcherSupplier searcherSupplier = indexShard.acquireSearcherSupplier(); + final Engine.SearcherSupplier searcherSupplier = canMatchContext.getShard().acquireSearcherSupplier(); if (searcherId.equals(searcherSupplier.getSearcherId()) == false) { searcherSupplier.close(); - throw e; + return new CanMatchShardResponse(true, null); } releasable = searcherSupplier; searcher = searcherSupplier.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); } canMatchSearcher = searcher; } else { - indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - if (canMatchAfterRewrite(request, indexService) == false) { + if (queryStillMatchesAfterRewrite( + canMatchContext.request, + 
canMatchContext.getQueryRewriteContext(canMatchContext.getIndexService()) + ) == false) { return new CanMatchShardResponse(false, null); } - IndexShard indexShard = indexService.getShard(request.shardId().getId()); - boolean needsWaitForRefresh = request.waitForCheckpoint() != UNASSIGNED_SEQ_NO; + boolean needsWaitForRefresh = canMatchContext.request.waitForCheckpoint() != UNASSIGNED_SEQ_NO; // If this request has wait_for_refresh behavior, it is safest to assume a refresh is pending. Theoretically, // this can be improved in the future by manually checking that the requested checkpoint has already been refreshed. // However, this would require modifying the engine to surface that information. + IndexShard indexShard = canMatchContext.getShard(); hasRefreshPending = needsWaitForRefresh || (indexShard.hasRefreshPending() && checkRefreshPending); canMatchSearcher = indexShard.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); } try (canMatchSearcher) { - SearchExecutionContext context = indexService.newSearchExecutionContext( - request.shardId().id(), - 0, - canMatchSearcher, - request::nowInMillis, - request.getClusterAlias(), - request.getRuntimeMappings() - ); - final boolean canMatch = queryStillMatchesAfterRewrite(request, context); - final MinAndMax minMax; + SearchExecutionContext context = canMatchContext.getSearchExecutionContext(canMatchSearcher); + final boolean canMatch = queryStillMatchesAfterRewrite(canMatchContext.request, context); if (canMatch || hasRefreshPending) { - FieldSortBuilder sortBuilder = FieldSortBuilder.getPrimaryFieldSortOrNull(request.source()); - minMax = sortBuilder != null ? FieldSortBuilder.getMinMaxOrNull(context, sortBuilder) : null; - } else { - minMax = null; + FieldSortBuilder sortBuilder = FieldSortBuilder.getPrimaryFieldSortOrNull(canMatchContext.request.source()); + final MinAndMax minMax = sortBuilder != null ? FieldSortBuilder.getMinMaxOrNull(context, sortBuilder) : null; + return new CanMatchShardResponse(true, minMax); } - return new CanMatchShardResponse(canMatch || hasRefreshPending, minMax); + return new CanMatchShardResponse(false, null); } + } catch (Exception e) { + return new CanMatchShardResponse(true, null); } finally { Releasables.close(releasable); } @@ -1719,15 +1798,6 @@ private CanMatchShardResponse canMatch(ShardSearchRequest request, boolean check * {@link MatchNoneQueryBuilder}. This allows us to avoid extra work, for example making the shard search active and waiting for * refreshes. 
*/ - private static boolean canMatchAfterRewrite(final ShardSearchRequest request, final IndexService indexService) throws IOException { - final QueryRewriteContext queryRewriteContext = indexService.newQueryRewriteContext( - request::nowInMillis, - request.getRuntimeMappings(), - request.getClusterAlias() - ); - return queryStillMatchesAfterRewrite(request, queryRewriteContext); - } - @SuppressWarnings("unchecked") public static boolean queryStillMatchesAfterRewrite(ShardSearchRequest request, QueryRewriteContext context) throws IOException { Rewriteable.rewrite(request.getRewriteable(), context, false); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index ded7a5e85f447..25c1486e10ce0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -240,6 +240,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_3_0; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java index c629d93a683f6..cc74cba1e2d8a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java @@ -370,6 +370,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_3_0; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java index bbc3e99bac7ba..449a189e9aa39 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java @@ -377,6 +377,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_3_0; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java index 8b0909493173a..75431ac44d200 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java @@ -124,6 +124,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java 
index de241301cfef9..ddb1e3d384fbe 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -160,6 +160,7 @@ public final class RestoreService implements ClusterStateApplier { SETTING_HISTORY_UUID, IndexSettings.MODE.getKey(), SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), + IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 234c0239a68ce..90111c44fbd96 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -61,6 +61,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static org.elasticsearch.core.Strings.format; @@ -108,6 +109,7 @@ public SnapshotShardsService( this.threadPool = transportService.getThreadPool(); this.snapshotShutdownProgressTracker = new SnapshotShutdownProgressTracker( () -> clusterService.state().nodes().getLocalNodeId(), + (callerLogger) -> logIndexShardSnapshotStatuses(callerLogger), clusterService.getClusterSettings(), threadPool ); @@ -234,6 +236,14 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } } + private void logIndexShardSnapshotStatuses(Logger callerLogger) { + for (var snapshotStatuses : shardSnapshots.values()) { + for (var shardSnapshot : snapshotStatuses.entrySet()) { + callerLogger.info(Strings.format("ShardId %s, %s", shardSnapshot.getKey(), shardSnapshot.getValue())); + } + } + } + /** * Returns status of shards that are snapshotted on the node and belong to the given snapshot *

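The hunks below thread two callbacks through SnapshotShardsService: the Consumer<Logger> passed to the tracker's constructor above, which lets the service append per-shard status lines to the tracker's periodic report without the tracker reaching into the service's internal maps, and a Supplier<Void> postMasterNotificationAction that records when the attempt to notify the master of a shard status update has completed. A standalone sketch of the logging-callback inversion, using java.util.logging and invented names so it runs on its own:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
import java.util.logging.Logger;

public final class ProgressReporting {

    // The tracker decides when to report; the injected callback decides what
    // per-shard detail to append, so the tracker never depends on the service.
    static final class Tracker {
        private final Consumer<Logger> logShardStatuses;

        Tracker(Consumer<Logger> logShardStatuses) {
            this.logShardStatuses = logShardStatuses;
        }

        void logProgressReport(Logger logger) {
            logger.info("active shard snapshot progress report");
            logShardStatuses.accept(logger);
        }
    }

    public static void main(String[] args) {
        Map<String, String> shardStatuses = new ConcurrentHashMap<>();
        shardStatuses.put("shard-0", "uploading files");
        shardStatuses.put("shard-1", "finalizing");
        Tracker tracker = new Tracker(l -> shardStatuses.forEach((shard, status) -> l.info(shard + ": " + status)));
        tracker.logProgressReport(Logger.getLogger("snapshots"));
    }
}

Call sites that have no post-notification bookkeeping to do simply pass () -> null, which keeps the widened method signatures uniform.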
@@ -321,7 +331,8 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, boolean r sid, ShardState.FAILED, shard.getValue().reason(), - shard.getValue().generation() + shard.getValue().generation(), + () -> null ); } } else { @@ -372,6 +383,7 @@ private void startNewShardSnapshots(String localNodeId, SnapshotsInProgress.Entr + snapshotStatus.generation() + "] for snapshot with old-format compatibility"; shardSnapshotTasks.add(newShardSnapshotTask(shardId, snapshot, indexId, snapshotStatus, entry.version(), entry.startTime())); + snapshotStatus.updateStatusDescription("shard snapshot scheduled to start"); } threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> shardSnapshotTasks.forEach(Runnable::run)); @@ -383,6 +395,7 @@ private void pauseShardSnapshotsForNodeRemoval(String localNodeId, SnapshotsInPr for (final Map.Entry shardEntry : entry.shards().entrySet()) { final ShardId shardId = shardEntry.getKey(); final ShardSnapshotStatus masterShardSnapshotStatus = shardEntry.getValue(); + IndexShardSnapshotStatus indexShardSnapshotStatus = localShardSnapshots.get(shardId); if (masterShardSnapshotStatus.state() != ShardState.INIT) { // shard snapshot not currently scheduled by master @@ -402,7 +415,11 @@ private void pauseShardSnapshotsForNodeRemoval(String localNodeId, SnapshotsInPr shardId, ShardState.PAUSED_FOR_NODE_REMOVAL, "paused", - masterShardSnapshotStatus.generation() + masterShardSnapshotStatus.generation(), + () -> { + indexShardSnapshotStatus.updateStatusDescription("finished: master notification attempt complete"); + return null; + } ); } else { // shard snapshot currently running, mark for pause @@ -419,9 +436,16 @@ private Runnable newShardSnapshotTask( final IndexVersion entryVersion, final long entryStartTime ) { + Supplier postMasterNotificationAction = () -> { + snapshotStatus.updateStatusDescription("finished: master notification attempt complete"); + return null; + }; + + // Listener that runs on completion of the shard snapshot: it will notify the master node of success or failure. 
ActionListener snapshotResultListener = new ActionListener<>() { @Override public void onResponse(ShardSnapshotResult shardSnapshotResult) { + snapshotStatus.updateStatusDescription("snapshot succeeded: proceeding to notify master of success"); final ShardGeneration newGeneration = shardSnapshotResult.getGeneration(); assert newGeneration != null; assert newGeneration.equals(snapshotStatus.generation()); @@ -436,11 +460,13 @@ public void onResponse(ShardSnapshotResult shardSnapshotResult) { snapshotStatus.generation() ); } - notifySuccessfulSnapshotShard(snapshot, shardId, shardSnapshotResult); + + notifySuccessfulSnapshotShard(snapshot, shardId, shardSnapshotResult, postMasterNotificationAction); } @Override public void onFailure(Exception e) { + snapshotStatus.updateStatusDescription("failed with exception '" + e + "': proceeding to notify master of failure"); final String failure; final Stage nextStage; if (e instanceof AbortedSnapshotException) { @@ -457,7 +483,14 @@ public void onFailure(Exception e) { logger.warn(() -> format("[%s][%s] failed to snapshot shard", shardId, snapshot), e); } final var shardState = snapshotStatus.moveToUnsuccessful(nextStage, failure, threadPool.absoluteTimeInMillis()); - notifyUnsuccessfulSnapshotShard(snapshot, shardId, shardState, failure, snapshotStatus.generation()); + notifyUnsuccessfulSnapshotShard( + snapshot, + shardId, + shardState, + failure, + snapshotStatus.generation(), + postMasterNotificationAction + ); } }; @@ -508,6 +541,7 @@ private void snapshot( ActionListener resultListener ) { ActionListener.run(resultListener, listener -> { + snapshotStatus.updateStatusDescription("has started"); snapshotStatus.ensureNotAborted(); final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); if (indexShard.routingEntry().primary() == false) { @@ -527,7 +561,9 @@ private void snapshot( final Repository repository = repositoriesService.repository(snapshot.getRepository()); SnapshotIndexCommit snapshotIndexCommit = null; try { + snapshotStatus.updateStatusDescription("acquiring commit reference from IndexShard: triggers a shard flush"); snapshotIndexCommit = new SnapshotIndexCommit(indexShard.acquireIndexCommitForSnapshot()); + snapshotStatus.updateStatusDescription("commit reference acquired, proceeding with snapshot"); final var shardStateId = getShardStateId(indexShard, snapshotIndexCommit.indexCommit()); // not aborted so indexCommit() ok snapshotStatus.addAbortListener(makeAbortListener(indexShard.shardId(), snapshot, snapshotIndexCommit)); snapshotStatus.ensureNotAborted(); @@ -652,8 +688,12 @@ private void syncShardStatsOnNewMaster(List entries) snapshot.snapshot(), shardId ); - notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localShard.getValue().getShardSnapshotResult()); - + notifySuccessfulSnapshotShard( + snapshot.snapshot(), + shardId, + localShard.getValue().getShardSnapshotResult(), + () -> null + ); } else if (stage == Stage.FAILURE) { // but we think the shard failed - we need to make the new master know that the shard failed logger.debug( @@ -667,7 +707,8 @@ private void syncShardStatsOnNewMaster(List entries) shardId, ShardState.FAILED, indexShardSnapshotStatus.getFailure(), - localShard.getValue().generation() + localShard.getValue().generation(), + () -> null ); } else if (stage == Stage.PAUSED) { // but we think the shard has paused - we need to make the new master know that @@ -680,7 +721,8 @@ private void syncShardStatsOnNewMaster(List entries) shardId, 
ShardState.PAUSED_FOR_NODE_REMOVAL, indexShardSnapshotStatus.getFailure(), - localShard.getValue().generation() + localShard.getValue().generation(), + () -> null ); } } @@ -693,10 +735,20 @@ private void syncShardStatsOnNewMaster(List entries) /** * Notify the master node that the given shard snapshot completed successfully. */ - private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, ShardSnapshotResult shardSnapshotResult) { + private void notifySuccessfulSnapshotShard( + final Snapshot snapshot, + final ShardId shardId, + ShardSnapshotResult shardSnapshotResult, + Supplier postMasterNotificationAction + ) { assert shardSnapshotResult != null; assert shardSnapshotResult.getGeneration() != null; - sendSnapshotShardUpdate(snapshot, shardId, ShardSnapshotStatus.success(clusterService.localNode().getId(), shardSnapshotResult)); + sendSnapshotShardUpdate( + snapshot, + shardId, + ShardSnapshotStatus.success(clusterService.localNode().getId(), shardSnapshotResult), + postMasterNotificationAction + ); } /** @@ -707,13 +759,15 @@ private void notifyUnsuccessfulSnapshotShard( final ShardId shardId, final ShardState shardState, final String failure, - final ShardGeneration generation + final ShardGeneration generation, + Supplier postMasterNotificationAction ) { assert shardState == ShardState.FAILED || shardState == ShardState.PAUSED_FOR_NODE_REMOVAL : shardState; sendSnapshotShardUpdate( snapshot, shardId, - new ShardSnapshotStatus(clusterService.localNode().getId(), shardState, generation, failure) + new ShardSnapshotStatus(clusterService.localNode().getId(), shardState, generation, failure), + postMasterNotificationAction ); if (shardState == ShardState.PAUSED_FOR_NODE_REMOVAL) { logger.debug( @@ -726,7 +780,12 @@ private void notifyUnsuccessfulSnapshotShard( } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ - private void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, final ShardSnapshotStatus status) { + private void sendSnapshotShardUpdate( + final Snapshot snapshot, + final ShardId shardId, + final ShardSnapshotStatus status, + Supplier postMasterNotificationAction + ) { ActionListener updateResultListener = new ActionListener<>() { @Override public void onResponse(Void aVoid) { @@ -738,9 +797,11 @@ public void onFailure(Exception e) { logger.warn(() -> format("[%s][%s] failed to update snapshot state to [%s]", shardId, snapshot, status), e); } }; + snapshotShutdownProgressTracker.trackRequestSentToMaster(snapshot, shardId); var releaseTrackerRequestRunsBeforeResultListener = ActionListener.runBefore(updateResultListener, () -> { snapshotShutdownProgressTracker.releaseRequestSentToMaster(snapshot, shardId); + postMasterNotificationAction.get(); }); remoteFailedRequestDeduplicator.executeOnce( diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShutdownProgressTracker.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShutdownProgressTracker.java index 5d81e3c4e46af..45f2fb96fce4e 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShutdownProgressTracker.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShutdownProgressTracker.java @@ -25,6 +25,7 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -45,6 +46,7 @@ public class SnapshotShutdownProgressTracker { 
private static final Logger logger = LogManager.getLogger(SnapshotShutdownProgressTracker.class); private final Supplier getLocalNodeId; + private final Consumer logIndexShardSnapshotStatuses; private final ThreadPool threadPool; private volatile TimeValue progressLoggerInterval; @@ -83,8 +85,14 @@ public class SnapshotShutdownProgressTracker { private final AtomicLong abortedCount = new AtomicLong(); private final AtomicLong pausedCount = new AtomicLong(); - public SnapshotShutdownProgressTracker(Supplier localNodeIdSupplier, ClusterSettings clusterSettings, ThreadPool threadPool) { + public SnapshotShutdownProgressTracker( + Supplier localNodeIdSupplier, + Consumer logShardStatuses, + ClusterSettings clusterSettings, + ThreadPool threadPool + ) { this.getLocalNodeId = localNodeIdSupplier; + this.logIndexShardSnapshotStatuses = logShardStatuses; clusterSettings.initializeAndWatch( SNAPSHOT_PROGRESS_DURING_SHUTDOWN_LOG_INTERVAL_SETTING, value -> this.progressLoggerInterval = value @@ -122,14 +130,14 @@ private void cancelProgressLogger() { } /** - * Logs some statistics about shard snapshot progress. + * Logs information about shard snapshot progress. */ private void logProgressReport() { logger.info( """ Current active shard snapshot stats on data node [{}]. \ - Node shutdown cluster state update received at [{}]. \ - Finished signalling shard snapshots to pause at [{}]. \ + Node shutdown cluster state update received at [{} millis]. \ + Finished signalling shard snapshots to pause at [{} millis]. \ Number shard snapshots running [{}]. \ Number shard snapshots waiting for master node reply to status update request [{}] \ Shard snapshot completion stats since shutdown began: Done [{}]; Failed [{}]; Aborted [{}]; Paused [{}]\ @@ -144,6 +152,8 @@ private void logProgressReport() { abortedCount.get(), pausedCount.get() ); + // Use a callback to log the shard snapshot details. 
+ logIndexShardSnapshotStatuses.accept(logger); } /** diff --git a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index bfe6377b495ab..84a8ee1b2ebbf 100644 --- a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -19,6 +19,8 @@ import org.elasticsearch.telemetry.tracing.Tracer; import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.util.concurrent.Executor; import static org.elasticsearch.core.Releasables.assertOnce; @@ -33,7 +35,19 @@ public class RequestHandlerRegistry implements private final TaskManager taskManager; private final Tracer tracer; private final Writeable.Reader requestReader; - private final TransportActionStatsTracker statsTracker = new TransportActionStatsTracker(); + @SuppressWarnings("unused") // only accessed via #STATS_TRACKER_HANDLE, lazy initialized because instances consume non-trivial heap + private TransportActionStatsTracker statsTracker; + + private static final VarHandle STATS_TRACKER_HANDLE; + + static { + try { + STATS_TRACKER_HANDLE = MethodHandles.lookup() + .findVarHandle(RequestHandlerRegistry.class, "statsTracker", TransportActionStatsTracker.class); + } catch (Exception e) { + throw new ExceptionInInitializerError(e); + } + } public RequestHandlerRegistry( String action, @@ -118,15 +132,34 @@ public static RequestHandlerRegistry replaceHand } public void addRequestStats(int messageSize) { - statsTracker.addRequestStats(messageSize); + statsTracker().addRequestStats(messageSize); } @Override public void addResponseStats(int messageSize) { - statsTracker.addResponseStats(messageSize); + statsTracker().addResponseStats(messageSize); } public TransportActionStats getStats() { + var statsTracker = existingStatsTracker(); + if (statsTracker == null) { + return TransportActionStats.EMPTY; + } return statsTracker.getStats(); } + + private TransportActionStatsTracker statsTracker() { + var tracker = existingStatsTracker(); + if (tracker == null) { + var newTracker = new TransportActionStatsTracker(); + if ((tracker = (TransportActionStatsTracker) STATS_TRACKER_HANDLE.compareAndExchange(this, null, newTracker)) == null) { + tracker = newTracker; + } + } + return tracker; + } + + private TransportActionStatsTracker existingStatsTracker() { + return (TransportActionStatsTracker) STATS_TRACKER_HANDLE.getAcquire(this); + } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportActionStats.java b/server/src/main/java/org/elasticsearch/transport/TransportActionStats.java index feed042a5934e..f35443cdc8d6d 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportActionStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportActionStats.java @@ -27,6 +27,8 @@ public record TransportActionStats( long[] responseSizeHistogram ) implements Writeable, ToXContentObject { + public static final TransportActionStats EMPTY = new TransportActionStats(0, 0, new long[0], 0, 0, new long[0]); + public TransportActionStats(StreamInput in) throws IOException { this(in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLong(), in.readVLong(), in.readVLongArray()); } diff --git a/server/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java b/server/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java index 6aa85fb132e28..1660eeee837b3 
100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.bootstrap; +import org.elasticsearch.jdk.RuntimeVersionFeature; import org.elasticsearch.test.ESTestCase; import java.security.AccessControlContext; @@ -27,7 +28,10 @@ public class ESPolicyTests extends ESTestCase { * test restricting privileges to no permissions actually works */ public void testRestrictPrivileges() { - assumeTrue("test requires security manager", System.getSecurityManager() != null); + assumeTrue( + "test requires security manager", + RuntimeVersionFeature.isSecurityManagerAvailable() && System.getSecurityManager() != null + ); try { System.getProperty("user.home"); } catch (SecurityException e) { diff --git a/server/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/server/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java index 1d46bb7be33d5..98a1f577cfa3a 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.bootstrap; +import org.elasticsearch.jdk.RuntimeVersionFeature; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -50,7 +51,10 @@ public void testEnsureRegularFile() throws IOException { /** can't execute processes */ public void testProcessExecution() throws Exception { - assumeTrue("test requires security manager", System.getSecurityManager() != null); + assumeTrue( + "test requires security manager", + RuntimeVersionFeature.isSecurityManagerAvailable() && System.getSecurityManager() != null + ); try { Runtime.getRuntime().exec("ls"); fail("didn't get expected exception"); diff --git a/server/src/test/java/org/elasticsearch/index/codec/vectors/es818/ES818HnswBinaryQuantizedVectorsFormatTests.java b/server/src/test/java/org/elasticsearch/index/codec/vectors/es818/ES818HnswBinaryQuantizedVectorsFormatTests.java index b6ae3199bb896..09304b3ba4c91 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/vectors/es818/ES818HnswBinaryQuantizedVectorsFormatTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/vectors/es818/ES818HnswBinaryQuantizedVectorsFormatTests.java @@ -103,7 +103,7 @@ public void testSingleVectorCase() throws Exception { assertEquals(1, td.totalHits.value()); assertTrue(td.scoreDocs[0].score >= 0); // When it's the only vector in a segment, the score should be very close to the true score - assertEquals(trueScore, td.scoreDocs[0].score, 0.0001f); + assertEquals(trueScore, td.scoreDocs[0].score, 0.01f); } } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 9d8c5649f0dce..3e3be6a315af2 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -89,6 +89,7 @@ import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -3448,7 +3449,7 @@ 
public void testTranslogReplay() throws IOException { assertThat(indexResult.getVersion(), equalTo(1L)); } assertVisibleCount(engine, numDocs); - translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); + translogHandler = createTranslogHandler(mapperService); engine.close(); // we need to reuse the engine config unless the parser.mappingModified won't work @@ -3460,7 +3461,7 @@ public void testTranslogReplay() throws IOException { assertEquals(numDocs, translogHandler.appliedOperations()); engine.close(); - translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); + translogHandler = createTranslogHandler(mapperService); engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier); engine.refresh("warm_up"); assertVisibleCount(engine, numDocs, false); @@ -3514,7 +3515,7 @@ public void testTranslogReplay() throws IOException { } engine.close(); - translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); + translogHandler = createTranslogHandler(mapperService); engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier); engine.refresh("warm_up"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { @@ -6447,7 +6448,8 @@ protected void doRun() throws Exception { max, true, randomBoolean(), - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) {} } else { @@ -7673,7 +7675,7 @@ public void testDisableRecoverySource() throws Exception { ) { IllegalStateException exc = expectThrows( IllegalStateException.class, - () -> engine.newChangesSnapshot("test", 0, 1000, true, true, true) + () -> engine.newChangesSnapshot("test", 0, 1000, true, true, true, randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())) ); assertThat(exc.getMessage(), containsString("unavailable")); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java index 85ba368165ceb..5863d2f932968 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -10,289 +10,37 @@ package org.elasticsearch.index.engine; import org.apache.lucene.index.NoMergePolicy; -import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.IOUtils; -import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.SnapshotMatchers; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.test.IndexSettingsModule; import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.LongSupplier; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; - -public class LuceneChangesSnapshotTests extends EngineTestCase { +public class LuceneChangesSnapshotTests extends 
SearchBasedChangesSnapshotTests { @Override - protected Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) // always enable soft-deletes - .build(); - } - - public void testBasics() throws Exception { - long fromSeqNo = randomNonNegativeLong(); - long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE); - // Empty engine - try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", fromSeqNo, toSeqNo, true, randomBoolean(), randomBoolean())) { - IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); - assertThat( - error.getMessage(), - containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") - ); - } - try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", fromSeqNo, toSeqNo, false, randomBoolean(), randomBoolean())) { - assertThat(snapshot, SnapshotMatchers.size(0)); - } - int numOps = between(1, 100); - int refreshedSeqNo = -1; - for (int i = 0; i < numOps; i++) { - String id = Integer.toString(randomIntBetween(i, i + 5)); - ParsedDocument doc = createParsedDoc(id, null, randomBoolean()); - if (randomBoolean()) { - engine.index(indexForDoc(doc)); - } else { - engine.delete(new Engine.Delete(doc.id(), Uid.encodeId(doc.id()), primaryTerm.get())); - } - if (rarely()) { - if (randomBoolean()) { - engine.flush(); - } else { - engine.refresh("test"); - } - refreshedSeqNo = i; - } - } - if (refreshedSeqNo == -1) { - fromSeqNo = between(0, numOps); - toSeqNo = randomLongBetween(fromSeqNo, numOps * 2); - - Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); - try ( - Translog.Snapshot snapshot = new LuceneChangesSnapshot( - searcher, - between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), - fromSeqNo, - toSeqNo, - false, - randomBoolean(), - randomBoolean(), - IndexVersion.current() - ) - ) { - searcher = null; - assertThat(snapshot, SnapshotMatchers.size(0)); - } finally { - IOUtils.close(searcher); - } - - searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); - try ( - Translog.Snapshot snapshot = new LuceneChangesSnapshot( - searcher, - between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), - fromSeqNo, - toSeqNo, - true, - randomBoolean(), - randomBoolean(), - IndexVersion.current() - ) - ) { - searcher = null; - IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); - assertThat( - error.getMessage(), - containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") - ); - } finally { - IOUtils.close(searcher); - } - } else { - fromSeqNo = randomLongBetween(0, refreshedSeqNo); - toSeqNo = randomLongBetween(refreshedSeqNo + 1, numOps * 2); - Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); - try ( - Translog.Snapshot snapshot = new LuceneChangesSnapshot( - searcher, - between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), - fromSeqNo, - toSeqNo, - false, - randomBoolean(), - randomBoolean(), - IndexVersion.current() - ) - ) { - searcher = null; - assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, refreshedSeqNo)); - } finally { - IOUtils.close(searcher); - } - searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); - try ( - Translog.Snapshot snapshot = new LuceneChangesSnapshot( - searcher, - between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), - 
fromSeqNo, - toSeqNo, - true, - randomBoolean(), - randomBoolean(), - IndexVersion.current() - ) - ) { - searcher = null; - IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); - assertThat( - error.getMessage(), - containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") - ); - } finally { - IOUtils.close(searcher); - } - toSeqNo = randomLongBetween(fromSeqNo, refreshedSeqNo); - searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); - try ( - Translog.Snapshot snapshot = new LuceneChangesSnapshot( - searcher, - between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), - fromSeqNo, - toSeqNo, - true, - randomBoolean(), - randomBoolean(), - IndexVersion.current() - ) - ) { - searcher = null; - assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo)); - } finally { - IOUtils.close(searcher); - } - } - // Get snapshot via engine will auto refresh - fromSeqNo = randomLongBetween(0, numOps - 1); - toSeqNo = randomLongBetween(fromSeqNo, numOps - 1); - try ( - Translog.Snapshot snapshot = engine.newChangesSnapshot( - "test", - fromSeqNo, - toSeqNo, - randomBoolean(), - randomBoolean(), - randomBoolean() - ) - ) { - assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo)); - } - } - - /** - * A nested document is indexed into Lucene as multiple documents. While the root document has both sequence number and primary term, - * non-root documents don't have primary term but only sequence numbers. This test verifies that {@link LuceneChangesSnapshot} - * correctly skip non-root documents and returns at most one operation per sequence number. - */ - public void testSkipNonRootOfNestedDocuments() throws Exception { - Map seqNoToTerm = new HashMap<>(); - List operations = generateHistoryOnReplica(between(1, 100), randomBoolean(), randomBoolean(), randomBoolean()); - for (Engine.Operation op : operations) { - if (engine.getLocalCheckpointTracker().hasProcessed(op.seqNo()) == false) { - seqNoToTerm.put(op.seqNo(), op.primaryTerm()); - } - applyOperation(engine, op); - if (rarely()) { - engine.refresh("test"); - } - if (rarely()) { - engine.rollTranslogGeneration(); - } - if (rarely()) { - engine.flush(); - } - } - long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); - engine.refresh("test"); - Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); - final boolean accessStats = randomBoolean(); - try ( - Translog.Snapshot snapshot = new LuceneChangesSnapshot( - searcher, - between(1, 100), - 0, - maxSeqNo, - false, - randomBoolean(), - accessStats, - IndexVersion.current() - ) - ) { - if (accessStats) { - assertThat(snapshot.totalOperations(), equalTo(seqNoToTerm.size())); - } - Translog.Operation op; - while ((op = snapshot.next()) != null) { - assertThat(op.toString(), op.primaryTerm(), equalTo(seqNoToTerm.get(op.seqNo()))); - } - assertThat(snapshot.skippedOperations(), equalTo(0)); - } - } - - public void testUpdateAndReadChangesConcurrently() throws Exception { - Follower[] followers = new Follower[between(1, 3)]; - CountDownLatch readyLatch = new CountDownLatch(followers.length + 1); - AtomicBoolean isDone = new AtomicBoolean(); - for (int i = 0; i < followers.length; i++) { - followers[i] = new Follower(engine, isDone, readyLatch); - followers[i].start(); - } - boolean onPrimary = randomBoolean(); - List operations = new ArrayList<>(); - int numOps = frequently() ? 
scaledRandomIntBetween(1, 1500) : scaledRandomIntBetween(5000, 20_000); - for (int i = 0; i < numOps; i++) { - String id = Integer.toString(randomIntBetween(0, randomBoolean() ? 10 : numOps * 2)); - ParsedDocument doc = createParsedDoc(id, randomAlphaOfLengthBetween(1, 5), randomBoolean()); - final Engine.Operation op; - if (onPrimary) { - if (randomBoolean()) { - op = new Engine.Index(newUid(doc), primaryTerm.get(), doc); - } else { - op = new Engine.Delete(doc.id(), Uid.encodeId(doc.id()), primaryTerm.get()); - } - } else { - if (randomBoolean()) { - op = replicaIndexForDoc(doc, randomNonNegativeLong(), i, randomBoolean()); - } else { - op = replicaDeleteForDoc(doc.id(), randomNonNegativeLong(), i, randomNonNegativeLong()); - } - } - operations.add(op); - } - readyLatch.countDown(); - readyLatch.await(); - Randomness.shuffle(operations); - concurrentlyApplyOps(operations, engine); - assertThat(engine.getLocalCheckpointTracker().getProcessedCheckpoint(), equalTo(operations.size() - 1L)); - isDone.set(true); - for (Follower follower : followers) { - follower.join(); - IOUtils.close(follower.engine, follower.engine.store); - } + protected Translog.Snapshot newRandomSnapshot( + MappingLookup mappingLookup, + Engine.Searcher engineSearcher, + int searchBatchSize, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean singleConsumer, + boolean accessStats, + IndexVersion indexVersionCreated + ) throws IOException { + return new LuceneChangesSnapshot( + engineSearcher, + searchBatchSize, + fromSeqNo, + toSeqNo, + requiredFullRange, + singleConsumer, + accessStats, + indexVersionCreated + ); } public void testAccessStoredFieldsSequentially() throws Exception { @@ -319,7 +67,8 @@ public void testAccessStoredFieldsSequentially() throws Exception { between(1, smallBatch), false, randomBoolean(), - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) { while ((op = snapshot.next()) != null) { @@ -335,7 +84,8 @@ public void testAccessStoredFieldsSequentially() throws Exception { between(20, 100), false, randomBoolean(), - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) { while ((op = snapshot.next()) != null) { @@ -351,7 +101,8 @@ public void testAccessStoredFieldsSequentially() throws Exception { between(21, 100), false, true, - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) { while ((op = snapshot.next()) != null) { @@ -367,7 +118,8 @@ public void testAccessStoredFieldsSequentially() throws Exception { between(21, 100), false, false, - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) { while ((op = snapshot.next()) != null) { @@ -377,165 +129,4 @@ public void testAccessStoredFieldsSequentially() throws Exception { } } } - - class Follower extends Thread { - private final InternalEngine leader; - private final InternalEngine engine; - private final TranslogHandler translogHandler; - private final AtomicBoolean isDone; - private final CountDownLatch readLatch; - - Follower(InternalEngine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException { - this.leader = leader; - this.isDone = isDone; - this.readLatch = readLatch; - this.translogHandler = new TranslogHandler( - xContentRegistry(), - IndexSettingsModule.newIndexSettings(shardId.getIndexName(), leader.engineConfig.getIndexSettings().getSettings()) - ); - this.engine = createEngine(createStore(), createTempDir()); - } - - 
void pullOperations(InternalEngine follower) throws IOException { - long leaderCheckpoint = leader.getLocalCheckpointTracker().getProcessedCheckpoint(); - long followerCheckpoint = follower.getLocalCheckpointTracker().getProcessedCheckpoint(); - if (followerCheckpoint < leaderCheckpoint) { - long fromSeqNo = followerCheckpoint + 1; - long batchSize = randomLongBetween(0, 100); - long toSeqNo = Math.min(fromSeqNo + batchSize, leaderCheckpoint); - try ( - Translog.Snapshot snapshot = leader.newChangesSnapshot( - "test", - fromSeqNo, - toSeqNo, - true, - randomBoolean(), - randomBoolean() - ) - ) { - translogHandler.run(follower, snapshot); - } - } - } - - @Override - public void run() { - try { - readLatch.countDown(); - readLatch.await(); - while (isDone.get() == false - || engine.getLocalCheckpointTracker().getProcessedCheckpoint() < leader.getLocalCheckpointTracker() - .getProcessedCheckpoint()) { - pullOperations(engine); - } - assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine); - // have to verify without source since we are randomly testing without _source - List docsWithoutSourceOnFollower = getDocIds(engine, true).stream() - .map(d -> new DocIdSeqNoAndSource(d.id(), null, d.seqNo(), d.primaryTerm(), d.version())) - .toList(); - List docsWithoutSourceOnLeader = getDocIds(leader, true).stream() - .map(d -> new DocIdSeqNoAndSource(d.id(), null, d.seqNo(), d.primaryTerm(), d.version())) - .toList(); - assertThat(docsWithoutSourceOnFollower, equalTo(docsWithoutSourceOnLeader)); - } catch (Exception ex) { - throw new AssertionError(ex); - } - } - } - - private List drainAll(Translog.Snapshot snapshot) throws IOException { - List operations = new ArrayList<>(); - Translog.Operation op; - while ((op = snapshot.next()) != null) { - final Translog.Operation newOp = op; - logger.trace("Reading [{}]", op); - assert operations.stream().allMatch(o -> o.seqNo() < newOp.seqNo()) : "Operations [" + operations + "], op [" + op + "]"; - operations.add(newOp); - } - return operations; - } - - public void testOverFlow() throws Exception { - long fromSeqNo = randomLongBetween(0, 5); - long toSeqNo = randomLongBetween(Long.MAX_VALUE - 5, Long.MAX_VALUE); - try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", fromSeqNo, toSeqNo, true, randomBoolean(), randomBoolean())) { - IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); - assertThat( - error.getMessage(), - containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") - ); - } - } - - public void testStats() throws Exception { - try (Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { - int numOps = between(100, 5000); - long startingSeqNo = randomLongBetween(0, Integer.MAX_VALUE); - List operations = generateHistoryOnReplica( - numOps, - startingSeqNo, - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - applyOperations(engine, operations); - - LongSupplier fromSeqNo = () -> { - if (randomBoolean()) { - return 0L; - } else if (randomBoolean()) { - return startingSeqNo; - } else { - return randomLongBetween(0, startingSeqNo); - } - }; - - LongSupplier toSeqNo = () -> { - final long maxSeqNo = engine.getSeqNoStats(-1).getMaxSeqNo(); - if (randomBoolean()) { - return maxSeqNo; - } else if (randomBoolean()) { - return Long.MAX_VALUE; - } else { - return randomLongBetween(maxSeqNo, Long.MAX_VALUE); - } - }; - // Can't access stats if didn't request it - 
try ( - Translog.Snapshot snapshot = engine.newChangesSnapshot( - "test", - fromSeqNo.getAsLong(), - toSeqNo.getAsLong(), - false, - randomBoolean(), - false - ) - ) { - IllegalStateException error = expectThrows(IllegalStateException.class, snapshot::totalOperations); - assertThat(error.getMessage(), equalTo("Access stats of a snapshot created with [access_stats] is false")); - final List translogOps = drainAll(snapshot); - assertThat(translogOps, hasSize(numOps)); - error = expectThrows(IllegalStateException.class, snapshot::totalOperations); - assertThat(error.getMessage(), equalTo("Access stats of a snapshot created with [access_stats] is false")); - } - // Access stats and operations - try ( - Translog.Snapshot snapshot = engine.newChangesSnapshot( - "test", - fromSeqNo.getAsLong(), - toSeqNo.getAsLong(), - false, - randomBoolean(), - true - ) - ) { - assertThat(snapshot.totalOperations(), equalTo(numOps)); - final List translogOps = drainAll(snapshot); - assertThat(translogOps, hasSize(numOps)); - assertThat(snapshot.totalOperations(), equalTo(numOps)); - } - // Verify count - assertThat(engine.countChanges("test", fromSeqNo.getAsLong(), toSeqNo.getAsLong()), equalTo(numOps)); - } - } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java new file mode 100644 index 0000000000000..2a6c3428d6d45 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.mapper.MappingLookup;
+import org.elasticsearch.index.mapper.SourceFieldMapper;
+import org.elasticsearch.index.translog.Translog;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING;
+
+public class LuceneSyntheticSourceChangesSnapshotTests extends SearchBasedChangesSnapshotTests {
+    @Override
+    protected Settings indexSettings() {
+        return Settings.builder()
+            .put(super.indexSettings())
+            .put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name())
+            .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
+            .build();
+    }
+
+    @Override
+    protected Translog.Snapshot newRandomSnapshot(
+        MappingLookup mappingLookup,
+        Engine.Searcher engineSearcher,
+        int searchBatchSize,
+        long fromSeqNo,
+        long toSeqNo,
+        boolean requiredFullRange,
+        boolean singleConsumer,
+        boolean accessStats,
+        IndexVersion indexVersionCreated
+    ) throws IOException {
+        return new LuceneSyntheticSourceChangesSnapshot(
+            mappingLookup,
+            engineSearcher,
+            searchBatchSize,
+            randomLongBetween(0, ByteSizeValue.ofBytes(Integer.MAX_VALUE).getBytes()),
+            fromSeqNo,
+            toSeqNo,
+            requiredFullRange,
+            accessStats,
+            indexVersionCreated
+        );
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java
index c0e365909429a..74d6e83aff266 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java
@@ -39,83 +39,99 @@ import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
 public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
 
     public void testPruneAll() throws IOException {
-        try (Directory dir = newDirectory()) {
-            boolean pruneIdField = randomBoolean();
-            IndexWriterConfig iwc = newIndexWriterConfig();
-            RecoverySourcePruneMergePolicy mp = new RecoverySourcePruneMergePolicy(
-                "extra_source",
-                pruneIdField,
-                MatchNoDocsQuery::new,
-                newLogMergePolicy()
-            );
-            iwc.setMergePolicy(new ShuffleForcedMergePolicy(mp));
-            try (IndexWriter writer = new IndexWriter(dir, iwc)) {
-                for (int i = 0; i < 20; i++) {
-                    if (i > 0 && randomBoolean()) {
-                        writer.flush();
-                    }
-                    Document doc = new Document();
-                    doc.add(new StoredField(IdFieldMapper.NAME, "_id"));
-                    doc.add(new StoredField("source", "hello world"));
-                    doc.add(new StoredField("extra_source", "hello world"));
-                    doc.add(new NumericDocValuesField("extra_source", 1));
-                    writer.addDocument(doc);
-                }
-                writer.forceMerge(1);
-                writer.commit();
-                try (DirectoryReader reader = DirectoryReader.open(writer)) {
-                    StoredFields storedFields = reader.storedFields();
-                    for (int i = 0; i < reader.maxDoc(); i++) {
-                        Document document = storedFields.document(i);
-                        if (pruneIdField) {
-                            assertEquals(1, document.getFields().size());
-                            assertEquals("source", document.getFields().get(0).name());
-                        } else {
-                            assertEquals(2,
document.getFields().size()); - assertEquals(IdFieldMapper.NAME, document.getFields().get(0).name()); - assertEquals("source", document.getFields().get(1).name()); + for (boolean pruneIdField : List.of(true, false)) { + for (boolean syntheticRecoverySource : List.of(true, false)) { + try (Directory dir = newDirectory()) { + IndexWriterConfig iwc = newIndexWriterConfig(); + RecoverySourcePruneMergePolicy mp = new RecoverySourcePruneMergePolicy( + syntheticRecoverySource ? null : "extra_source", + syntheticRecoverySource ? "extra_source_size" : "extra_source", + pruneIdField, + MatchNoDocsQuery::new, + newLogMergePolicy() + ); + iwc.setMergePolicy(new ShuffleForcedMergePolicy(mp)); + try (IndexWriter writer = new IndexWriter(dir, iwc)) { + for (int i = 0; i < 20; i++) { + if (i > 0 && randomBoolean()) { + writer.flush(); + } + Document doc = new Document(); + doc.add(new StoredField(IdFieldMapper.NAME, "_id")); + doc.add(new StoredField("source", "hello world")); + if (syntheticRecoverySource) { + doc.add(new NumericDocValuesField("extra_source_size", randomIntBetween(10, 10000))); + } else { + doc.add(new StoredField("extra_source", "hello world")); + doc.add(new NumericDocValuesField("extra_source", 1)); + } + writer.addDocument(doc); } - } - assertEquals(1, reader.leaves().size()); - LeafReader leafReader = reader.leaves().get(0).reader(); - NumericDocValues extra_source = leafReader.getNumericDocValues("extra_source"); - if (extra_source != null) { - assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); - } - if (leafReader instanceof CodecReader codecReader && reader instanceof StandardDirectoryReader sdr) { - SegmentInfos segmentInfos = sdr.getSegmentInfos(); - MergePolicy.MergeSpecification forcedMerges = mp.findForcedDeletesMerges( - segmentInfos, - new MergePolicy.MergeContext() { - @Override - public int numDeletesToMerge(SegmentCommitInfo info) { - return info.info.maxDoc() - 1; + writer.forceMerge(1); + writer.commit(); + try (DirectoryReader reader = DirectoryReader.open(writer)) { + StoredFields storedFields = reader.storedFields(); + for (int i = 0; i < reader.maxDoc(); i++) { + Document document = storedFields.document(i); + if (pruneIdField) { + assertEquals(1, document.getFields().size()); + assertEquals("source", document.getFields().get(0).name()); + } else { + assertEquals(2, document.getFields().size()); + assertEquals(IdFieldMapper.NAME, document.getFields().get(0).name()); + assertEquals("source", document.getFields().get(1).name()); } + } - @Override - public int numDeletedDocs(SegmentCommitInfo info) { - return info.info.maxDoc() - 1; - } + assertEquals(1, reader.leaves().size()); + LeafReader leafReader = reader.leaves().get(0).reader(); - @Override - public InfoStream getInfoStream() { - return new NullInfoStream(); - } + NumericDocValues extra_source = leafReader.getNumericDocValues( + syntheticRecoverySource ? 
"extra_source_size" : "extra_source" + ); + if (extra_source != null) { + assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); + } + if (leafReader instanceof CodecReader codecReader && reader instanceof StandardDirectoryReader sdr) { + SegmentInfos segmentInfos = sdr.getSegmentInfos(); + MergePolicy.MergeSpecification forcedMerges = mp.findForcedDeletesMerges( + segmentInfos, + new MergePolicy.MergeContext() { + @Override + public int numDeletesToMerge(SegmentCommitInfo info) { + return info.info.maxDoc() - 1; + } - @Override - public Set getMergingSegments() { - return Collections.emptySet(); - } + @Override + public int numDeletedDocs(SegmentCommitInfo info) { + return info.info.maxDoc() - 1; + } + + @Override + public InfoStream getInfoStream() { + return new NullInfoStream(); + } + + @Override + public Set getMergingSegments() { + return Collections.emptySet(); + } + } + ); + // don't wrap if there is nothing to do + assertSame(codecReader, forcedMerges.merges.get(0).wrapForMerge(codecReader)); } - ); - // don't wrap if there is nothing to do - assertSame(codecReader, forcedMerges.merges.get(0).wrapForMerge(codecReader)); + } } } } @@ -123,87 +139,126 @@ public Set getMergingSegments() { } public void testPruneSome() throws IOException { - try (Directory dir = newDirectory()) { - boolean pruneIdField = randomBoolean(); - IndexWriterConfig iwc = newIndexWriterConfig(); - iwc.setMergePolicy( - new RecoverySourcePruneMergePolicy( - "extra_source", - pruneIdField, - () -> new TermQuery(new Term("even", "true")), - iwc.getMergePolicy() - ) - ); - try (IndexWriter writer = new IndexWriter(dir, iwc)) { - for (int i = 0; i < 20; i++) { - if (i > 0 && randomBoolean()) { - writer.flush(); - } - Document doc = new Document(); - doc.add(new StoredField(IdFieldMapper.NAME, "_id")); - doc.add(new StringField("even", Boolean.toString(i % 2 == 0), Field.Store.YES)); - doc.add(new StoredField("source", "hello world")); - doc.add(new StoredField("extra_source", "hello world")); - doc.add(new NumericDocValuesField("extra_source", 1)); - writer.addDocument(doc); - } - writer.forceMerge(1); - writer.commit(); - try (DirectoryReader reader = DirectoryReader.open(writer)) { - assertEquals(1, reader.leaves().size()); - NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues("extra_source"); - assertNotNull(extra_source); - StoredFields storedFields = reader.storedFields(); - for (int i = 0; i < reader.maxDoc(); i++) { - Document document = storedFields.document(i); - Set collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); - assertTrue(collect.contains("source")); - assertTrue(collect.contains("even")); - if (collect.size() == 4) { - assertTrue(collect.contains("extra_source")); - assertTrue(collect.contains(IdFieldMapper.NAME)); - assertEquals("true", document.getField("even").stringValue()); - assertEquals(i, extra_source.nextDoc()); - } else { - assertEquals(pruneIdField ? 2 : 3, document.getFields().size()); + for (boolean pruneIdField : List.of(true, false)) { + for (boolean syntheticRecoverySource : List.of(true, false)) { + try (Directory dir = newDirectory()) { + IndexWriterConfig iwc = newIndexWriterConfig(); + iwc.setMergePolicy( + new RecoverySourcePruneMergePolicy( + syntheticRecoverySource ? null : "extra_source", + syntheticRecoverySource ? 
"extra_source_size" : "extra_source", + pruneIdField, + () -> new TermQuery(new Term("even", "true")), + iwc.getMergePolicy() + ) + ); + try (IndexWriter writer = new IndexWriter(dir, iwc)) { + for (int i = 0; i < 20; i++) { + if (i > 0 && randomBoolean()) { + writer.flush(); + } + Document doc = new Document(); + doc.add(new StoredField(IdFieldMapper.NAME, "_id")); + doc.add(new StringField("even", Boolean.toString(i % 2 == 0), Field.Store.YES)); + doc.add(new StoredField("source", "hello world")); + if (syntheticRecoverySource) { + doc.add(new NumericDocValuesField("extra_source_size", randomIntBetween(10, 10000))); + } else { + doc.add(new StoredField("extra_source", "hello world")); + doc.add(new NumericDocValuesField("extra_source", 1)); + } + writer.addDocument(doc); + } + writer.forceMerge(1); + writer.commit(); + try (DirectoryReader reader = DirectoryReader.open(writer)) { + assertEquals(1, reader.leaves().size()); + String extraSourceDVName = syntheticRecoverySource ? "extra_source_size" : "extra_source"; + NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues(extraSourceDVName); + assertNotNull(extra_source); + StoredFields storedFields = reader.storedFields(); + for (int i = 0; i < reader.maxDoc(); i++) { + Document document = storedFields.document(i); + Set collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); + assertTrue(collect.contains("source")); + assertTrue(collect.contains("even")); + boolean isEven = Boolean.parseBoolean(document.getField("even").stringValue()); + if (isEven) { + assertTrue(collect.contains(IdFieldMapper.NAME)); + assertThat(collect.contains("extra_source"), equalTo(syntheticRecoverySource == false)); + if (extra_source.docID() < i) { + extra_source.advance(i); + } + assertEquals(i, extra_source.docID()); + if (syntheticRecoverySource) { + assertThat(extra_source.longValue(), greaterThan(10L)); + } else { + assertThat(extra_source.longValue(), equalTo(1L)); + } + } else { + assertThat(collect.contains(IdFieldMapper.NAME), equalTo(pruneIdField == false)); + assertFalse(collect.contains("extra_source")); + if (extra_source.docID() < i) { + extra_source.advance(i); + } + assertNotEquals(i, extra_source.docID()); + } + } + if (extra_source.docID() != DocIdSetIterator.NO_MORE_DOCS) { + assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); + } } } - assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); } } } } public void testPruneNone() throws IOException { - try (Directory dir = newDirectory()) { - IndexWriterConfig iwc = newIndexWriterConfig(); - iwc.setMergePolicy(new RecoverySourcePruneMergePolicy("extra_source", false, MatchAllDocsQuery::new, iwc.getMergePolicy())); - try (IndexWriter writer = new IndexWriter(dir, iwc)) { - for (int i = 0; i < 20; i++) { - if (i > 0 && randomBoolean()) { - writer.flush(); + for (boolean syntheticRecoverySource : List.of(true, false)) { + try (Directory dir = newDirectory()) { + IndexWriterConfig iwc = newIndexWriterConfig(); + iwc.setMergePolicy( + new RecoverySourcePruneMergePolicy( + syntheticRecoverySource ? null : "extra_source", + syntheticRecoverySource ? 
"extra_source_size" : "extra_source", + false, + MatchAllDocsQuery::new, + iwc.getMergePolicy() + ) + ); + try (IndexWriter writer = new IndexWriter(dir, iwc)) { + for (int i = 0; i < 20; i++) { + if (i > 0 && randomBoolean()) { + writer.flush(); + } + Document doc = new Document(); + doc.add(new StoredField("source", "hello world")); + if (syntheticRecoverySource) { + doc.add(new NumericDocValuesField("extra_source_size", randomIntBetween(10, 10000))); + } else { + doc.add(new StoredField("extra_source", "hello world")); + doc.add(new NumericDocValuesField("extra_source", 1)); + } + writer.addDocument(doc); } - Document doc = new Document(); - doc.add(new StoredField("source", "hello world")); - doc.add(new StoredField("extra_source", "hello world")); - doc.add(new NumericDocValuesField("extra_source", 1)); - writer.addDocument(doc); - } - writer.forceMerge(1); - writer.commit(); - try (DirectoryReader reader = DirectoryReader.open(writer)) { - assertEquals(1, reader.leaves().size()); - NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues("extra_source"); - assertNotNull(extra_source); - StoredFields storedFields = reader.storedFields(); - for (int i = 0; i < reader.maxDoc(); i++) { - Document document = storedFields.document(i); - Set collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); - assertTrue(collect.contains("source")); - assertTrue(collect.contains("extra_source")); - assertEquals(i, extra_source.nextDoc()); + writer.forceMerge(1); + writer.commit(); + try (DirectoryReader reader = DirectoryReader.open(writer)) { + assertEquals(1, reader.leaves().size()); + String extraSourceDVName = syntheticRecoverySource ? "extra_source_size" : "extra_source"; + NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues(extraSourceDVName); + assertNotNull(extra_source); + StoredFields storedFields = reader.storedFields(); + for (int i = 0; i < reader.maxDoc(); i++) { + Document document = storedFields.document(i); + Set collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); + assertTrue(collect.contains("source")); + assertThat(collect.contains("extra_source"), equalTo(syntheticRecoverySource == false)); + assertEquals(i, extra_source.nextDoc()); + } + assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); } - assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); } } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshotTests.java new file mode 100644 index 0000000000000..9cfa7321973a4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshotTests.java @@ -0,0 +1,507 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.NoMergePolicy; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.SnapshotMatchers; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.LongSupplier; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public abstract class SearchBasedChangesSnapshotTests extends EngineTestCase { + @Override + protected Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) // always enable soft-deletes + .build(); + } + + protected abstract Translog.Snapshot newRandomSnapshot( + MappingLookup mappingLookup, + Engine.Searcher engineSearcher, + int searchBatchSize, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean singleConsumer, + boolean accessStats, + IndexVersion indexVersionCreated + ) throws IOException; + + public void testBasics() throws Exception { + long fromSeqNo = randomNonNegativeLong(); + long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE); + // Empty engine + try ( + Translog.Snapshot snapshot = engine.newChangesSnapshot( + "test", + fromSeqNo, + toSeqNo, + true, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { + IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); + assertThat( + error.getMessage(), + containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") + ); + } + try ( + Translog.Snapshot snapshot = engine.newChangesSnapshot( + "test", + fromSeqNo, + toSeqNo, + false, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + int numOps = between(1, 100); + int refreshedSeqNo = -1; + for (int i = 0; i < numOps; i++) { + String id = Integer.toString(randomIntBetween(i, i + 5)); + ParsedDocument doc = parseDocument(engine.engineConfig.getMapperService(), id, null); + if (randomBoolean()) { + engine.index(indexForDoc(doc)); + } else { + engine.delete(new Engine.Delete(doc.id(), Uid.encodeId(doc.id()), primaryTerm.get())); + } + if (rarely()) { + if (randomBoolean()) { + engine.flush(); + } else { + engine.refresh("test"); + } + refreshedSeqNo = i; + } + } + if (refreshedSeqNo == -1) { + fromSeqNo = between(0, numOps); + toSeqNo = randomLongBetween(fromSeqNo, numOps * 2); + + Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try ( + Translog.Snapshot snapshot = newRandomSnapshot( + engine.engineConfig.getMapperService().mappingLookup(), + searcher, + between(1, 
SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), + fromSeqNo, + toSeqNo, + false, + randomBoolean(), + randomBoolean(), + IndexVersion.current() + ) + ) { + searcher = null; + assertThat(snapshot, SnapshotMatchers.size(0)); + } finally { + IOUtils.close(searcher); + } + + searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try ( + Translog.Snapshot snapshot = newRandomSnapshot( + engine.engineConfig.getMapperService().mappingLookup(), + searcher, + between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), + fromSeqNo, + toSeqNo, + true, + randomBoolean(), + randomBoolean(), + IndexVersion.current() + ) + ) { + searcher = null; + IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); + assertThat( + error.getMessage(), + containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") + ); + } finally { + IOUtils.close(searcher); + } + } else { + fromSeqNo = randomLongBetween(0, refreshedSeqNo); + toSeqNo = randomLongBetween(refreshedSeqNo + 1, numOps * 2); + Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try ( + Translog.Snapshot snapshot = newRandomSnapshot( + engine.engineConfig.getMapperService().mappingLookup(), + searcher, + between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), + fromSeqNo, + toSeqNo, + false, + randomBoolean(), + randomBoolean(), + IndexVersion.current() + ) + ) { + searcher = null; + assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, refreshedSeqNo)); + } finally { + IOUtils.close(searcher); + } + searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try ( + Translog.Snapshot snapshot = newRandomSnapshot( + engine.engineConfig.getMapperService().mappingLookup(), + searcher, + between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), + fromSeqNo, + toSeqNo, + true, + randomBoolean(), + randomBoolean(), + IndexVersion.current() + ) + ) { + searcher = null; + IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); + assertThat( + error.getMessage(), + containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") + ); + } finally { + IOUtils.close(searcher); + } + toSeqNo = randomLongBetween(fromSeqNo, refreshedSeqNo); + searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try ( + Translog.Snapshot snapshot = newRandomSnapshot( + engine.engineConfig.getMapperService().mappingLookup(), + searcher, + between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE), + fromSeqNo, + toSeqNo, + true, + randomBoolean(), + randomBoolean(), + IndexVersion.current() + ) + ) { + searcher = null; + assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo)); + } finally { + IOUtils.close(searcher); + } + } + // Get snapshot via engine will auto refresh + fromSeqNo = randomLongBetween(0, numOps - 1); + toSeqNo = randomLongBetween(fromSeqNo, numOps - 1); + try ( + Translog.Snapshot snapshot = engine.newChangesSnapshot( + "test", + fromSeqNo, + toSeqNo, + randomBoolean(), + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { + assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo)); + } + } + + /** + * A nested document is indexed into Lucene as multiple documents. 
While the root document has both a sequence number and a primary term,
+ * non-root documents carry only sequence numbers, not primary terms. This test verifies that {@link LuceneChangesSnapshot}
+ * correctly skips non-root documents and returns at most one operation per sequence number.
+ */
+    public void testSkipNonRootOfNestedDocuments() throws Exception {
+        Map<Long, Long> seqNoToTerm = new HashMap<>();
+        List<Engine.Operation> operations = generateHistoryOnReplica(between(1, 100), randomBoolean(), randomBoolean(), randomBoolean());
+        for (Engine.Operation op : operations) {
+            if (engine.getLocalCheckpointTracker().hasProcessed(op.seqNo()) == false) {
+                seqNoToTerm.put(op.seqNo(), op.primaryTerm());
+            }
+            applyOperation(engine, op);
+            if (rarely()) {
+                engine.refresh("test");
+            }
+            if (rarely()) {
+                engine.rollTranslogGeneration();
+            }
+            if (rarely()) {
+                engine.flush();
+            }
+        }
+        long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo();
+        engine.refresh("test");
+        Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
+        final boolean accessStats = randomBoolean();
+        try (
+            Translog.Snapshot snapshot = newRandomSnapshot(
+                engine.engineConfig.getMapperService().mappingLookup(),
+                searcher,
+                between(1, 100),
+                0,
+                maxSeqNo,
+                false,
+                randomBoolean(),
+                accessStats,
+                IndexVersion.current()
+            )
+        ) {
+            if (accessStats) {
+                assertThat(snapshot.totalOperations(), equalTo(seqNoToTerm.size()));
+            }
+            Translog.Operation op;
+            while ((op = snapshot.next()) != null) {
+                assertThat(op.toString(), op.primaryTerm(), equalTo(seqNoToTerm.get(op.seqNo())));
+            }
+            assertThat(snapshot.skippedOperations(), equalTo(0));
+        }
+    }
+
+    public void testUpdateAndReadChangesConcurrently() throws Exception {
+        Follower[] followers = new Follower[between(1, 3)];
+        CountDownLatch readyLatch = new CountDownLatch(followers.length + 1);
+        AtomicBoolean isDone = new AtomicBoolean();
+        for (int i = 0; i < followers.length; i++) {
+            followers[i] = new Follower(engine, isDone, readyLatch);
+            followers[i].start();
+        }
+        boolean onPrimary = randomBoolean();
+        List<Engine.Operation> operations = new ArrayList<>();
+        int numOps = frequently() ? scaledRandomIntBetween(1, 1500) : scaledRandomIntBetween(5000, 20_000);
+        for (int i = 0; i < numOps; i++) {
+            String id = Integer.toString(randomIntBetween(0, randomBoolean() ?
10 : numOps * 2)); + ParsedDocument doc = parseDocument(engine.engineConfig.getMapperService(), id, randomAlphaOfLengthBetween(1, 5)); + final Engine.Operation op; + if (onPrimary) { + if (randomBoolean()) { + op = new Engine.Index(newUid(doc), primaryTerm.get(), doc); + } else { + op = new Engine.Delete(doc.id(), Uid.encodeId(doc.id()), primaryTerm.get()); + } + } else { + if (randomBoolean()) { + op = replicaIndexForDoc(doc, randomNonNegativeLong(), i, randomBoolean()); + } else { + op = replicaDeleteForDoc(doc.id(), randomNonNegativeLong(), i, randomNonNegativeLong()); + } + } + operations.add(op); + } + readyLatch.countDown(); + readyLatch.await(); + Randomness.shuffle(operations); + concurrentlyApplyOps(operations, engine); + assertThat(engine.getLocalCheckpointTracker().getProcessedCheckpoint(), equalTo(operations.size() - 1L)); + isDone.set(true); + for (Follower follower : followers) { + follower.join(); + IOUtils.close(follower.engine, follower.engine.store); + } + } + + class Follower extends Thread { + private final InternalEngine leader; + private final InternalEngine engine; + private final TranslogHandler translogHandler; + private final AtomicBoolean isDone; + private final CountDownLatch readLatch; + + Follower(InternalEngine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException { + this.leader = leader; + this.isDone = isDone; + this.readLatch = readLatch; + this.engine = createEngine(defaultSettings, createStore(), createTempDir(), newMergePolicy()); + this.translogHandler = new TranslogHandler(engine.engineConfig.getMapperService()); + } + + void pullOperations(InternalEngine follower) throws IOException { + long leaderCheckpoint = leader.getLocalCheckpointTracker().getProcessedCheckpoint(); + long followerCheckpoint = follower.getLocalCheckpointTracker().getProcessedCheckpoint(); + if (followerCheckpoint < leaderCheckpoint) { + long fromSeqNo = followerCheckpoint + 1; + long batchSize = randomLongBetween(0, 100); + long toSeqNo = Math.min(fromSeqNo + batchSize, leaderCheckpoint); + try ( + Translog.Snapshot snapshot = leader.newChangesSnapshot( + "test", + fromSeqNo, + toSeqNo, + true, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { + translogHandler.run(follower, snapshot); + } + } + } + + @Override + public void run() { + try { + readLatch.countDown(); + readLatch.await(); + while (isDone.get() == false + || engine.getLocalCheckpointTracker().getProcessedCheckpoint() < leader.getLocalCheckpointTracker() + .getProcessedCheckpoint()) { + pullOperations(engine); + } + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine); + // have to verify without source since we are randomly testing without _source + List docsWithoutSourceOnFollower = getDocIds(engine, true).stream() + .map(d -> new DocIdSeqNoAndSource(d.id(), null, d.seqNo(), d.primaryTerm(), d.version())) + .toList(); + List docsWithoutSourceOnLeader = getDocIds(leader, true).stream() + .map(d -> new DocIdSeqNoAndSource(d.id(), null, d.seqNo(), d.primaryTerm(), d.version())) + .toList(); + assertThat(docsWithoutSourceOnFollower, equalTo(docsWithoutSourceOnLeader)); + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + } + + private List drainAll(Translog.Snapshot snapshot) throws IOException { + List operations = new ArrayList<>(); + Translog.Operation op; + while ((op = snapshot.next()) != null) { + final Translog.Operation newOp = op; + logger.trace("Reading [{}]", op); + assert operations.stream().allMatch(o -> 
o.seqNo() < newOp.seqNo()) : "Operations [" + operations + "], op [" + op + "]"; + operations.add(newOp); + } + return operations; + } + + public void testOverFlow() throws Exception { + long fromSeqNo = randomLongBetween(0, 5); + long toSeqNo = randomLongBetween(Long.MAX_VALUE - 5, Long.MAX_VALUE); + try ( + Translog.Snapshot snapshot = engine.newChangesSnapshot( + "test", + fromSeqNo, + toSeqNo, + true, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { + IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); + assertThat( + error.getMessage(), + containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") + ); + } + } + + public void testStats() throws Exception { + try (Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + int numOps = between(100, 5000); + long startingSeqNo = randomLongBetween(0, Integer.MAX_VALUE); + List operations = generateHistoryOnReplica( + numOps, + startingSeqNo, + randomBoolean(), + randomBoolean(), + randomBoolean() + ); + applyOperations(engine, operations); + + LongSupplier fromSeqNo = () -> { + if (randomBoolean()) { + return 0L; + } else if (randomBoolean()) { + return startingSeqNo; + } else { + return randomLongBetween(0, startingSeqNo); + } + }; + + LongSupplier toSeqNo = () -> { + final long maxSeqNo = engine.getSeqNoStats(-1).getMaxSeqNo(); + if (randomBoolean()) { + return maxSeqNo; + } else if (randomBoolean()) { + return Long.MAX_VALUE; + } else { + return randomLongBetween(maxSeqNo, Long.MAX_VALUE); + } + }; + // Can't access stats if didn't request it + try ( + Translog.Snapshot snapshot = engine.newChangesSnapshot( + "test", + fromSeqNo.getAsLong(), + toSeqNo.getAsLong(), + false, + randomBoolean(), + false, + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { + IllegalStateException error = expectThrows(IllegalStateException.class, snapshot::totalOperations); + assertThat(error.getMessage(), equalTo("Access stats of a snapshot created with [access_stats] is false")); + final List translogOps = drainAll(snapshot); + assertThat(translogOps, hasSize(numOps)); + error = expectThrows(IllegalStateException.class, snapshot::totalOperations); + assertThat(error.getMessage(), equalTo("Access stats of a snapshot created with [access_stats] is false")); + } + // Access stats and operations + try ( + Translog.Snapshot snapshot = engine.newChangesSnapshot( + "test", + fromSeqNo.getAsLong(), + toSeqNo.getAsLong(), + false, + randomBoolean(), + true, + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { + assertThat(snapshot.totalOperations(), equalTo(numOps)); + final List translogOps = drainAll(snapshot); + assertThat(translogOps, hasSize(numOps)); + assertThat(snapshot.totalOperations(), equalTo(numOps)); + } + // Verify count + assertThat(engine.countChanges("test", fromSeqNo.getAsLong(), toSeqNo.getAsLong()), equalTo(numOps)); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index 4d6a30849e263..bec9cb5fa9be0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.util.List; +import 
java.util.Locale;
 import java.util.Map;
 
 import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING;
@@ -405,16 +406,114 @@ public void testRecoverySourceWithSourceDisabled() throws IOException {
         }
     }
 
-    public void testRecoverySourceWithSyntheticSource() throws IOException {
+    public void testRecoverySourceWithInvalidSettings() {
         {
-            MapperService mapperService = createMapperService(
-                topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("mode", "synthetic").endObject())
+            Settings settings = Settings.builder().put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true).build();
+            IllegalArgumentException exc = expectThrows(
+                IllegalArgumentException.class,
+                () -> createMapperService(settings, topMapping(b -> {}))
+            );
+            assertThat(
+                exc.getMessage(),
+                containsString(
+                    String.format(
+                        Locale.ROOT,
+                        "The setting [%s] is only permitted",
+                        IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
+                    )
+                )
+            );
+        }
+
+        {
+            Settings settings = Settings.builder()
+                .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString())
+                .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
+                .build();
+            IllegalArgumentException exc = expectThrows(
+                IllegalArgumentException.class,
+                () -> createMapperService(settings, topMapping(b -> {}))
+            );
+            assertThat(
+                exc.getMessage(),
+                containsString(
+                    String.format(
+                        Locale.ROOT,
+                        "The setting [%s] is only permitted",
+                        IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
+                    )
+                )
+            );
+        }
+        {
+            Settings settings = Settings.builder()
+                .put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.toString())
+                .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
+                .build();
+            IllegalArgumentException exc = expectThrows(
+                IllegalArgumentException.class,
+                () -> createMapperService(settings, topMapping(b -> {}))
+            );
+            assertThat(
+                exc.getMessage(),
+                containsString(
+                    String.format(
+                        Locale.ROOT,
+                        "The setting [%s] is only permitted",
+                        IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
+                    )
+                )
             );
+        }
+        {
+            Settings settings = Settings.builder()
+                .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
+                .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
+                .build();
+            IllegalArgumentException exc = expectThrows(
+                IllegalArgumentException.class,
+                () -> createMapperService(
+                    IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY),
+                    settings,
+                    () -> false,
+                    topMapping(b -> {})
+                )
+            );
+            assertThat(
+                exc.getMessage(),
+                containsString(
+                    String.format(
+                        Locale.ROOT,
+                        "The setting [%s] is unavailable on this cluster",
+                        IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
+                    )
+                )
+            );
+        }
+    }
+
+    public void testRecoverySourceWithSyntheticSource() throws IOException {
+        {
+            Settings settings = Settings.builder()
+                .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
+                .build();
+            MapperService mapperService = createMapperService(settings, topMapping(b -> {}));
             DocumentMapper docMapper = mapperService.documentMapper();
-            ParsedDocument doc = docMapper.parse(source(b -> { b.field("field1", "value1"); }));
+            ParsedDocument doc = docMapper.parse(source(b -> b.field("field1", "value1")));
             assertNotNull(doc.rootDoc().getField("_recovery_source"));
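// A hedged aside on the synthetic variant tested just below: with the new
// RECOVERY_USE_SYNTHETIC_SOURCE setting enabled, the mapper appears to store no full
// _recovery_source at all and instead records only the byte size of the synthetic source in a
// _recovery_source_size numeric doc value (field and setting names come from this diff; the
// exact size semantics are inferred from the 19L and 27L assertions). Those expectations are
// plain UTF-8 byte lengths, as this illustrative, self-contained sketch shows:

import java.nio.charset.StandardCharsets;

class RecoverySourceSizeSketch {
    // The value a _recovery_source_size doc value is assumed to hold for a given source.
    static long recoverySourceSize(String sourceJson) {
        return sourceJson.getBytes(StandardCharsets.UTF_8).length;
    }

    public static void main(String[] args) {
        System.out.println(recoverySourceSize("{\"field1\":\"value1\"}"));          // 19
        System.out.println(recoverySourceSize("{\"@timestamp\":\"2012-02-13\"}"));  // 27
    }
}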
assertThat(doc.rootDoc().getField("_recovery_source").binaryValue(), equalTo(new BytesRef("{\"field1\":\"value1\"}"))); } + { + Settings settings = Settings.builder() + .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString()) + .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true) + .build(); + MapperService mapperService = createMapperService(settings, topMapping(b -> {})); + DocumentMapper docMapper = mapperService.documentMapper(); + ParsedDocument doc = docMapper.parse(source(b -> b.field("field1", "value1"))); + assertNotNull(doc.rootDoc().getField("_recovery_source_size")); + assertThat(doc.rootDoc().getField("_recovery_source_size").numericValue(), equalTo(19L)); + } { Settings settings = Settings.builder().put(INDICES_RECOVERY_SOURCE_ENABLED_SETTING.getKey(), false).build(); MapperService mapperService = createMapperService( @@ -436,6 +535,17 @@ public void testRecoverySourceWithLogs() throws IOException { assertNotNull(doc.rootDoc().getField("_recovery_source")); assertThat(doc.rootDoc().getField("_recovery_source").binaryValue(), equalTo(new BytesRef("{\"@timestamp\":\"2012-02-13\"}"))); } + { + Settings settings = Settings.builder() + .put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()) + .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true) + .build(); + MapperService mapperService = createMapperService(settings, mapping(b -> {})); + DocumentMapper docMapper = mapperService.documentMapper(); + ParsedDocument doc = docMapper.parse(source(b -> { b.field("@timestamp", "2012-02-13"); })); + assertNotNull(doc.rootDoc().getField("_recovery_source_size")); + assertThat(doc.rootDoc().getField("_recovery_source_size").numericValue(), equalTo(27L)); + } { Settings settings = Settings.builder() .put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()) diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 49b1362436ec7..0357d02dbbb98 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -486,7 +487,8 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { Long.MAX_VALUE, false, randomBoolean(), - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); @@ -513,7 +515,8 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { Long.MAX_VALUE, false, randomBoolean(), - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); @@ -608,7 +611,17 @@ public void testSeqNoCollision() throws Exception { shards.promoteReplicaToPrimary(replica2).get(); logger.info("--> Recover replica3 from replica2"); recoverReplica(replica3, replica2, true); - try 
(Translog.Snapshot snapshot = replica3.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), true)) { + try ( + Translog.Snapshot snapshot = replica3.newChangesSnapshot( + "test", + 0, + Long.MAX_VALUE, + false, + randomBoolean(), + true, + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); final List expectedOps = new ArrayList<>(initOperations); expectedOps.add(op2); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index d480f7bfc8d7f..eacb4cf35a422 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1819,7 +1819,15 @@ public void testShardFieldStats() throws IOException { shard.refresh("test"); } else { // trigger internal refresh - shard.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), randomBoolean()).close(); + shard.newChangesSnapshot( + "test", + 0, + Long.MAX_VALUE, + false, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ).close(); } assertThat(shard.getShardFieldStats(), sameInstance(stats)); // index more docs @@ -1837,7 +1845,15 @@ public void testShardFieldStats() throws IOException { shard.refresh("test"); } else { // trigger internal refresh - shard.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), randomBoolean()).close(); + shard.newChangesSnapshot( + "test", + 0, + Long.MAX_VALUE, + false, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ).close(); } stats = shard.getShardFieldStats(); assertThat(stats.numSegments(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 9e7f5fbbce1a3..ca616dc619ec9 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -158,7 +158,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { System::nanoTime, null, true, - null + EngineTestCase.createMapperService() ); engine = new InternalEngine(config); EngineTestCase.recoverFromTranslog(engine, (e, s) -> 0, Long.MAX_VALUE); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 315eaaf9ffaf1..aef58cee04899 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.VersionType; @@ -211,7 +212,8 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { Long.MAX_VALUE, false, randomBoolean(), - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) { assertThat(snapshot, SnapshotMatchers.size(6)); diff --git 
a/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java new file mode 100644 index 0000000000000..02593e41f5d84 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceSingleNodeTests.java @@ -0,0 +1,3011 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +package org.elasticsearch.search; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHitCountCollectorManager; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.ClosePointInTimeRequest; +import org.elasticsearch.action.search.OpenPointInTimeRequest; +import org.elasticsearch.action.search.SearchPhaseController; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.SearchShardTask; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.TransportClosePointInTimeAction; +import org.elasticsearch.action.search.TransportOpenPointInTimeAction; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import 
org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.search.stats.SearchStats; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.settings.InternalOrPrivateSettingsPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.SearchService.ResultsType; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregationReduceContext; +import org.elasticsearch.search.aggregations.MultiBucketConsumerService; +import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.fetch.subphase.FieldAndFormat; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.internal.ReaderContext; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.NonCountingTermQuery; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.SearchTimeoutException; +import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.rank.RankShardResult; +import org.elasticsearch.search.rank.TestRankBuilder; +import org.elasticsearch.search.rank.TestRankShardResult; +import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; 
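// Orientation for the test class these imports open: the lifecycle tests further
// down (testClearOnClose, testClearOnStop, testClearIndexDelete) all assert one
// invariant: an open scroll pins exactly one reader context in SearchService,
// and the teardown path being exercised must release it. A sketch of that shared
// shape (the helper name is ours, for illustration only):
//
//     private void assertScrollPinsOneContext(SearchService service, Runnable teardown) {
//         assertEquals(1, service.getActiveContexts()); // the open scroll holds one context
//         teardown.run();                               // doClose(), doStop(), or index deletion
//         assertEquals(0, service.getActiveContexts()); // ...which must release it
//     }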
+import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.search.rank.feature.RankFeatureResult; +import org.elasticsearch.search.rank.feature.RankFeatureShardRequest; +import org.elasticsearch.search.rank.feature.RankFeatureShardResult; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.tasks.TaskCancelHelper; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.LinkedList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.IntConsumer; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; +import static org.elasticsearch.search.SearchService.DEFAULT_SIZE; +import static org.elasticsearch.search.SearchService.QUERY_PHASE_PARALLEL_COLLECTION_ENABLED; +import static org.elasticsearch.search.SearchService.SEARCH_WORKER_THREADS_ENABLED; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; + +public class SearchServiceSingleNodeTests extends ESSingleNodeTestCase { + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Override + protected Collection> getPlugins() { + return pluginList( + FailOnRewriteQueryPlugin.class, + CustomScriptPlugin.class, + ReaderWrapperCountPlugin.class, + InternalOrPrivateSettingsPlugin.class, + MockSearchService.TestPlugin.class + ); + } + + public static class 
ReaderWrapperCountPlugin extends Plugin { + @Override + public void onIndexModule(IndexModule indexModule) { + indexModule.setReaderWrapper(service -> SearchServiceSingleNodeTests::apply); + } + } + + @Before + public void resetCount() { + numWrapInvocations = new AtomicInteger(0); + } + + private static AtomicInteger numWrapInvocations = new AtomicInteger(0); + + private static DirectoryReader apply(DirectoryReader directoryReader) throws IOException { + numWrapInvocations.incrementAndGet(); + return new FilterDirectoryReader(directoryReader, new FilterDirectoryReader.SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + return reader; + } + }) { + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return in; + } + + @Override + public CacheHelper getReaderCacheHelper() { + return directoryReader.getReaderCacheHelper(); + } + }; + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + + static final String DUMMY_SCRIPT = "dummyScript"; + + @Override + protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + return Collections.singletonMap(DUMMY_SCRIPT, vars -> "dummy"); + } + + @Override + public void onIndexModule(IndexModule indexModule) { + indexModule.addSearchOperationListener(new SearchOperationListener() { + @Override + public void onFetchPhase(SearchContext context, long tookInNanos) { + if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) { + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]")); + } else { + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); + } + } + + @Override + public void onQueryPhase(SearchContext context, long tookInNanos) { + if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) { + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]")); + } else { + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); + } + } + }); + } + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put("search.default_search_timeout", "5s").build(); + } + + public void testClearOnClose() { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertResponse( + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), + searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) + ); + SearchService service = getInstanceFromNode(SearchService.class); + + assertEquals(1, service.getActiveContexts()); + service.doClose(); // this kills the keep-alive reaper; we have to reset the node after this test + assertEquals(0, service.getActiveContexts()); + } + + public void testClearOnStop() { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertResponse( + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), + searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) + ); + SearchService service = getInstanceFromNode(SearchService.class); + + assertEquals(1, service.getActiveContexts()); + service.doStop(); + assertEquals(0, service.getActiveContexts()); + } + + public void testClearIndexDelete() { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", 
"value").setRefreshPolicy(IMMEDIATE).get(); + assertResponse( + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), + searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) + ); + SearchService service = getInstanceFromNode(SearchService.class); + + assertEquals(1, service.getActiveContexts()); + assertAcked(indicesAdmin().prepareDelete("index")); + awaitIndexShardCloseAsyncTasks(); + assertEquals(0, service.getActiveContexts()); + } + + public void testCloseSearchContextOnRewriteException() { + // if refresh happens while checking the exception, the subsequent reference count might not match, so we switch it off + createIndex("index", Settings.builder().put("index.refresh_interval", -1).build()); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + + final int activeContexts = service.getActiveContexts(); + final int activeRefs = indexShard.store().refCount(); + expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("index").setQuery(new FailOnRewriteQueryBuilder()).get() + ); + assertEquals(activeContexts, service.getActiveContexts()); + assertEquals(activeRefs, indexShard.store().refCount()); + } + + public void testSearchWhileIndexDeleted() throws InterruptedException { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + AtomicBoolean running = new AtomicBoolean(true); + CountDownLatch startGun = new CountDownLatch(1); + final int permitCount = 100; + Semaphore semaphore = new Semaphore(permitCount); + ShardRouting routing = TestShardRouting.newShardRouting( + indexShard.shardId(), + randomAlphaOfLength(5), + randomBoolean(), + ShardRoutingState.INITIALIZING + ); + final Thread thread = new Thread(() -> { + startGun.countDown(); + while (running.get()) { + if (randomBoolean()) { + service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED); + } else { + service.beforeIndexShardCreated(routing, indexService.getIndexSettings().getSettings()); + } + if (randomBoolean()) { + // here we trigger some refreshes to ensure the IR go out of scope such that we hit ACE if we access a search + // context in a non-sane way. 
+ try { + semaphore.acquire(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + prepareIndex("index").setSource("field", "value") + .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())) + .execute(ActionListener.running(semaphore::release)); + } + } + }); + thread.start(); + startGun.await(); + try { + final int rounds = scaledRandomIntBetween(100, 10000); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchRequest scrollSearchRequest = new SearchRequest().allowPartialSearchResults(true) + .scroll(new Scroll(TimeValue.timeValueMinutes(1))); + for (int i = 0; i < rounds; i++) { + try { + try { + PlainActionFuture<SearchPhaseResult> result = new PlainActionFuture<>(); + final boolean useScroll = randomBoolean(); + service.executeQueryPhase( + new ShardSearchRequest( + OriginalIndices.NONE, + useScroll ? scrollSearchRequest : searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ), + new SearchShardTask(123L, "", "", "", null, emptyMap()), + result.delegateFailure((l, r) -> { + r.incRef(); + l.onResponse(r); + }) + ); + final SearchPhaseResult searchPhaseResult = result.get(); + try { + List<Integer> intCursors = new ArrayList<>(1); + intCursors.add(0); + ShardFetchRequest req = new ShardFetchRequest( + searchPhaseResult.getContextId(), + intCursors, + null/* not a scroll */ + ); + PlainActionFuture<FetchSearchResult> listener = new PlainActionFuture<>(); + service.executeFetchPhase(req, new SearchShardTask(123L, "", "", "", null, emptyMap()), listener); + listener.get(); + if (useScroll) { + // have to free context since this test does not remove the index from IndicesService. + service.freeReaderContext(searchPhaseResult.getContextId()); + } + } finally { + searchPhaseResult.decRef(); + } + } catch (ExecutionException ex) { + assertThat(ex.getCause(), instanceOf(RuntimeException.class)); + throw ((RuntimeException) ex.getCause()); + } + } catch (AlreadyClosedException ex) { + throw ex; + } catch (IllegalStateException ex) { + assertEquals(AbstractRefCounted.ALREADY_CLOSED_MESSAGE, ex.getMessage()); + } catch (SearchContextMissingException ex) { + // that's fine + } + } + } finally { + running.set(false); + thread.join(); + semaphore.acquire(permitCount); + } + + assertEquals(0, service.getActiveContexts()); + + SearchStats.Stats totalStats = indexShard.searchStats().getTotal(); + assertEquals(0, totalStats.getQueryCurrent()); + assertEquals(0, totalStats.getScrollCurrent()); + assertEquals(0, totalStats.getFetchCurrent()); + } + + public void testRankFeaturePhaseSearchPhases() throws InterruptedException, ExecutionException { + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 3; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + final SearchService service = getInstanceFromNode(SearchService.class); + + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService 
indexService = indicesService.indexServiceSafe(resolveIndex(indexName)); + final IndexShard indexShard = indexService.getShard(0); + SearchShardTask searchTask = new SearchShardTask(123L, "", "", "", null, emptyMap()); + + // create a SearchRequest that will return all documents and defines a TestRankBuilder with shard-level only operations + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true) + .source( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .size(DEFAULT_SIZE) + .fetchField(fetchFieldName) + .rankBuilder( + // here we override only the shard-level contexts + new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + @Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); + rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); + rankFeatureDocs[i].score = (numDocs - i) + randomFloat(); + rankFeatureDocs[i].rank = i + 1; + } + return new RankFeatureShardResult(rankFeatureDocs); + } + }; + } + } + ) + ); + + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + QuerySearchResult queryResult = null; + RankFeatureResult rankResult = null; + try { + // Execute the query phase and store the result in a SearchPhaseResult container using a PlainActionFuture + PlainActionFuture queryPhaseResults = new PlainActionFuture<>(); + service.executeQueryPhase(request, searchTask, queryPhaseResults); + queryResult = (QuerySearchResult) queryPhaseResults.get(); + + // these are the matched docs from the query phase + final RankDoc[] queryRankDocs = ((TestRankShardResult) queryResult.getRankShardResult()).testRankDocs; + + // assume that we have cut down to these from the coordinator node as the top-docs to run the rank feature phase upon + List topRankWindowSizeDocs = randomNonEmptySubsetOf(Arrays.stream(queryRankDocs).map(x -> x.doc).toList()); + + // now we create a RankFeatureShardRequest to extract feature info for the top-docs above + RankFeatureShardRequest rankFeatureShardRequest = new RankFeatureShardRequest( + OriginalIndices.NONE, + queryResult.getContextId(), // use the context from the query phase + request, + topRankWindowSizeDocs + ); + PlainActionFuture rankPhaseResults = new PlainActionFuture<>(); + service.executeRankFeaturePhase(rankFeatureShardRequest, searchTask, rankPhaseResults); + rankResult = 
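// What this test has driven by hand so far, end to end: (1) executeQueryPhase
// produced a QuerySearchResult whose TestRankShardResult lists every matching
// doc; (2) a random subset of those doc ids (topRankWindowSizeDocs) stands in
// for the coordinator's rank-window cut; (3) executeRankFeaturePhase re-uses the
// query phase's reader context (queryResult.getContextId()) to load per-doc
// feature data for just that subset. The fetch phase below then hydrates final
// hits for a further subset, completing the query -> rank-feature -> fetch
// pipeline on a single shard.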
rankPhaseResults.get(); + + assertNotNull(rankResult); + assertNotNull(rankResult.rankFeatureResult()); + RankFeatureShardResult rankFeatureShardResult = rankResult.rankFeatureResult().shardResult(); + assertNotNull(rankFeatureShardResult); + + List sortedRankWindowDocs = topRankWindowSizeDocs.stream().sorted().toList(); + assertEquals(sortedRankWindowDocs.size(), rankFeatureShardResult.rankFeatureDocs.length); + for (int i = 0; i < sortedRankWindowDocs.size(); i++) { + assertEquals((long) sortedRankWindowDocs.get(i), rankFeatureShardResult.rankFeatureDocs[i].doc); + assertEquals(rankFeatureShardResult.rankFeatureDocs[i].featureData, "aardvark_" + sortedRankWindowDocs.get(i)); + } + + List globalTopKResults = randomNonEmptySubsetOf( + Arrays.stream(rankFeatureShardResult.rankFeatureDocs).map(x -> x.doc).toList() + ); + + // finally let's create a fetch request to bring back fetch info for the top results + ShardFetchSearchRequest fetchRequest = new ShardFetchSearchRequest( + OriginalIndices.NONE, + rankResult.getContextId(), + request, + globalTopKResults, + null, + null, + rankResult.getRescoreDocIds(), + null + ); + + // execute fetch phase and perform any validations once we retrieve the response + // the difference in how we do assertions here is needed because once the transport service sends back the response + // it decrements the reference to the FetchSearchResult (through the ActionListener#respondAndRelease) and sets hits to null + PlainActionFuture fetchListener = new PlainActionFuture<>() { + @Override + public void onResponse(FetchSearchResult fetchSearchResult) { + assertNotNull(fetchSearchResult); + assertNotNull(fetchSearchResult.hits()); + + int totalHits = fetchSearchResult.hits().getHits().length; + assertEquals(globalTopKResults.size(), totalHits); + for (int i = 0; i < totalHits; i++) { + // rank and score are set by the SearchPhaseController#merge so no need to validate that here + SearchHit hit = fetchSearchResult.hits().getAt(i); + assertNotNull(hit.getFields().get(fetchFieldName)); + assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); + } + super.onResponse(fetchSearchResult); + } + + @Override + public void onFailure(Exception e) { + super.onFailure(e); + throw new AssertionError("No failure should have been raised", e); + } + }; + service.executeFetchPhase(fetchRequest, searchTask, fetchListener); + fetchListener.get(); + } catch (Exception ex) { + if (queryResult != null) { + if (queryResult.hasReferences()) { + queryResult.decRef(); + } + service.freeReaderContext(queryResult.getContextId()); + } + if (rankResult != null && rankResult.hasReferences()) { + rankResult.decRef(); + } + throw ex; + } + } + + public void testRankFeaturePhaseUsingClient() { + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 4; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + ElasticsearchAssertions.assertResponse( + 
client().prepareSearch(indexName) + .setSource( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .size(2) + .from(2) + .fetchField(fetchFieldName) + .rankBuilder( + // here we override only the shard-level contexts + new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + + // no need for more than one queries + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( + int size, + int from, + Client client + ) { + return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + scores[i] = featureDocs[i].score; + } + scoreListener.onResponse(scores); + } + }; + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + List rankDocs = new ArrayList<>(); + for (int i = 0; i < querySearchResults.size(); i++) { + QuerySearchResult querySearchResult = querySearchResults.get(i); + TestRankShardResult shardResult = (TestRankShardResult) querySearchResult + .getRankShardResult(); + for (RankDoc trd : shardResult.testRankDocs) { + trd.shardIndex = i; + rankDocs.add(trd); + } + } + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); + topDocStats.fetchHits = topResults.length; + return topResults; + } + }; + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + @Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); + rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); + rankFeatureDocs[i].score = randomFloat(); + rankFeatureDocs[i].rank = i + 1; + } + return new RankFeatureShardResult(rankFeatureDocs); + } + }; + } + } + ) + ), + (response) -> { + SearchHits hits = response.getHits(); + assertEquals(hits.getTotalHits().value(), numDocs); + assertEquals(hits.getHits().length, 2); + int index = 0; + for (SearchHit hit : hits.getHits()) { + assertEquals(hit.getRank(), 3 + index); + 
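// Why rank starts at 3 in the assertions just above: the request paginates with
// from(2) and size(2), and rank is 1-based across the whole rank window, so the
// two returned hits are the 3rd and 4th best-scored candidates (hence
// hit.getRank() == 3 + index for index 0..1; minDocs is 4, so the window always
// holds at least 4 candidates).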
assertTrue(hit.getScore() >= 0); + assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); + index++; + } + } + ); + } + + public void testRankFeaturePhaseExceptionOnCoordinatingNode() { + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 3; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch(indexName) + .setSource( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .size(2) + .from(2) + .fetchField(fetchFieldName) + .rankBuilder(new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + + // no need for more than one queries + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( + int size, + int from, + Client client + ) { + return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { + throw new IllegalStateException("should have failed earlier"); + } + }; + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + throw new UnsupportedOperationException("simulated failure"); + } + }; + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + @Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); + rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); + rankFeatureDocs[i].score = randomFloat(); + rankFeatureDocs[i].rank = i + 1; + } + return new 
RankFeatureShardResult(rankFeatureDocs); + } + }; + } + }) + ) + .get() + ); + } + + public void testRankFeaturePhaseExceptionAllShardFail() { + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 3; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch(indexName) + .setAllowPartialSearchResults(true) + .setSource( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .fetchField(fetchFieldName) + .rankBuilder( + // here we override only the shard-level contexts + new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + + // no need for more than one queries + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( + int size, + int from, + Client client + ) { + return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + scores[i] = featureDocs[i].score; + } + scoreListener.onResponse(scores); + } + }; + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + List rankDocs = new ArrayList<>(); + for (int i = 0; i < querySearchResults.size(); i++) { + QuerySearchResult querySearchResult = querySearchResults.get(i); + TestRankShardResult shardResult = (TestRankShardResult) querySearchResult + .getRankShardResult(); + for (RankDoc trd : shardResult.testRankDocs) { + trd.shardIndex = i; + rankDocs.add(trd); + } + } + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); + topDocStats.fetchHits = topResults.length; + return topResults; + } + }; + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + @Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + 
return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + throw new UnsupportedOperationException("simulated failure"); + } + }; + } + } + ) + ) + .get() + ); + } + + public void testRankFeaturePhaseExceptionOneShardFails() { + // if we have only one shard and it fails, it will fallback to context.onPhaseFailure which will eventually clean up all contexts. + // in this test we want to make sure that even if one shard (of many) fails during the RankFeaturePhase, then the appropriate + // context will have been cleaned up. + final String indexName = "index"; + final String rankFeatureFieldName = "field"; + final String searchFieldName = "search_field"; + final String searchFieldValue = "some_value"; + final String fetchFieldName = "fetch_field"; + final String fetchFieldValue = "fetch_value"; + + final int minDocs = 3; + final int maxDocs = 10; + int numDocs = between(minDocs, maxDocs); + createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).build()); + // index some documents + for (int i = 0; i < numDocs; i++) { + prepareIndex(indexName).setId(String.valueOf(i)) + .setSource( + rankFeatureFieldName, + "aardvark_" + i, + searchFieldName, + searchFieldValue, + fetchFieldName, + fetchFieldValue + "_" + i + ) + .get(); + } + indicesAdmin().prepareRefresh(indexName).get(); + + assertResponse( + client().prepareSearch(indexName) + .setAllowPartialSearchResults(true) + .setSource( + new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) + .fetchField(fetchFieldName) + .rankBuilder( + // here we override only the shard-level contexts + new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + + // no need for more than one queries + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( + int size, + int from, + Client client + ) { + return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + scores[i] = featureDocs[i].score; + } + scoreListener.onResponse(scores); + } + }; + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { + @Override + public ScoreDoc[] rankQueryPhaseResults( + List querySearchResults, + SearchPhaseController.TopDocsStats topDocStats + ) { + List rankDocs = new ArrayList<>(); + for (int i = 0; i < querySearchResults.size(); i++) { + QuerySearchResult querySearchResult = querySearchResults.get(i); + TestRankShardResult shardResult = (TestRankShardResult) querySearchResult + .getRankShardResult(); + for (RankDoc trd : shardResult.testRankDocs) { + trd.shardIndex = i; + rankDocs.add(trd); + } + } + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); + topDocStats.fetchHits = topResults.length; + return topResults; + } + }; + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new QueryPhaseRankShardContext(queries, from) { + + 
@Override + public int rankWindowSize() { + return DEFAULT_RANK_WINDOW_SIZE; + } + + @Override + public RankShardResult combineQueryPhaseResults(List rankResults) { + // we know we have just 1 query, so return all the docs from it + return new TestRankShardResult( + Arrays.stream(rankResults.get(0).scoreDocs) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) + .limit(rankWindowSize()) + .toArray(RankDoc[]::new) + ); + } + }; + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { + @Override + public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { + if (shardId == 0) { + throw new UnsupportedOperationException("simulated failure"); + } else { + RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); + rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); + rankFeatureDocs[i].score = randomFloat(); + rankFeatureDocs[i].rank = i + 1; + } + return new RankFeatureShardResult(rankFeatureDocs); + } + } + }; + } + } + ) + ), + (searchResponse) -> { + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals("simulated failure", searchResponse.getShardFailures()[0].getCause().getMessage()); + assertNotEquals(0, searchResponse.getHits().getHits().length); + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertEquals(fetchFieldValue + "_" + hit.getId(), hit.getFields().get(fetchFieldName).getValue()); + assertEquals(1, hit.getShard().getShardId().id()); + } + } + ); + } + + public void testSearchWhileIndexDeletedDoesNotLeakSearchContext() throws ExecutionException, InterruptedException { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + + MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); + service.setOnPutContext(context -> { + if (context.indexShard() == indexShard) { + assertAcked(indicesAdmin().prepareDelete("index")); + } + }); + + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchRequest scrollSearchRequest = new SearchRequest().allowPartialSearchResults(true) + .scroll(new Scroll(TimeValue.timeValueMinutes(1))); + + // the scrolls are not explicitly freed, but should all be gone when the test finished. + // for completeness, we also randomly test the regular search path. + final boolean useScroll = randomBoolean(); + PlainActionFuture result = new PlainActionFuture<>(); + service.executeQueryPhase( + new ShardSearchRequest( + OriginalIndices.NONE, + useScroll ? 
scrollSearchRequest : searchRequest, + new ShardId(resolveIndex("index"), 0), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ), + new SearchShardTask(123L, "", "", "", null, emptyMap()), + result + ); + + try { + result.get(); + } catch (Exception e) { + // ok + } + + expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().setIndices("index").get()); + + assertEquals(0, service.getActiveContexts()); + + SearchStats.Stats totalStats = indexShard.searchStats().getTotal(); + assertEquals(0, totalStats.getQueryCurrent()); + assertEquals(0, totalStats.getScrollCurrent()); + assertEquals(0, totalStats.getFetchCurrent()); + } + + public void testBeforeShardLockDuringShardCreate() { + IndexService indexService = createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertResponse( + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), + searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) + ); + SearchService service = getInstanceFromNode(SearchService.class); + + assertEquals(1, service.getActiveContexts()); + service.beforeIndexShardCreated( + TestShardRouting.newShardRouting( + "test", + 0, + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomBoolean(), + ShardRoutingState.INITIALIZING + ), + indexService.getIndexSettings().getSettings() + ); + assertEquals(1, service.getActiveContexts()); + + service.beforeIndexShardCreated( + TestShardRouting.newShardRouting( + new ShardId(indexService.index(), 0), + randomAlphaOfLength(5), + randomBoolean(), + ShardRoutingState.INITIALIZING + ), + indexService.getIndexSettings().getSettings() + ); + assertEquals(0, service.getActiveContexts()); + } + + public void testTimeout() throws IOException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + final ShardSearchRequest requestWithDefaultTimeout = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext contextWithDefaultTimeout = service.createContext( + reader, + requestWithDefaultTimeout, + mock(SearchShardTask.class), + ResultsType.NONE, + randomBoolean() + ) + ) { + // the search context should inherit the default timeout + assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5))); + } + + final long seconds = randomIntBetween(6, 10); + searchRequest.source(new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds))); + final ShardSearchRequest requestWithCustomTimeout = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext( + reader, + requestWithCustomTimeout, + mock(SearchShardTask.class), + ResultsType.NONE, + randomBoolean() + ) + ) { + // the search context 
should inherit the query timeout + assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds))); + } + } + + /** + * test that getting more than the allowed number of docvalue_fields throws an exception + */ + public void testMaxDocvalueFieldsSearch() throws IOException { + final Settings settings = Settings.builder().put(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey(), 1).build(); + createIndex("index", settings, null, "field1", "keyword", "field2", "keyword"); + prepareIndex("index").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchRequest.source(searchSourceBuilder); + searchSourceBuilder.docValueField("field1"); + + final ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) + ) { + assertNotNull(context); + } + + searchSourceBuilder.docValueField("unmapped_field"); + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) + ) { + assertNotNull(context); + } + + searchSourceBuilder.docValueField("field2"); + try (ReaderContext reader = createReaderContext(indexService, indexShard)) { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) + ); + assertEquals( + "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [1] but was [2]. 
" + + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", + ex.getMessage() + ); + } + } + + public void testDeduplicateDocValuesFields() throws Exception { + createIndex("index", Settings.EMPTY, "_doc", "field1", "type=date", "field2", "type=date"); + prepareIndex("index").setId("1").setSource("field1", "2022-08-03", "field2", "2022-08-04").setRefreshPolicy(IMMEDIATE).get(); + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + + try (ReaderContext reader = createReaderContext(indexService, indexShard)) { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchRequest.source(searchSourceBuilder); + searchSourceBuilder.docValueField("f*"); + if (randomBoolean()) { + searchSourceBuilder.docValueField("field*"); + } + if (randomBoolean()) { + searchSourceBuilder.docValueField("*2"); + } + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + try ( + SearchContext context = service.createContext( + reader, + request, + mock(SearchShardTask.class), + ResultsType.NONE, + randomBoolean() + ) + ) { + Collection fields = context.docValuesContext().fields(); + assertThat(fields, containsInAnyOrder(new FieldAndFormat("field1", null), new FieldAndFormat("field2", null))); + } + } + } + + /** + * test that getting more than the allowed number of script_fields throws an exception + */ + public void testMaxScriptFieldsSearch() throws IOException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchRequest.source(searchSourceBuilder); + // adding the maximum allowed number of script_fields to retrieve + int maxScriptFields = indexService.getIndexSettings().getMaxScriptFields(); + for (int i = 0; i < maxScriptFields; i++) { + searchSourceBuilder.scriptField( + "field" + i, + new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) + ); + } + final ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + + try (ReaderContext reader = createReaderContext(indexService, indexShard)) { + try ( + SearchContext context = service.createContext( + reader, + request, + mock(SearchShardTask.class), + ResultsType.NONE, + randomBoolean() + ) + ) { + assertNotNull(context); + } + searchSourceBuilder.scriptField( + "anotherScriptField", + new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) + ); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) 
+ ); + assertEquals( + "Trying to retrieve too many script_fields. Must be less than or equal to: [" + + maxScriptFields + + "] but was [" + + (maxScriptFields + 1) + + "]. This limit can be set by changing the [index.max_script_fields] index level setting.", + ex.getMessage() + ); + } + } + + public void testIgnoreScriptfieldIfSizeZero() throws IOException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchRequest.source(searchSourceBuilder); + searchSourceBuilder.scriptField( + "field" + 0, + new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) + ); + searchSourceBuilder.size(0); + final ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + try ( + ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) + ) { + assertEquals(0, context.scriptFields().fields().size()); + } + } + + /** + * test that creating more than the allowed number of scroll contexts throws an exception + */ + public void testMaxOpenScrollContexts() throws Exception { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + // Open all possible scrolls, clear some of them, then open more until the limit is reached + LinkedList<String> clearScrollIds = new LinkedList<>(); + + for (int i = 0; i < SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); i++) { + assertResponse(client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), searchResponse -> { + if (randomInt(4) == 0) clearScrollIds.addLast(searchResponse.getScrollId()); + }); + } + + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.setScrollIds(clearScrollIds); + client().clearScroll(clearScrollRequest).get(); + + for (int i = 0; i < clearScrollIds.size(); i++) { + client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)).get().decRef(); + } + + final ShardScrollRequestTest request = new ShardScrollRequestTest(indexShard.shardId()); + ElasticsearchException ex = expectThrows( + ElasticsearchException.class, + () -> service.createAndPutReaderContext( + request, + indexService, + indexShard, + indexShard.acquireSearcherSupplier(), + SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + ) + ); + assertEquals( + "Trying to create too many scroll contexts. Must be less than or equal to: [" + + SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY) + + "]. 
" + + "This limit can be set by changing the [search.max_open_scroll_context] setting.", + ex.getMessage() + ); + assertEquals(RestStatus.TOO_MANY_REQUESTS, ex.status()); + + service.freeAllScrollContexts(); + } + + public void testOpenScrollContextsConcurrently() throws Exception { + createIndex("index"); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + final int maxScrollContexts = SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); + final SearchService searchService = getInstanceFromNode(SearchService.class); + Thread[] threads = new Thread[randomIntBetween(2, 8)]; + CountDownLatch latch = new CountDownLatch(threads.length); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (;;) { + final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); + try { + final ShardScrollRequestTest request = new ShardScrollRequestTest(indexShard.shardId()); + searchService.createAndPutReaderContext( + request, + indexService, + indexShard, + reader, + SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + ); + } catch (ElasticsearchException e) { + assertThat( + e.getMessage(), + equalTo( + "Trying to create too many scroll contexts. Must be less than or equal to: " + + "[" + + maxScrollContexts + + "]. " + + "This limit can be set by changing the [search.max_open_scroll_context] setting." + ) + ); + return; + } + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].setName("elasticsearch[node_s_0][search]"); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + assertThat(searchService.getActiveContexts(), equalTo(maxScrollContexts)); + searchService.freeAllScrollContexts(); + } + + public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin { + @Override + public List> getQueries() { + return singletonList(new QuerySpec<>("fail_on_rewrite_query", FailOnRewriteQueryBuilder::new, parseContext -> { + throw new UnsupportedOperationException("No query parser for this plugin"); + })); + } + } + + public static class FailOnRewriteQueryBuilder extends DummyQueryBuilder { + + public FailOnRewriteQueryBuilder(StreamInput in) throws IOException { + super(in); + } + + public FailOnRewriteQueryBuilder() {} + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { + if (queryRewriteContext.convertToSearchExecutionContext() != null) { + throw new IllegalStateException("Fail on rewrite phase"); + } + return this; + } + } + + private static class ShardScrollRequestTest extends ShardSearchRequest { + private Scroll scroll; + + ShardScrollRequestTest(ShardId shardId) { + super( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(true), + shardId, + 0, + 1, + AliasFilter.EMPTY, + 1f, + -1, + null + ); + this.scroll = new Scroll(TimeValue.timeValueMinutes(1)); + } + + @Override + public Scroll scroll() { + return this.scroll; + } + } + + public void testCanMatch() throws Exception { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = 
indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + assertTrue( + service.canMatch( + new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) + ).canMatch() + ); + + searchRequest.source(new SearchSourceBuilder()); + assertTrue( + service.canMatch( + new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) + ).canMatch() + ); + + searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); + assertTrue( + service.canMatch( + new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) + ).canMatch() + ); + + searchRequest.source( + new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) + .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(0)) + ); + assertTrue( + service.canMatch( + new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) + ).canMatch() + ); + searchRequest.source( + new SearchSourceBuilder().query(new MatchNoneQueryBuilder()).aggregation(new GlobalAggregationBuilder("test")) + ); + assertTrue( + service.canMatch( + new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) + ).canMatch() + ); + + searchRequest.source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder())); + assertFalse( + service.canMatch( + new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null) + ).canMatch() + ); + assertEquals(5, numWrapInvocations.get()); + + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + + /* + * Checks that canMatch takes into account the alias filter + */ + // the source cannot be rewritten to a match_none + searchRequest.indices("alias").source(new SearchSourceBuilder().query(new MatchAllQueryBuilder())); + assertFalse( + service.canMatch( + new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.of(new TermQueryBuilder("foo", "bar"), "alias"), + 1f, + -1, + null + ) + ).canMatch() + ); + // the source can match and can be rewritten to a match_none, but not the alias filter + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); + assertEquals(RestStatus.CREATED, response.status()); + searchRequest.indices("alias").source(new SearchSourceBuilder().query(new TermQueryBuilder("id", "1"))); + assertFalse( + service.canMatch( + new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.of(new TermQueryBuilder("foo", "bar"), "alias"), + 1f, + -1, + null + ) + ).canMatch() + ); + + CountDownLatch latch = new CountDownLatch(1); + SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); + // Because the foo field used in alias filter is unmapped the term query builder rewrite can resolve to a match no docs query, + // without acquiring a searcher and that means the wrapper is not called + assertEquals(5, numWrapInvocations.get()); + service.executeQueryPhase(request, task, new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult searchPhaseResult) { + try { + // 
make sure that the wrapper is called when the query is actually executed + assertEquals(6, numWrapInvocations.get()); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + try { + throw new AssertionError(e); + } finally { + latch.countDown(); + } + } + }); + latch.await(); + } + + public void testCanRewriteToMatchNone() { + assertFalse( + SearchService.canRewriteToMatchNone( + new SearchSourceBuilder().query(new MatchNoneQueryBuilder()).aggregation(new GlobalAggregationBuilder("test")) + ) + ); + assertFalse(SearchService.canRewriteToMatchNone(new SearchSourceBuilder())); + assertFalse(SearchService.canRewriteToMatchNone(null)); + assertFalse( + SearchService.canRewriteToMatchNone( + new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) + .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(0)) + ) + ); + assertTrue(SearchService.canRewriteToMatchNone(new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")))); + assertTrue( + SearchService.canRewriteToMatchNone( + new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) + .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(1)) + ) + ); + assertFalse( + SearchService.canRewriteToMatchNone( + new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) + .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(1)) + .suggest(new SuggestBuilder()) + ) + ); + assertFalse( + SearchService.canRewriteToMatchNone( + new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")).suggest(new SuggestBuilder()) + ) + ); + } + + public void testSetSearchThrottled() throws IOException { + createIndex("throttled_threadpool_index"); + client().execute( + InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, + new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request( + "throttled_threadpool_index", + IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), + "true" + ) + ).actionGet(); + final SearchService service = getInstanceFromNode(SearchService.class); + Index index = resolveIndex("throttled_threadpool_index"); + assertTrue(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled()); + prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertSearchHits( + client().prepareSearch("throttled_threadpool_index") + .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) + .setSize(1), + "1" + ); + // we add a search action listener in a plugin above to assert that this is actually used + client().execute( + InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, + new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request( + "throttled_threadpool_index", + IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), + "false" + ) + ).actionGet(); + + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> indicesAdmin().prepareUpdateSettings("throttled_threadpool_index") + .setSettings(Settings.builder().put(IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), false)) + .get() + ); + assertEquals("can not update private setting [index.search.throttled]; this setting is managed by Elasticsearch", iae.getMessage()); + assertFalse(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled()); + } + + public void testAggContextGetsMatchAll() throws 
IOException { + createIndex("test"); + withAggregationContext("test", context -> assertThat(context.query(), equalTo(new MatchAllDocsQuery()))); + } + + public void testAggContextGetsNestedFilter() throws IOException { + XContentBuilder mapping = JsonXContent.contentBuilder().startObject().startObject("properties"); + mapping.startObject("nested").field("type", "nested").endObject(); + mapping.endObject().endObject(); + + createIndex("test", Settings.EMPTY, mapping); + withAggregationContext("test", context -> assertThat(context.query(), equalTo(new MatchAllDocsQuery()))); + } + + /** + * Build an {@link AggregationContext} with the named index. + */ + private void withAggregationContext(String index, Consumer<AggregationContext> check) throws IOException { + IndexService indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(resolveIndex(index)); + ShardId shardId = new ShardId(indexService.index(), 0); + + SearchRequest request = new SearchRequest().indices(index) + .source(new SearchSourceBuilder().aggregation(new FiltersAggregationBuilder("test", new MatchAllQueryBuilder()))) + .allowPartialSearchResults(false); + ShardSearchRequest shardRequest = new ShardSearchRequest( + OriginalIndices.NONE, + request, + shardId, + 0, + 1, + AliasFilter.EMPTY, + 1, + 0, + null + ); + + try (ReaderContext readerContext = createReaderContext(indexService, indexService.getShard(0))) { + try ( + SearchContext context = getInstanceFromNode(SearchService.class).createContext( + readerContext, + shardRequest, + mock(SearchShardTask.class), + ResultsType.QUERY, + true + ) + ) { + check.accept(context.aggregations().factories().context()); + } + } + } + + public void testExpandSearchThrottled() { + createIndex("throttled_threadpool_index"); + client().execute( + InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, + new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request( + "throttled_threadpool_index", + IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), + "true" + ) + ).actionGet(); + + prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertHitCount(client().prepareSearch(), 1L); + assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L); + } + + public void testExpandSearchFrozen() { + String indexName = "frozen_index"; + createIndex(indexName); + client().execute( + InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, + new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(indexName, "index.frozen", "true") + ).actionGet(); + + prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + assertHitCount(client().prepareSearch(), 0L); + assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L); + assertWarnings(TransportSearchAction.FROZEN_INDICES_DEPRECATION_MESSAGE.replace("{}", indexName)); + } + + public void testCreateReduceContext() { + SearchService service = getInstanceFromNode(SearchService.class); + AggregationReduceContext.Builder reduceContextBuilder = service.aggReduceContextBuilder( + () -> false, + new SearchRequest().source(new SearchSourceBuilder()).source().aggregations() + ); + { + AggregationReduceContext reduceContext = reduceContextBuilder.forFinalReduction(); + expectThrows( + MultiBucketConsumerService.TooManyBucketsException.class, + () ->
reduceContext.consumeBucketsAndMaybeBreak(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS + 1) + ); + } + { + AggregationReduceContext reduceContext = reduceContextBuilder.forPartialReduction(); + reduceContext.consumeBucketsAndMaybeBreak(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS + 1); + } + } + + public void testMultiBucketConsumerServiceCB() { + MultiBucketConsumerService service = new MultiBucketConsumerService( + getInstanceFromNode(ClusterService.class), + Settings.EMPTY, + new NoopCircuitBreaker("test") { + + @Override + public void addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { + throw new CircuitBreakingException("tripped", getDurability()); + } + } + ); + // for partial + { + IntConsumer consumer = service.createForPartial(); + for (int i = 0; i < 1023; i++) { + consumer.accept(0); + } + CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0)); + assertThat(ex.getMessage(), equalTo("tripped")); + } + // for final + { + IntConsumer consumer = service.createForFinal(); + for (int i = 0; i < 1023; i++) { + consumer.accept(0); + } + CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0)); + assertThat(ex.getMessage(), equalTo("tripped")); + } + } + + public void testCreateSearchContext() throws IOException { + String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + IndexService indexService = createIndex(index); + final SearchService service = getInstanceFromNode(SearchService.class); + ShardId shardId = new ShardId(indexService.index(), 0); + long nowInMillis = System.currentTimeMillis(); + String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10); + SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(randomBoolean()); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shardId, + 0, + indexService.numberOfShards(), + AliasFilter.EMPTY, + 1f, + nowInMillis, + clusterAlias + ); + try (SearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { + SearchShardTarget searchShardTarget = searchContext.shardTarget(); + SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); + String expectedIndexName = clusterAlias == null ? index : clusterAlias + ":" + index; + assertEquals(expectedIndexName, searchExecutionContext.getFullyQualifiedIndex().getName()); + assertEquals(expectedIndexName, searchShardTarget.getFullyQualifiedIndexName()); + assertEquals(clusterAlias, searchShardTarget.getClusterAlias()); + assertEquals(shardId, searchShardTarget.getShardId()); + + assertNull(searchContext.dfsResult()); + searchContext.addDfsResult(); + assertSame(searchShardTarget, searchContext.dfsResult().getSearchShardTarget()); + + assertNull(searchContext.queryResult()); + searchContext.addQueryResult(); + assertSame(searchShardTarget, searchContext.queryResult().getSearchShardTarget()); + + assertNull(searchContext.fetchResult()); + searchContext.addFetchResult(); + assertSame(searchShardTarget, searchContext.fetchResult().getSearchShardTarget()); + } + } + + /** + * While we have no NPE in DefaultContext constructor anymore, we still want to guard against it (or other failures) in the future to + * avoid leaking searchers. 
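+ * The request below overrides searchType() to throw an artificial NPE from inside createContext; the assertBusy that follows checks the shard's store ref count to verify that the failure did not leak a searcher.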
+ */ + public void testCreateSearchContextFailure() throws Exception { + final String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + final IndexService indexService = createIndex(index); + final SearchService service = getInstanceFromNode(SearchService.class); + final ShardId shardId = new ShardId(indexService.index(), 0); + final ShardSearchRequest request = new ShardSearchRequest(shardId, 0, null) { + @Override + public SearchType searchType() { + // induce an artificial NPE + throw new NullPointerException("expected"); + } + }; + try (ReaderContext reader = createReaderContext(indexService, indexService.getShard(shardId.id()))) { + NullPointerException e = expectThrows( + NullPointerException.class, + () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) + ); + assertEquals("expected", e.getMessage()); + } + // Needs to busily assert because Engine#refreshNeeded can increase the refCount. + assertBusy( + () -> assertEquals("should have 2 store refs (IndexService + InternalEngine)", 2, indexService.getShard(0).store().refCount()) + ); + } + + public void testMatchNoDocsEmptyResponse() throws InterruptedException { + createIndex("index"); + Thread currentThread = Thread.currentThread(); + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false) + .source(new SearchSourceBuilder().aggregation(AggregationBuilders.count("count").field("value"))); + ShardSearchRequest shardRequest = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 5, + AliasFilter.EMPTY, + 1.0f, + 0, + null + ); + SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); + + { + CountDownLatch latch = new CountDownLatch(1); + shardRequest.source().query(new MatchAllQueryBuilder()); + service.executeQueryPhase(shardRequest, task, new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult result) { + try { + assertNotSame(Thread.currentThread(), currentThread); + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); + assertThat(result, instanceOf(QuerySearchResult.class)); + assertFalse(result.queryResult().isNull()); + assertNotNull(result.queryResult().topDocs()); + assertNotNull(result.queryResult().aggregations()); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception exc) { + try { + throw new AssertionError(exc); + } finally { + latch.countDown(); + } + } + }); + latch.await(); + } + + { + CountDownLatch latch = new CountDownLatch(1); + shardRequest.source().query(new MatchNoneQueryBuilder()); + service.executeQueryPhase(shardRequest, task, new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult result) { + try { + assertNotSame(Thread.currentThread(), currentThread); + assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); + assertThat(result, instanceOf(QuerySearchResult.class)); + assertFalse(result.queryResult().isNull()); + assertNotNull(result.queryResult().topDocs()); + assertNotNull(result.queryResult().aggregations()); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception 
exc) { + try { + throw new AssertionError(exc); + } finally { + latch.countDown(); + } + } + }); + latch.await(); + } + + { + CountDownLatch latch = new CountDownLatch(1); + shardRequest.canReturnNullResponseIfMatchNoDocs(true); + service.executeQueryPhase(shardRequest, task, new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult result) { + try { + // make sure we don't use the search threadpool + assertSame(Thread.currentThread(), currentThread); + assertThat(result, instanceOf(QuerySearchResult.class)); + assertTrue(result.queryResult().isNull()); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + try { + throw new AssertionError(e); + } finally { + latch.countDown(); + } + } + }); + latch.await(); + } + } + + public void testDeleteIndexWhileSearch() throws Exception { + createIndex("test"); + int numDocs = randomIntBetween(1, 20); + for (int i = 0; i < numDocs; i++) { + prepareIndex("test").setSource("f", "v").get(); + } + indicesAdmin().prepareRefresh("test").get(); + AtomicBoolean stopped = new AtomicBoolean(false); + Thread[] searchers = new Thread[randomIntBetween(1, 4)]; + CountDownLatch latch = new CountDownLatch(searchers.length); + for (int i = 0; i < searchers.length; i++) { + searchers[i] = new Thread(() -> { + latch.countDown(); + while (stopped.get() == false) { + try { + client().prepareSearch("test").setRequestCache(false).get().decRef(); + } catch (Exception ignored) { + return; + } + } + }); + searchers[i].start(); + } + latch.await(); + indicesAdmin().prepareDelete("test").get(); + stopped.set(true); + for (Thread searcher : searchers) { + searcher.join(); + } + } + + public void testLookUpSearchContext() throws Exception { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + List<ShardSearchContextId> contextIds = new ArrayList<>(); + int numContexts = randomIntBetween(1, 10); + CountDownLatch latch = new CountDownLatch(1); + indexShard.getThreadPool().executor(ThreadPool.Names.SEARCH).execute(() -> { + try { + for (int i = 0; i < numContexts; i++) { + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(true), + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + final ReaderContext context = searchService.createAndPutReaderContext( + request, + indexService, + indexShard, + indexShard.acquireSearcherSupplier(), + SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + ); + assertThat(context.id().getId(), equalTo((long) (i + 1))); + contextIds.add(context.id()); + } + assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); + while (contextIds.isEmpty() == false) { + final ShardSearchContextId contextId = randomFrom(contextIds); + assertFalse(searchService.freeReaderContext(new ShardSearchContextId(UUIDs.randomBase64UUID(), contextId.getId()))); + assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); + if (randomBoolean()) { + assertTrue(searchService.freeReaderContext(contextId)); + } else { + assertTrue( + searchService.freeReaderContext((new ShardSearchContextId(contextId.getSessionId(), contextId.getId()))) + ); + } + contextIds.remove(contextId); + assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); + assertFalse(searchService.freeReaderContext(contextId)); + assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); + } + } finally { + latch.countDown(); + } + }); + latch.await(); + } + + public void testOpenReaderContext() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture<ShardSearchContextId> future = new PlainActionFuture<>(); + searchService.openReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + future.actionGet(); + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertTrue(searchService.freeReaderContext(future.actionGet())); + } + + public void testCancelQueryPhaseEarly() throws Exception { + createIndex("index"); + final MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + + CountDownLatch latch1 = new CountDownLatch(1); + SearchShardTask task = new SearchShardTask(1, "", "", "", TaskId.EMPTY_TASK_ID, emptyMap()); + service.executeQueryPhase(request, task, new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult searchPhaseResult) { + service.freeReaderContext(searchPhaseResult.getContextId()); + latch1.countDown(); + } + + @Override + public void onFailure(Exception e) { + try { + fail("Search should not be cancelled"); + } finally { + latch1.countDown(); + } + } + }); + latch1.await(); + + CountDownLatch latch2 = new CountDownLatch(1); + service.executeDfsPhase(request, task, new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult searchPhaseResult) { + service.freeReaderContext(searchPhaseResult.getContextId()); + latch2.countDown(); + } + + @Override + public void onFailure(Exception e) { + try { + fail("Search should not be cancelled"); + } finally { + latch2.countDown(); + } + } + }); + latch2.await(); + + AtomicBoolean searchContextCreated = new AtomicBoolean(false); + service.setOnCreateSearchContext(c -> searchContextCreated.set(true)); + CountDownLatch latch3 = new CountDownLatch(1); + TaskCancelHelper.cancel(task, "simulated"); + service.executeQueryPhase(request, task, new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult searchPhaseResult) { + try { + fail("Search not cancelled early"); + } finally { + service.freeReaderContext(searchPhaseResult.getContextId()); + searchPhaseResult.decRef(); + latch3.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + assertThat(e, is(instanceOf(TaskCancelledException.class))); + assertThat(e.getMessage(), is("task cancelled [simulated]")); + assertThat(((TaskCancelledException) e).status(), is(RestStatus.BAD_REQUEST)); + assertThat(searchContextCreated.get(), is(false)); + latch3.countDown(); + } + }); + latch3.await(); + + searchContextCreated.set(false); + CountDownLatch latch4 = new CountDownLatch(1); + service.executeDfsPhase(request, task, new ActionListener<>() { + @Override + public void onResponse(SearchPhaseResult searchPhaseResult) { + try
{ + fail("Search not cancelled early"); + } finally { + service.freeReaderContext(searchPhaseResult.getContextId()); + latch4.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + assertThat(e, is(instanceOf(TaskCancelledException.class))); + assertThat(e.getMessage(), is("task cancelled [simulated]")); + assertThat(((TaskCancelledException) e).status(), is(RestStatus.BAD_REQUEST)); + assertThat(searchContextCreated.get(), is(false)); + latch4.countDown(); + } + }); + latch4.await(); + } + + public void testCancelFetchPhaseEarly() throws Exception { + createIndex("index"); + final MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + + AtomicBoolean searchContextCreated = new AtomicBoolean(false); + service.setOnCreateSearchContext(c -> searchContextCreated.set(true)); + + // Test fetch phase is cancelled early + String scrollId; + var searchResponse = client().search(searchRequest.allowPartialSearchResults(false).scroll(TimeValue.timeValueMinutes(10))).get(); + try { + scrollId = searchResponse.getScrollId(); + } finally { + searchResponse.decRef(); + } + + client().searchScroll(new SearchScrollRequest(scrollId)).get().decRef(); + assertThat(searchContextCreated.get(), is(true)); + + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.addScrollId(scrollId); + client().clearScroll(clearScrollRequest); + + searchResponse = client().search(searchRequest.allowPartialSearchResults(false).scroll(TimeValue.timeValueMinutes(10))).get(); + try { + scrollId = searchResponse.getScrollId(); + } finally { + searchResponse.decRef(); + } + searchContextCreated.set(false); + service.setOnCheckCancelled(t -> { + SearchShardTask task = new SearchShardTask(randomLong(), "transport", "action", "", TaskId.EMPTY_TASK_ID, emptyMap()); + TaskCancelHelper.cancel(task, "simulated"); + return task; + }); + CountDownLatch latch = new CountDownLatch(1); + client().searchScroll(new SearchScrollRequest(scrollId), new ActionListener<>() { + @Override + public void onResponse(SearchResponse searchResponse) { + try { + fail("Search not cancelled early"); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + Throwable cancelledExc = e.getCause().getCause(); + assertThat(cancelledExc, is(instanceOf(TaskCancelledException.class))); + assertThat(cancelledExc.getMessage(), is("task cancelled [simulated]")); + assertThat(((TaskCancelledException) cancelledExc).status(), is(RestStatus.BAD_REQUEST)); + latch.countDown(); + } + }); + latch.await(); + assertThat(searchContextCreated.get(), is(false)); + + clearScrollRequest.setScrollIds(singletonList(scrollId)); + client().clearScroll(clearScrollRequest); + } + + public void testWaitOnRefresh() throws ExecutionException, InterruptedException { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30)); + searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); + + final DocWriteResponse response = 
prepareIndex("index").setSource("id", "1").get(); + assertEquals(RestStatus.CREATED, response.status()); + + SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null, + null, + null + ); + PlainActionFuture future = new PlainActionFuture<>(); + service.executeQueryPhase(request, task, future.delegateFailure((l, r) -> { + assertEquals(1, r.queryResult().getTotalHits().value()); + l.onResponse(null); + })); + future.get(); + } + + public void testWaitOnRefreshFailsWithRefreshesDisabled() { + createIndex("index", Settings.builder().put("index.refresh_interval", "-1").build()); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30)); + searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); + + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); + assertEquals(RestStatus.CREATED, response.status()); + + SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); + PlainActionFuture future = new PlainActionFuture<>(); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null, + null, + null + ); + service.executeQueryPhase(request, task, future); + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, future::actionGet); + assertThat( + illegalArgumentException.getMessage(), + containsString("Cannot use wait_for_checkpoints with [index.refresh_interval=-1]") + ); + } + + public void testWaitOnRefreshFailsIfCheckpointNotIndexed() { + createIndex("index"); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + // Increased timeout to avoid cancelling the search task prior to its completion, + // as we expect to raise an Exception. Timeout itself is tested on the following `testWaitOnRefreshTimeout` test. 
+ searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(200, 300))); + searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 1 })); + + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); + assertEquals(RestStatus.CREATED, response.status()); + + SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); + PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>(); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null, + null, + null + ); + service.executeQueryPhase(request, task, future); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, future::actionGet); + assertThat( + ex.getMessage(), + containsString("Cannot wait for unissued seqNo checkpoint [wait_for_checkpoint=1, max_issued_seqNo=0]") + ); + } + + public void testWaitOnRefreshTimeout() { + createIndex("index", Settings.builder().put("index.refresh_interval", "60s").build()); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(10, 100))); + searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 })); + + final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get(); + assertEquals(RestStatus.CREATED, response.status()); + + SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap()); + PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>(); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null, + null, + null + ); + service.executeQueryPhase(request, task, future); + + SearchTimeoutException ex = expectThrows(SearchTimeoutException.class, future::actionGet); + assertThat(ex.getMessage(), containsString("Wait for seq_no [0] refreshed timed out [")); + } + + public void testMinimalSearchSourceInShardRequests() { + createIndex("test"); + int numDocs = between(0, 10); + for (int i = 0; i < numDocs; i++) { + prepareIndex("test").setSource("id", Integer.toString(i)).get(); + } + indicesAdmin().prepareRefresh("test").get(); + + BytesReference pitId = client().execute( + TransportOpenPointInTimeAction.TYPE, + new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(10)) + ).actionGet().getPointInTimeId(); + final MockSearchService searchService = (MockSearchService) getInstanceFromNode(SearchService.class); + final List<ShardSearchRequest> shardRequests = new CopyOnWriteArrayList<>(); + searchService.setOnCreateSearchContext(ctx -> shardRequests.add(ctx.request())); + try { + assertHitCount( + client().prepareSearch() + .setSource( + new SearchSourceBuilder().size(between(numDocs, numDocs * 2)).pointInTimeBuilder(new PointInTimeBuilder(pitId)) + ), + numDocs + ); + } finally { + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); + } + assertThat(shardRequests, not(emptyList())); + for (ShardSearchRequest shardRequest : shardRequests) { + assertNotNull(shardRequest.source()); + assertNotNull(shardRequest.source().pointInTimeBuilder()); + assertThat(shardRequest.source().pointInTimeBuilder().getEncodedId(), equalTo(BytesArray.EMPTY)); + } + } + + public void testDfsQueryPhaseRewrite() { + createIndex("index"); + prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.source(SearchSourceBuilder.searchSource().query(new TestRewriteCounterQueryBuilder())); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); + ReaderContext context = service.createAndPutReaderContext( + request, + indexService, + indexShard, + reader, + SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + ); + PlainActionFuture<QuerySearchResult> plainActionFuture = new PlainActionFuture<>(); + service.executeQueryPhase( + new QuerySearchRequest(null, context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)), + new SearchShardTask(42L, "", "", "", null, emptyMap()), + plainActionFuture + ); + + plainActionFuture.actionGet(); + assertThat(((TestRewriteCounterQueryBuilder) request.source().query()).asyncRewriteCount, equalTo(1)); + final ShardSearchContextId contextId = context.id(); + assertTrue(service.freeReaderContext(contextId)); + } + + public void testEnableSearchWorkerThreads() throws IOException { + IndexService indexService = createIndex("index", Settings.EMPTY); + IndexShard indexShard = indexService.getShard(0); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(randomBoolean()), + indexShard.shardId(), + 0, + indexService.numberOfShards(), + AliasFilter.EMPTY, + 1f, + System.currentTimeMillis(), + null + ); + try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) { + SearchService service = getInstanceFromNode(SearchService.class); + SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap()); + + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { + assertTrue(searchContext.searcher().hasExecutor()); + } + + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(SEARCH_WORKER_THREADS_ENABLED.getKey(), false).build()) + .get(); + assertTrue(response.isAcknowledged()); + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { + assertFalse(searchContext.searcher().hasExecutor()); + } + } finally { + // reset original default setting + client().admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().putNull(SEARCH_WORKER_THREADS_ENABLED.getKey()).build()) + .get(); + try (SearchContext searchContext =
service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { + assertTrue(searchContext.searcher().hasExecutor()); + } + } + } + } + + /** + * Verify that a single slice is created for requests that don't support parallel collection, while an executor is still + * provided to the searcher to parallelize other operations. Also ensure multiple slices are created for requests that do support + * parallel collection. + */ + public void testSlicingBehaviourForParallelCollection() throws Exception { + IndexService indexService = createIndex("index", Settings.EMPTY); + ThreadPoolExecutor executor = (ThreadPoolExecutor) indexService.getThreadPool().executor(ThreadPool.Names.SEARCH); + final int configuredMaxPoolSize = 10; + executor.setMaximumPoolSize(configuredMaxPoolSize); // We set this explicitly to be independent of CPU cores. + int numDocs = randomIntBetween(50, 100); + for (int i = 0; i < numDocs; i++) { + prepareIndex("index").setId(String.valueOf(i)).setSource("field", "value").get(); + if (i % 5 == 0) { + indicesAdmin().prepareRefresh("index").get(); + } + } + final IndexShard indexShard = indexService.getShard(0); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(randomBoolean()), + indexShard.shardId(), + 0, + indexService.numberOfShards(), + AliasFilter.EMPTY, + 1f, + System.currentTimeMillis(), + null + ); + SearchService service = getInstanceFromNode(SearchService.class); + NonCountingTermQuery termQuery = new NonCountingTermQuery(new Term("field", "value")); + assertEquals(0, executor.getCompletedTaskCount()); + try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) { + SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap()); + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertTrue(searcher.hasExecutor()); + + final int maxPoolSize = executor.getMaximumPoolSize(); + assertEquals( + "Sanity check to ensure this isn't the default of 1 when pool size is unset", + configuredMaxPoolSize, + maxPoolSize + ); + + final int expectedSlices = ContextIndexSearcher.computeSlices( + searcher.getIndexReader().leaves(), + maxPoolSize, + 1 + ).length; + assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); + + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices())); + assertBusy( + () -> assertEquals( + "DFS supports parallel collection, so the number of slices should be > 1.", + expectedSlices - 1, // one slice executes on the calling thread + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertTrue(searcher.hasExecutor()); + + final int maxPoolSize = executor.getMaximumPoolSize(); + assertEquals( + "Sanity check to ensure this isn't the default of 1 when pool size is unset", + configuredMaxPoolSize, + maxPoolSize + ); + + final int expectedSlices = ContextIndexSearcher.computeSlices( + searcher.getIndexReader().leaves(), + maxPoolSize, + 1 + ).length; + assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool 
size is unset", 1, expectedSlices); + + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices())); + assertBusy( + () -> assertEquals( + "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", + expectedSlices - 1, // one slice executes on the calling thread + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.FETCH, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertFalse(searcher.hasExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices())); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 as FETCH does not support parallel collection and thus runs on the calling" + + " thread.", + 0, + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.NONE, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertFalse(searcher.hasExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices())); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 as NONE does not support parallel collection.", + 0, // zero since one slice executes on the calling thread + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) + .get(); + assertTrue(response.isAcknowledged()); + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertFalse(searcher.hasExecutor()); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices())); + assertBusy( + () -> assertEquals( + "The number of slices should be 1 when QUERY parallel collection is disabled.", + 0, // zero since one slice executes on the calling thread + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + } finally { + // Reset to the original default setting and check to ensure it takes effect. 
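+ // putNull removes the persistent override so the setting falls back to its default (parallel collection enabled); the block below verifies the default by expecting multiple slices again.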
+ client().admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().putNull(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey()).build()) + .get(); + { + try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) { + ContextIndexSearcher searcher = searchContext.searcher(); + assertTrue(searcher.hasExecutor()); + + final int maxPoolSize = executor.getMaximumPoolSize(); + assertEquals( + "Sanity check to ensure this isn't the default of 1 when pool size is unset", + configuredMaxPoolSize, + maxPoolSize + ); + + final int expectedSlices = ContextIndexSearcher.computeSlices( + searcher.getIndexReader().leaves(), + maxPoolSize, + 1 + ).length; + assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); + + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); + searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices())); + assertBusy( + () -> assertEquals( + "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", + expectedSlices - 1, // one slice executes on the calling thread + executor.getCompletedTaskCount() - priorExecutorTaskCount + ) + ); + } + } + } + } + } + + private static ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) { + return new ReaderContext( + new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()), + indexService, + indexShard, + indexShard.acquireSearcherSupplier(), + randomNonNegativeLong(), + false + ); + } + + private static class TestRewriteCounterQueryBuilder extends AbstractQueryBuilder<TestRewriteCounterQueryBuilder> { + + final int asyncRewriteCount; + final Supplier<Boolean> fetched; + + TestRewriteCounterQueryBuilder() { + asyncRewriteCount = 0; + fetched = null; + } + + private TestRewriteCounterQueryBuilder(int asyncRewriteCount, Supplier<Boolean> fetched) { + this.asyncRewriteCount = asyncRewriteCount; + this.fetched = fetched; + } + + @Override + public String getWriteableName() { + return "test_query"; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ZERO; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException {} + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException {} + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + return new MatchAllDocsQuery(); + } + + @Override + protected boolean doEquals(TestRewriteCounterQueryBuilder other) { + return true; + } + + @Override + protected int doHashCode() { + return 42; + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + if (asyncRewriteCount > 0) { + return this; + } + if (fetched != null) { + if (fetched.get() == null) { + return this; + } + assert fetched.get(); + return new TestRewriteCounterQueryBuilder(1, null); + } + if (queryRewriteContext.convertToDataRewriteContext() != null) { + SetOnce<Boolean> awaitingFetch = new SetOnce<>(); + queryRewriteContext.registerAsyncAction((c, l) -> { + awaitingFetch.set(true); + l.onResponse(null); + }); + return new TestRewriteCounterQueryBuilder(0, awaitingFetch::get); + } + return this; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 89fd25f638e1c..31bcab31ca8a7 
100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -6,3006 +6,298 @@ * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". */ + package org.elasticsearch.search; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.FilterDirectoryReader; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TotalHitCountCollectorManager; -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocWriteResponse; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.SortField; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.OpenPointInTimeRequest; -import org.elasticsearch.action.search.SearchPhaseController; -import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.search.SearchShardTask; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.action.search.TransportClosePointInTimeAction; -import org.elasticsearch.action.search.TransportOpenPointInTimeAction; -import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.breaker.NoopCircuitBreaker; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.AbstractRefCounted; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexModule; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.common.util.BigArrays; import 
org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.LeafFieldData; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.MapperMetrics; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.MetadataFieldMapper; +import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.mapper.RootObjectMapper; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.settings.InternalOrPrivateSettingsPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.script.MockScriptEngine; -import org.elasticsearch.script.MockScriptPlugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.SearchService.ResultsType; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.AggregationReduceContext; -import org.elasticsearch.search.aggregations.MultiBucketConsumerService; -import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.dfs.AggregatedDfs; -import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.fetch.ShardFetchRequest; -import org.elasticsearch.search.fetch.ShardFetchSearchRequest; -import org.elasticsearch.search.fetch.subphase.FieldAndFormat; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.ContextIndexSearcher; -import org.elasticsearch.search.internal.ReaderContext; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; -import 
org.elasticsearch.search.query.NonCountingTermQuery; -import org.elasticsearch.search.query.QuerySearchRequest; -import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.SearchTimeoutException; -import org.elasticsearch.search.rank.RankBuilder; -import org.elasticsearch.search.rank.RankDoc; -import org.elasticsearch.search.rank.RankShardResult; -import org.elasticsearch.search.rank.TestRankBuilder; -import org.elasticsearch.search.rank.TestRankShardResult; -import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; -import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; -import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; -import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; -import org.elasticsearch.search.rank.feature.RankFeatureDoc; -import org.elasticsearch.search.rank.feature.RankFeatureResult; -import org.elasticsearch.search.rank.feature.RankFeatureShardRequest; -import org.elasticsearch.search.rank.feature.RankFeatureShardResult; -import org.elasticsearch.search.suggest.SuggestBuilder; -import org.elasticsearch.tasks.TaskCancelHelper; -import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.json.JsonXContent; -import org.junit.Before; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.MinAndMax; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.Collections; -import java.util.Comparator; -import java.util.LinkedList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.function.IntConsumer; -import java.util.function.Supplier; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonList; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; -import static org.elasticsearch.search.SearchService.DEFAULT_SIZE; -import static org.elasticsearch.search.SearchService.QUERY_PHASE_PARALLEL_COLLECTION_ENABLED; -import static org.elasticsearch.search.SearchService.SEARCH_WORKER_THREADS_ENABLED; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; -import static 
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.CoreMatchers.startsWith;
-import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.hamcrest.Matchers.not;
-import static org.mockito.Mockito.mock;
+import java.util.function.BiFunction;
+import java.util.function.Predicate;
 
-public class SearchServiceTests extends ESSingleNodeTestCase {
+public class SearchServiceTests extends IndexShardTestCase {
 
-    @Override
-    protected boolean resetNodeAfterTest() {
-        return true;
+    public void testCanMatchMatchAll() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()));
+        doTestCanMatch(searchRequest, null, true, null, false);
     }
 
-    @Override
-    protected Collection<Class<? extends Plugin>> getPlugins() {
-        return pluginList(
-            FailOnRewriteQueryPlugin.class,
-            CustomScriptPlugin.class,
-            ReaderWrapperCountPlugin.class,
-            InternalOrPrivateSettingsPlugin.class,
-            MockSearchService.TestPlugin.class
-        );
+    public void testCanMatchMatchNone() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder()));
+        doTestCanMatch(searchRequest, null, false, null, false);
     }
 
-    public static class ReaderWrapperCountPlugin extends Plugin {
-        @Override
-        public void onIndexModule(IndexModule indexModule) {
-            indexModule.setReaderWrapper(service -> SearchServiceTests::apply);
-        }
+    public void testCanMatchMatchNoneWithException() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder()));
+        doTestCanMatch(searchRequest, null, true, null, true);
     }
 
-    @Before
-    public void resetCount() {
-        numWrapInvocations = new AtomicInteger(0);
+    public void testCanMatchKeywordSortedQueryMatchNone() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().sort("field").query(new MatchNoneQueryBuilder()));
+        SortField sortField = new SortField("field", SortField.Type.STRING);
+        doTestCanMatch(searchRequest, sortField, false, null, false);
     }
 
-    private static AtomicInteger numWrapInvocations = new AtomicInteger(0);
-
-    private static DirectoryReader apply(DirectoryReader directoryReader) throws IOException {
-        numWrapInvocations.incrementAndGet();
-        return new FilterDirectoryReader(directoryReader, new FilterDirectoryReader.SubReaderWrapper() {
-            @Override
-            public LeafReader wrap(LeafReader reader) {
-                return reader;
-            }
-        }) {
-            @Override
-            protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
-                return in;
-            }
+    public void testCanMatchKeywordSortedQueryMatchAll() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().sort("field").query(new MatchAllQueryBuilder()));
+        SortField sortField = new SortField("field", SortField.Type.STRING);
+        MinAndMax<BytesRef> expectedMinAndMax = new MinAndMax<>(new BytesRef("value"), new BytesRef("value"));
+        doTestCanMatch(searchRequest, sortField, true, expectedMinAndMax, false);
+    }
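
A note on the test matrix above: it pins down two distinct exception behaviors in the can-match phase. A failure while rewriting the query makes the shard report canMatch=true (fail open, so the error surfaces in the query phase instead of the shard being silently skipped), whereas a failure in the sort min/max estimation leaves canMatch decided by the query alone and simply drops the estimate. A minimal, self-contained sketch of the fail-open half; the helper and names are illustrative, not the actual SearchService internals:

    import java.util.function.BooleanSupplier;

    final class CanMatchFailOpen {
        // Hypothetical helper: run the shard-level can-match check; if the
        // check itself throws, report "can match" so the shard participates
        // and the query phase can surface the underlying failure.
        static boolean canMatch(BooleanSupplier check) {
            try {
                return check.getAsBoolean();
            } catch (RuntimeException e) {
                return true; // fail open
            }
        }

        public static void main(String[] args) {
            System.out.println(canMatch(() -> false)); // match_none rewrote to "no match": false
            System.out.println(canMatch(() -> { throw new IllegalArgumentException(); })); // true: fail open
        }
    }
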
+    public void testCanMatchKeywordSortedQueryMatchNoneWithException() throws IOException {
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
+            .source(new SearchSourceBuilder().sort("field").query(new MatchNoneQueryBuilder()));
+        // provide a sort field that throws exception
+        SortField sortField = new SortField("field", SortField.Type.STRING) {
             @Override
-            public CacheHelper getReaderCacheHelper() {
-                return directoryReader.getReaderCacheHelper();
+            public Type getType() {
+                throw new UnsupportedOperationException();
             }
         };
+        doTestCanMatch(searchRequest, sortField, false, null, false);
     }
 
-    public static class CustomScriptPlugin extends MockScriptPlugin {
-
-        static final String DUMMY_SCRIPT = "dummyScript";
-
-        @Override
-        protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
-            return Collections.singletonMap(DUMMY_SCRIPT, vars -> "dummy");
-        }
-
-        @Override
-        public void onIndexModule(IndexModule indexModule) {
-            indexModule.addSearchOperationListener(new SearchOperationListener() {
-                @Override
-                public void onFetchPhase(SearchContext context, long tookInNanos) {
-                    if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) {
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]"));
-                    } else {
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
-                    }
-                }
-
-                @Override
-                public void onQueryPhase(SearchContext context, long tookInNanos) {
-                    if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) {
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]"));
-                    } else {
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
-                    }
-                }
-            });
-        }
-    }
-
-    @Override
-    protected Settings nodeSettings() {
-        return Settings.builder().put("search.default_search_timeout", "5s").build();
-    }
-
-    public void testClearOnClose() {
-        createIndex("index");
-        prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertResponse(
-            client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)),
-            searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue()))
-        );
-        SearchService service = getInstanceFromNode(SearchService.class);
-
-        assertEquals(1, service.getActiveContexts());
-        service.doClose(); // this kills the keep-alive reaper we have to reset the node after this test
-        assertEquals(0, service.getActiveContexts());
-    }
-
-    public void testClearOnStop() {
-        createIndex("index");
-        prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertResponse(
-            client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)),
-            searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue()))
-        );
-        SearchService service = getInstanceFromNode(SearchService.class);
-
-        assertEquals(1, service.getActiveContexts());
-        service.doStop();
-        assertEquals(0, service.getActiveContexts());
-    }
-
-    public void testClearIndexDelete() {
-        createIndex("index");
-        prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertResponse(
-            client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)),
-            searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue()))
-        );
-        SearchService service = getInstanceFromNode(SearchService.class);
-
-        assertEquals(1, service.getActiveContexts());
-
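
The SortField whose getType() throws, used in the new test above, is a loud stub: it implements only what the code under test may legitimately touch and fails fast on anything else, so an unexpected code path shows up as a test failure rather than a silently wrong result. The same pattern in a generic, self-contained form (the interface and names here are illustrative, not the Lucene or Elasticsearch types):

    // A stub that fails loudly if the code under test strays beyond the
    // members the test intends it to exercise.
    interface SortSource {
        String fieldName();
        Comparable<?> minValue();
    }

    final class LoudStub implements SortSource {
        @Override
        public String fieldName() {
            return "field"; // the only member this test path should need
        }

        @Override
        public Comparable<?> minValue() {
            // Reaching this means the code under test used more of the
            // interface than expected; fail loudly, do not return a dummy.
            throw new UnsupportedOperationException("not expected in this test");
        }
    }
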
assertAcked(indicesAdmin().prepareDelete("index")); - awaitIndexShardCloseAsyncTasks(); - assertEquals(0, service.getActiveContexts()); - } - - public void testCloseSearchContextOnRewriteException() { - // if refresh happens while checking the exception, the subsequent reference count might not match, so we switch it off - createIndex("index", Settings.builder().put("index.refresh_interval", -1).build()); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - SearchService service = getInstanceFromNode(SearchService.class); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); - - final int activeContexts = service.getActiveContexts(); - final int activeRefs = indexShard.store().refCount(); - expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("index").setQuery(new FailOnRewriteQueryBuilder()).get() - ); - assertEquals(activeContexts, service.getActiveContexts()); - assertEquals(activeRefs, indexShard.store().refCount()); - } - - public void testSearchWhileIndexDeleted() throws InterruptedException { - createIndex("index"); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - SearchService service = getInstanceFromNode(SearchService.class); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); - AtomicBoolean running = new AtomicBoolean(true); - CountDownLatch startGun = new CountDownLatch(1); - final int permitCount = 100; - Semaphore semaphore = new Semaphore(permitCount); - ShardRouting routing = TestShardRouting.newShardRouting( - indexShard.shardId(), - randomAlphaOfLength(5), - randomBoolean(), - ShardRoutingState.INITIALIZING - ); - final Thread thread = new Thread(() -> { - startGun.countDown(); - while (running.get()) { - if (randomBoolean()) { - service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED); - } else { - service.beforeIndexShardCreated(routing, indexService.getIndexSettings().getSettings()); - } - if (randomBoolean()) { - // here we trigger some refreshes to ensure the IR go out of scope such that we hit ACE if we access a search - // context in a non-sane way. - try { - semaphore.acquire(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - prepareIndex("index").setSource("field", "value") - .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())) - .execute(ActionListener.running(semaphore::release)); - } - } - }); - thread.start(); - startGun.await(); - try { - final int rounds = scaledRandomIntBetween(100, 10000); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchRequest scrollSearchRequest = new SearchRequest().allowPartialSearchResults(true) - .scroll(new Scroll(TimeValue.timeValueMinutes(1))); - for (int i = 0; i < rounds; i++) { - try { - try { - PlainActionFuture result = new PlainActionFuture<>(); - final boolean useScroll = randomBoolean(); - service.executeQueryPhase( - new ShardSearchRequest( - OriginalIndices.NONE, - useScroll ? 
scrollSearchRequest : searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ), - new SearchShardTask(123L, "", "", "", null, emptyMap()), - result.delegateFailure((l, r) -> { - r.incRef(); - l.onResponse(r); - }) - ); - final SearchPhaseResult searchPhaseResult = result.get(); - try { - List intCursors = new ArrayList<>(1); - intCursors.add(0); - ShardFetchRequest req = new ShardFetchRequest( - searchPhaseResult.getContextId(), - intCursors, - null/* not a scroll */ - ); - PlainActionFuture listener = new PlainActionFuture<>(); - service.executeFetchPhase(req, new SearchShardTask(123L, "", "", "", null, emptyMap()), listener); - listener.get(); - if (useScroll) { - // have to free context since this test does not remove the index from IndicesService. - service.freeReaderContext(searchPhaseResult.getContextId()); - } - } finally { - searchPhaseResult.decRef(); - } - } catch (ExecutionException ex) { - assertThat(ex.getCause(), instanceOf(RuntimeException.class)); - throw ((RuntimeException) ex.getCause()); - } - } catch (AlreadyClosedException ex) { - throw ex; - } catch (IllegalStateException ex) { - assertEquals(AbstractRefCounted.ALREADY_CLOSED_MESSAGE, ex.getMessage()); - } catch (SearchContextMissingException ex) { - // that's fine - } + public void testCanMatchKeywordSortedQueryMatchAllWithException() throws IOException { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false) + .source(new SearchSourceBuilder().sort("field").query(new MatchAllQueryBuilder())); + // provide a sort field that throws exception + SortField sortField = new SortField("field", SortField.Type.STRING) { + @Override + public Type getType() { + throw new UnsupportedOperationException(); } - } finally { - running.set(false); - thread.join(); - semaphore.acquire(permitCount); - } - - assertEquals(0, service.getActiveContexts()); - - SearchStats.Stats totalStats = indexShard.searchStats().getTotal(); - assertEquals(0, totalStats.getQueryCurrent()); - assertEquals(0, totalStats.getScrollCurrent()); - assertEquals(0, totalStats.getFetchCurrent()); + }; + doTestCanMatch(searchRequest, sortField, true, null, false); } - public void testRankFeaturePhaseSearchPhases() throws InterruptedException, ExecutionException { - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 3; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - final SearchService service = getInstanceFromNode(SearchService.class); - - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex(indexName)); - final IndexShard indexShard = indexService.getShard(0); - SearchShardTask searchTask = new SearchShardTask(123L, "", "", "", null, emptyMap()); - - // create a SearchRequest that will return all documents and defines a TestRankBuilder with shard-level only operations - 
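
A readability note on the nine-argument ShardSearchRequest constructions that recur throughout this file: reading the call sites, the positional arguments appear to carry the following meanings (our annotation, worth double-checking against the constructor declaration before relying on it):

    // new ShardSearchRequest(
    //     OriginalIndices.NONE,   // originalIndices: synthetic shard-level request
    //     searchRequest,          // the top-level SearchRequest being executed
    //     indexShard.shardId(),   // target shard
    //     0,                      // index of this shard request within the search
    //     1,                      // total number of shards participating
    //     AliasFilter.EMPTY,      // no alias filter applied
    //     1.0f,                   // index boost
    //     -1,                     // nowInMillis; -1 apparently means "resolve now() on the shard"
    //     null                    // clusterAlias; null for a local, single-cluster search
    // );
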
SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true) - .source( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .size(DEFAULT_SIZE) - .fetchField(fetchFieldName) - .rankBuilder( - // here we override only the shard-level contexts - new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); - rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); - rankFeatureDocs[i].score = (numDocs - i) + randomFloat(); - rankFeatureDocs[i].rank = i + 1; - } - return new RankFeatureShardResult(rankFeatureDocs); - } - }; - } - } - ) - ); - - ShardSearchRequest request = new ShardSearchRequest( + private void doTestCanMatch( + SearchRequest searchRequest, + SortField sortField, + boolean expectedCanMatch, + MinAndMax expectedMinAndMax, + boolean throwException + ) throws IOException { + ShardSearchRequest shardRequest = new ShardSearchRequest( OriginalIndices.NONE, searchRequest, - indexShard.shardId(), + new ShardId("index", "index", 0), 0, - 1, + 5, AliasFilter.EMPTY, 1.0f, - -1, + 0, null ); - QuerySearchResult queryResult = null; - RankFeatureResult rankResult = null; + IndexFieldData indexFieldData = indexFieldData(sortField); + IndexShard indexShard = newShard(true); try { - // Execute the query phase and store the result in a SearchPhaseResult container using a PlainActionFuture - PlainActionFuture queryPhaseResults = new PlainActionFuture<>(); - service.executeQueryPhase(request, searchTask, queryPhaseResults); - queryResult = (QuerySearchResult) queryPhaseResults.get(); - - // these are the matched docs from the query phase - final RankDoc[] queryRankDocs = ((TestRankShardResult) queryResult.getRankShardResult()).testRankDocs; - - // assume that we have cut down to these from the coordinator node as the top-docs to run the rank feature phase upon - List topRankWindowSizeDocs = randomNonEmptySubsetOf(Arrays.stream(queryRankDocs).map(x -> x.doc).toList()); - - // now we create a RankFeatureShardRequest to extract feature info for the top-docs above - RankFeatureShardRequest rankFeatureShardRequest = new RankFeatureShardRequest( - OriginalIndices.NONE, - queryResult.getContextId(), // use the context from the query phase - request, - topRankWindowSizeDocs - ); - PlainActionFuture rankPhaseResults = new PlainActionFuture<>(); - service.executeRankFeaturePhase(rankFeatureShardRequest, searchTask, 
rankPhaseResults); - rankResult = rankPhaseResults.get(); - - assertNotNull(rankResult); - assertNotNull(rankResult.rankFeatureResult()); - RankFeatureShardResult rankFeatureShardResult = rankResult.rankFeatureResult().shardResult(); - assertNotNull(rankFeatureShardResult); - - List sortedRankWindowDocs = topRankWindowSizeDocs.stream().sorted().toList(); - assertEquals(sortedRankWindowDocs.size(), rankFeatureShardResult.rankFeatureDocs.length); - for (int i = 0; i < sortedRankWindowDocs.size(); i++) { - assertEquals((long) sortedRankWindowDocs.get(i), rankFeatureShardResult.rankFeatureDocs[i].doc); - assertEquals(rankFeatureShardResult.rankFeatureDocs[i].featureData, "aardvark_" + sortedRankWindowDocs.get(i)); - } - - List globalTopKResults = randomNonEmptySubsetOf( - Arrays.stream(rankFeatureShardResult.rankFeatureDocs).map(x -> x.doc).toList() - ); - - // finally let's create a fetch request to bring back fetch info for the top results - ShardFetchSearchRequest fetchRequest = new ShardFetchSearchRequest( - OriginalIndices.NONE, - rankResult.getContextId(), - request, - globalTopKResults, - null, - null, - rankResult.getRescoreDocIds(), - null - ); - - // execute fetch phase and perform any validations once we retrieve the response - // the difference in how we do assertions here is needed because once the transport service sends back the response - // it decrements the reference to the FetchSearchResult (through the ActionListener#respondAndRelease) and sets hits to null - PlainActionFuture fetchListener = new PlainActionFuture<>() { - @Override - public void onResponse(FetchSearchResult fetchSearchResult) { - assertNotNull(fetchSearchResult); - assertNotNull(fetchSearchResult.hits()); - - int totalHits = fetchSearchResult.hits().getHits().length; - assertEquals(globalTopKResults.size(), totalHits); - for (int i = 0; i < totalHits; i++) { - // rank and score are set by the SearchPhaseController#merge so no need to validate that here - SearchHit hit = fetchSearchResult.hits().getAt(i); - assertNotNull(hit.getFields().get(fetchFieldName)); - assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); - } - super.onResponse(fetchSearchResult); - } - - @Override - public void onFailure(Exception e) { - super.onFailure(e); - throw new AssertionError("No failure should have been raised", e); - } - }; - service.executeFetchPhase(fetchRequest, searchTask, fetchListener); - fetchListener.get(); - } catch (Exception ex) { - if (queryResult != null) { - if (queryResult.hasReferences()) { - queryResult.decRef(); - } - service.freeReaderContext(queryResult.getContextId()); - } - if (rankResult != null && rankResult.hasReferences()) { - rankResult.decRef(); - } - throw ex; - } - } - - public void testRankFeaturePhaseUsingClient() { - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 4; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - 
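
Throughout these tests, PlainActionFuture does double duty as an ActionListener that the test thread can then block on until the async phase completes. The same idea expressed in plain JDK types, as a sketch rather than the actual ES class:

    import java.util.concurrent.CompletableFuture;

    // A listener you can block on: async callbacks complete the future, and
    // the test thread waits on actionGet() for the result or the failure.
    final class BlockingListener<T> {
        private final CompletableFuture<T> future = new CompletableFuture<>();

        public void onResponse(T result) {
            future.complete(result);
        }

        public void onFailure(Exception e) {
            future.completeExceptionally(e);
        }

        public T actionGet() {
            return future.join(); // blocks until completion, rethrows failures
        }
    }
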
ElasticsearchAssertions.assertResponse( - client().prepareSearch(indexName) - .setSource( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .size(2) - .from(2) - .fetchField(fetchFieldName) - .rankBuilder( - // here we override only the shard-level contexts - new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - - // no need for more than one queries - @Override - public boolean isCompoundBuilder() { - return false; - } - - @Override - public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( - int size, - int from, - Client client - ) { - return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { - @Override - protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { - float[] scores = new float[featureDocs.length]; - for (int i = 0; i < featureDocs.length; i++) { - scores[i] = featureDocs[i].score; - } - scoreListener.onResponse(scores); - } - }; - } - - @Override - public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { - return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List querySearchResults, - SearchPhaseController.TopDocsStats topDocStats - ) { - List rankDocs = new ArrayList<>(); - for (int i = 0; i < querySearchResults.size(); i++) { - QuerySearchResult querySearchResult = querySearchResults.get(i); - TestRankShardResult shardResult = (TestRankShardResult) querySearchResult - .getRankShardResult(); - for (RankDoc trd : shardResult.testRankDocs) { - trd.shardIndex = i; - rankDocs.add(trd); - } - } - rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); - RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); - topDocStats.fetchHits = topResults.length; - return topResults; - } - }; - } - - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); - rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); - rankFeatureDocs[i].score = randomFloat(); - rankFeatureDocs[i].rank = i + 1; - } - return new RankFeatureShardResult(rankFeatureDocs); - } - }; - } - } - ) - ), - (response) -> { - SearchHits hits = response.getHits(); - assertEquals(hits.getTotalHits().value(), numDocs); - assertEquals(hits.getHits().length, 2); - int index = 0; - for (SearchHit hit : hits.getHits()) { - 
assertEquals(hit.getRank(), 3 + index); - assertTrue(hit.getScore() >= 0); - assertEquals(hit.getFields().get(fetchFieldName).getValue(), fetchFieldValue + "_" + hit.docId()); - index++; - } - } - ); - } - - public void testRankFeaturePhaseExceptionOnCoordinatingNode() { - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 3; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch(indexName) - .setSource( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .size(2) - .from(2) - .fetchField(fetchFieldName) - .rankBuilder(new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - - // no need for more than one queries - @Override - public boolean isCompoundBuilder() { - return false; - } - - @Override - public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( - int size, - int from, - Client client - ) { - return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { - @Override - protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { - throw new IllegalStateException("should have failed earlier"); - } - }; - } - - @Override - public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { - return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List querySearchResults, - SearchPhaseController.TopDocsStats topDocStats - ) { - throw new UnsupportedOperationException("simulated failure"); - } - }; - } - - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); - rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); - rankFeatureDocs[i].score = randomFloat(); - rankFeatureDocs[i].rank = i + 1; 
- } - return new RankFeatureShardResult(rankFeatureDocs); - } - }; - } - }) - ) - .get() - ); - } - - public void testRankFeaturePhaseExceptionAllShardFail() { - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 3; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch(indexName) - .setAllowPartialSearchResults(true) - .setSource( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .fetchField(fetchFieldName) - .rankBuilder( - // here we override only the shard-level contexts - new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - - // no need for more than one queries - @Override - public boolean isCompoundBuilder() { - return false; - } - - @Override - public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( - int size, - int from, - Client client - ) { - return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { - @Override - protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { - float[] scores = new float[featureDocs.length]; - for (int i = 0; i < featureDocs.length; i++) { - scores[i] = featureDocs[i].score; - } - scoreListener.onResponse(scores); - } - }; - } - - @Override - public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { - return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List querySearchResults, - SearchPhaseController.TopDocsStats topDocStats - ) { - List rankDocs = new ArrayList<>(); - for (int i = 0; i < querySearchResults.size(); i++) { - QuerySearchResult querySearchResult = querySearchResults.get(i); - TestRankShardResult shardResult = (TestRankShardResult) querySearchResult - .getRankShardResult(); - for (RankDoc trd : shardResult.testRankDocs) { - trd.shardIndex = i; - rankDocs.add(trd); - } - } - rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); - RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); - topDocStats.fetchHits = topResults.length; - return topResults; - } - }; - } - - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext 
buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - throw new UnsupportedOperationException("simulated failure"); - } - }; - } - } - ) - ) - .get() - ); - } - - public void testRankFeaturePhaseExceptionOneShardFails() { - // if we have only one shard and it fails, it will fallback to context.onPhaseFailure which will eventually clean up all contexts. - // in this test we want to make sure that even if one shard (of many) fails during the RankFeaturePhase, then the appropriate - // context will have been cleaned up. - final String indexName = "index"; - final String rankFeatureFieldName = "field"; - final String searchFieldName = "search_field"; - final String searchFieldValue = "some_value"; - final String fetchFieldName = "fetch_field"; - final String fetchFieldValue = "fetch_value"; - - final int minDocs = 3; - final int maxDocs = 10; - int numDocs = between(minDocs, maxDocs); - createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).build()); - // index some documents - for (int i = 0; i < numDocs; i++) { - prepareIndex(indexName).setId(String.valueOf(i)) - .setSource( - rankFeatureFieldName, - "aardvark_" + i, - searchFieldName, - searchFieldValue, - fetchFieldName, - fetchFieldValue + "_" + i - ) - .get(); - } - indicesAdmin().prepareRefresh(indexName).get(); - - assertResponse( - client().prepareSearch(indexName) - .setAllowPartialSearchResults(true) - .setSource( - new SearchSourceBuilder().query(new TermQueryBuilder(searchFieldName, searchFieldValue)) - .fetchField(fetchFieldName) - .rankBuilder( - // here we override only the shard-level contexts - new TestRankBuilder(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - - // no need for more than one queries - @Override - public boolean isCompoundBuilder() { - return false; - } - - @Override - public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext( - int size, - int from, - Client client - ) { - return new RankFeaturePhaseRankCoordinatorContext(size, from, DEFAULT_RANK_WINDOW_SIZE) { - @Override - protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { - float[] scores = new float[featureDocs.length]; - for (int i = 0; i < featureDocs.length; i++) { - scores[i] = featureDocs[i].score; - } - scoreListener.onResponse(scores); - } - }; - } - - @Override - public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { - return new QueryPhaseRankCoordinatorContext(RankBuilder.DEFAULT_RANK_WINDOW_SIZE) { - @Override - public ScoreDoc[] rankQueryPhaseResults( - List querySearchResults, - SearchPhaseController.TopDocsStats topDocStats - ) { - List rankDocs = new ArrayList<>(); - for (int i = 0; i < querySearchResults.size(); i++) { - QuerySearchResult querySearchResult = querySearchResults.get(i); - TestRankShardResult shardResult = (TestRankShardResult) querySearchResult - .getRankShardResult(); - for (RankDoc trd : shardResult.testRankDocs) { - trd.shardIndex = i; - rankDocs.add(trd); - } - } - rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); - RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); - topDocStats.fetchHits = topResults.length; - return topResults; - } - }; - } - - @Override - public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { - return new 
QueryPhaseRankShardContext(queries, from) { - - @Override - public int rankWindowSize() { - return DEFAULT_RANK_WINDOW_SIZE; - } - - @Override - public RankShardResult combineQueryPhaseResults(List rankResults) { - // we know we have just 1 query, so return all the docs from it - return new TestRankShardResult( - Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) - .limit(rankWindowSize()) - .toArray(RankDoc[]::new) - ); - } - }; - } - - @Override - public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { - return new RankFeaturePhaseRankShardContext(rankFeatureFieldName) { - @Override - public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) { - if (shardId == 0) { - throw new UnsupportedOperationException("simulated failure"); - } else { - RankFeatureDoc[] rankFeatureDocs = new RankFeatureDoc[hits.getHits().length]; - for (int i = 0; i < hits.getHits().length; i++) { - SearchHit hit = hits.getHits()[i]; - rankFeatureDocs[i] = new RankFeatureDoc(hit.docId(), hit.getScore(), shardId); - rankFeatureDocs[i].featureData(hit.getFields().get(rankFeatureFieldName).getValue()); - rankFeatureDocs[i].score = randomFloat(); - rankFeatureDocs[i].rank = i + 1; - } - return new RankFeatureShardResult(rankFeatureDocs); - } - } - }; - } - } - ) - ), - (searchResponse) -> { - assertEquals(1, searchResponse.getSuccessfulShards()); - assertEquals("simulated failure", searchResponse.getShardFailures()[0].getCause().getMessage()); - assertNotEquals(0, searchResponse.getHits().getHits().length); - for (SearchHit hit : searchResponse.getHits().getHits()) { - assertEquals(fetchFieldValue + "_" + hit.getId(), hit.getFields().get(fetchFieldName).getValue()); - assertEquals(1, hit.getShard().getShardId().id()); + recoverShardFromStore(indexShard); + assertTrue(indexDoc(indexShard, "_doc", "id", "{\"field\":\"value\"}").isCreated()); + assertTrue(indexShard.refresh("test").refreshed()); + try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) { + SearchExecutionContext searchExecutionContext = createSearchExecutionContext( + (mappedFieldType, fieldDataContext) -> indexFieldData, + searcher + ); + SearchService.CanMatchContext canMatchContext = createCanMatchContext( + shardRequest, + indexShard, + searchExecutionContext, + parserConfig(), + throwException + ); + CanMatchShardResponse canMatchShardResponse = SearchService.canMatch(canMatchContext, false); + assertEquals(expectedCanMatch, canMatchShardResponse.canMatch()); + if (expectedMinAndMax == null) { + assertNull(canMatchShardResponse.estimatedMinAndMax()); + } else { + MinAndMax minAndMax = canMatchShardResponse.estimatedMinAndMax(); + assertNotNull(minAndMax); + assertEquals(expectedMinAndMax.getMin(), minAndMax.getMin()); + assertEquals(expectedMinAndMax.getMin(), minAndMax.getMax()); } - } - ); - } - - public void testSearchWhileIndexDeletedDoesNotLeakSearchContext() throws ExecutionException, InterruptedException { - createIndex("index"); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); - - MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class); - service.setOnPutContext(context -> { - if (context.indexShard() == indexShard) { - 
assertAcked(indicesAdmin().prepareDelete("index")); } - }); - - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchRequest scrollSearchRequest = new SearchRequest().allowPartialSearchResults(true) - .scroll(new Scroll(TimeValue.timeValueMinutes(1))); - - // the scrolls are not explicitly freed, but should all be gone when the test finished. - // for completeness, we also randomly test the regular search path. - final boolean useScroll = randomBoolean(); - PlainActionFuture result = new PlainActionFuture<>(); - service.executeQueryPhase( - new ShardSearchRequest( - OriginalIndices.NONE, - useScroll ? scrollSearchRequest : searchRequest, - new ShardId(resolveIndex("index"), 0), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ), - new SearchShardTask(123L, "", "", "", null, emptyMap()), - result - ); - - try { - result.get(); - } catch (Exception e) { - // ok - } - - expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().setIndices("index").get()); - - assertEquals(0, service.getActiveContexts()); - - SearchStats.Stats totalStats = indexShard.searchStats().getTotal(); - assertEquals(0, totalStats.getQueryCurrent()); - assertEquals(0, totalStats.getScrollCurrent()); - assertEquals(0, totalStats.getFetchCurrent()); - } - - public void testBeforeShardLockDuringShardCreate() { - IndexService indexService = createIndex("index", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - assertResponse( - client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), - searchResponse -> assertThat(searchResponse.getScrollId(), is(notNullValue())) - ); - SearchService service = getInstanceFromNode(SearchService.class); - - assertEquals(1, service.getActiveContexts()); - service.beforeIndexShardCreated( - TestShardRouting.newShardRouting( - "test", - 0, - randomAlphaOfLength(5), - randomAlphaOfLength(5), - randomBoolean(), - ShardRoutingState.INITIALIZING - ), - indexService.getIndexSettings().getSettings() - ); - assertEquals(1, service.getActiveContexts()); - - service.beforeIndexShardCreated( - TestShardRouting.newShardRouting( - new ShardId(indexService.index(), 0), - randomAlphaOfLength(5), - randomBoolean(), - ShardRoutingState.INITIALIZING - ), - indexService.getIndexSettings().getSettings() - ); - assertEquals(0, service.getActiveContexts()); - } - - public void testTimeout() throws IOException { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - final ShardSearchRequest requestWithDefaultTimeout = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), + } finally { + closeShards(indexShard); + } + } + + private SearchExecutionContext createSearchExecutionContext( + BiFunction> indexFieldDataLookup, + IndexSearcher searcher + ) { + IndexMetadata indexMetadata = IndexMetadata.builder("index") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + IndexSettings indexSettings = new 
IndexSettings(indexMetadata, Settings.EMPTY); + Predicate indexNameMatcher = pattern -> Regex.simpleMatch(pattern, "index"); + + MapperBuilderContext root = MapperBuilderContext.root(false, false); + RootObjectMapper.Builder builder = new RootObjectMapper.Builder("_doc", ObjectMapper.Defaults.SUBOBJECTS); + Mapping mapping = new Mapping( + builder.build(MapperBuilderContext.root(false, false)), + new MetadataFieldMapper[0], + Collections.emptyMap() + ); + KeywordFieldMapper keywordFieldMapper = new KeywordFieldMapper.Builder("field", IndexVersion.current()).build(root); + MappingLookup mappingLookup = MappingLookup.fromMappers( + mapping, + Collections.singletonList(keywordFieldMapper), + Collections.emptyList() + ); + return new SearchExecutionContext( 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext contextWithDefaultTimeout = service.createContext( - reader, - requestWithDefaultTimeout, - mock(SearchShardTask.class), - ResultsType.NONE, - randomBoolean() - ) - ) { - // the search context should inherit the default timeout - assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5))); - } - - final long seconds = randomIntBetween(6, 10); - searchRequest.source(new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds))); - final ShardSearchRequest requestWithCustomTimeout = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null + indexSettings, + null, + indexFieldDataLookup, + null, + mappingLookup, + null, + null, + parserConfig(), + writableRegistry(), + null, + searcher, + System::currentTimeMillis, + null, + indexNameMatcher, + () -> true, + null, + Collections.emptyMap(), + MapperMetrics.NOOP ); - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext context = service.createContext( - reader, - requestWithCustomTimeout, - mock(SearchShardTask.class), - ResultsType.NONE, - randomBoolean() - ) - ) { - // the search context should inherit the query timeout - assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds))); - } } - /** - * test that getting more than the allowed number of docvalue_fields throws an exception - */ - public void testMaxDocvalueFieldsSearch() throws IOException { - final Settings settings = Settings.builder().put(IndexSettings.MAX_DOCVALUE_FIELDS_SEARCH_SETTING.getKey(), 1).build(); - createIndex("index", settings, null, "field1", "keyword", "field2", "keyword"); - prepareIndex("index").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchRequest.source(searchSourceBuilder); - searchSourceBuilder.docValueField("field1"); - - final ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - try ( - ReaderContext reader = createReaderContext(indexService, 
indexShard); - SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ) { - assertNotNull(context); - } - - searchSourceBuilder.docValueField("unmapped_field"); - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ) { - assertNotNull(context); - } - - searchSourceBuilder.docValueField("field2"); - try (ReaderContext reader = createReaderContext(indexService, indexShard)) { - IllegalArgumentException ex = expectThrows( - IllegalArgumentException.class, - () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ); - assertEquals( - "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [1] but was [2]. " - + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", - ex.getMessage() - ); - } - } + private static IndexFieldData indexFieldData(SortField sortField) { + return new IndexFieldData<>() { + @Override + public String getFieldName() { + return "field"; + } - public void testDeduplicateDocValuesFields() throws Exception { - createIndex("index", Settings.EMPTY, "_doc", "field1", "type=date", "field2", "type=date"); - prepareIndex("index").setId("1").setSource("field1", "2022-08-03", "field2", "2022-08-04").setRefreshPolicy(IMMEDIATE).get(); - SearchService service = getInstanceFromNode(SearchService.class); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - IndexShard indexShard = indexService.getShard(0); + @Override + public ValuesSourceType getValuesSourceType() { + throw new UnsupportedOperationException(); + } - try (ReaderContext reader = createReaderContext(indexService, indexShard)) { - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchRequest.source(searchSourceBuilder); - searchSourceBuilder.docValueField("f*"); - if (randomBoolean()) { - searchSourceBuilder.docValueField("field*"); + @Override + public LeafFieldData load(LeafReaderContext context) { + throw new UnsupportedOperationException(); } - if (randomBoolean()) { - searchSourceBuilder.docValueField("*2"); + + @Override + public LeafFieldData loadDirect(LeafReaderContext context) { + throw new UnsupportedOperationException(); } - ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - try ( - SearchContext context = service.createContext( - reader, - request, - mock(SearchShardTask.class), - ResultsType.NONE, - randomBoolean() - ) + + @Override + public SortField sortField( + Object missingValue, + MultiValueMode sortMode, + XFieldComparatorSource.Nested nested, + boolean reverse ) { - Collection fields = context.docValuesContext().fields(); - assertThat(fields, containsInAnyOrder(new FieldAndFormat("field1", null), new FieldAndFormat("field2", null))); + return sortField; } - } - } - - /** - * test that getting more than the allowed number of script_fields throws an exception - */ - public void testMaxScriptFieldsSearch() throws IOException { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); 
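
The IndexFieldData stub introduced in the new helper above only ever hands back the provided SortField; every other member throws. That is sufficient because the can-match path merely needs a per-shard min/max estimate for the primary sort field. Roughly why such estimates are worth propagating to the coordinator, sketched under the assumption (consistent with how can_match results are generally used) that they drive shard visit order; the names and types below are invented for illustration:

    import java.util.Comparator;
    import java.util.List;

    // Hypothetical per-shard estimate: with an ascending primary sort, shards
    // whose minimum is smallest likely supply the first hits, so visit them first.
    record ShardEstimate(String shardId, long min, long max) {}

    final class ShardOrdering {
        static List<ShardEstimate> ascendingOrder(List<ShardEstimate> shards) {
            return shards.stream()
                .sorted(Comparator.comparingLong(ShardEstimate::min))
                .toList();
        }

        public static void main(String[] args) {
            System.out.println(ascendingOrder(List.of(
                new ShardEstimate("s1", 40, 90),
                new ShardEstimate("s0", 10, 50)
            ))); // s0 first
        }
    }
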
- final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchRequest.source(searchSourceBuilder); - // adding the maximum allowed number of script_fields to retrieve - int maxScriptFields = indexService.getIndexSettings().getMaxScriptFields(); - for (int i = 0; i < maxScriptFields; i++) { - searchSourceBuilder.scriptField( - "field" + i, - new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) - ); - } - final ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - try (ReaderContext reader = createReaderContext(indexService, indexShard)) { - try ( - SearchContext context = service.createContext( - reader, - request, - mock(SearchShardTask.class), - ResultsType.NONE, - randomBoolean() - ) + @Override + public BucketedSort newBucketedSort( + BigArrays bigArrays, + Object missingValue, + MultiValueMode sortMode, + XFieldComparatorSource.Nested nested, + SortOrder sortOrder, + DocValueFormat format, + int bucketSize, + BucketedSort.ExtraData extra ) { - assertNotNull(context); + throw new UnsupportedOperationException(); } - searchSourceBuilder.scriptField( - "anotherScriptField", - new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) - ); - IllegalArgumentException ex = expectThrows( - IllegalArgumentException.class, - () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ); - assertEquals( - "Trying to retrieve too many script_fields. Must be less than or equal to: [" - + maxScriptFields - + "] but was [" - + (maxScriptFields + 1) - + "]. 
This limit can be set by changing the [index.max_script_fields] index level setting.", - ex.getMessage() - ); - } - } - - public void testIgnoreScriptfieldIfSizeZero() throws IOException { - createIndex("index"); - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchRequest.source(searchSourceBuilder); - searchSourceBuilder.scriptField( - "field" + 0, - new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, emptyMap()) - ); - searchSourceBuilder.size(0); - final ShardSearchRequest request = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1.0f, - -1, - null - ); - try ( - ReaderContext reader = createReaderContext(indexService, indexShard); - SearchContext context = service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean()) - ) { - assertEquals(0, context.scriptFields().fields().size()); - } + }; } - /** - * test that creating more than the allowed number of scroll contexts throws an exception - */ - public void testMaxOpenScrollContexts() throws Exception { - createIndex("index"); - prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - final SearchService service = getInstanceFromNode(SearchService.class); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - // Open all possible scrolls, clear some of them, then open more until the limit is reached - LinkedList clearScrollIds = new LinkedList<>(); - - for (int i = 0; i < SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); i++) { - assertResponse(client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)), searchResponse -> { - if (randomInt(4) == 0) clearScrollIds.addLast(searchResponse.getScrollId()); - }); - } + private static SearchService.CanMatchContext createCanMatchContext( + ShardSearchRequest shardRequest, + IndexShard indexShard, + SearchExecutionContext searchExecutionContext, + XContentParserConfiguration parserConfig, + boolean throwException + ) { + return new SearchService.CanMatchContext(shardRequest, null, null, -1, -1) { + @Override + IndexShard getShard() { + return indexShard; + } - ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); - clearScrollRequest.setScrollIds(clearScrollIds); - client().clearScroll(clearScrollRequest).get(); + @Override + QueryRewriteContext getQueryRewriteContext(IndexService indexService) { + if (throwException) { + throw new IllegalArgumentException(); + } + return new QueryRewriteContext(parserConfig, null, System::currentTimeMillis); + } - for (int i = 0; i < clearScrollIds.size(); i++) { - client().prepareSearch("index").setSize(1).setScroll(TimeValue.timeValueMinutes(1)).get().decRef(); - } + @Override + SearchExecutionContext getSearchExecutionContext(Engine.Searcher searcher) { + return searchExecutionContext; + } - final ShardScrollRequestTest request = new 
ShardScrollRequestTest(indexShard.shardId()); - ElasticsearchException ex = expectThrows( - ElasticsearchException.class, - () -> service.createAndPutReaderContext( - request, - indexService, - indexShard, - indexShard.acquireSearcherSupplier(), - SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() - ) - ); - assertEquals( - "Trying to create too many scroll contexts. Must be less than or equal to: [" - + SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY) - + "]. " - + "This limit can be set by changing the [search.max_open_scroll_context] setting.", - ex.getMessage() - ); - assertEquals(RestStatus.TOO_MANY_REQUESTS, ex.status()); - - service.freeAllScrollContexts(); - } - - public void testOpenScrollContextsConcurrently() throws Exception { - createIndex("index"); - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); - final IndexShard indexShard = indexService.getShard(0); - - final int maxScrollContexts = SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); - final SearchService searchService = getInstanceFromNode(SearchService.class); - Thread[] threads = new Thread[randomIntBetween(2, 8)]; - CountDownLatch latch = new CountDownLatch(threads.length); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - latch.countDown(); - try { - latch.await(); - for (;;) { - final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); - try { - final ShardScrollRequestTest request = new ShardScrollRequestTest(indexShard.shardId()); - searchService.createAndPutReaderContext( - request, - indexService, - indexShard, - reader, - SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() - ); - } catch (ElasticsearchException e) { - assertThat( - e.getMessage(), - equalTo( - "Trying to create too many scroll contexts. Must be less than or equal to: " - + "[" - + maxScrollContexts - + "]. " - + "This limit can be set by changing the [search.max_open_scroll_context] setting." 
-
-    public void testOpenScrollContextsConcurrently() throws Exception {
-        createIndex("index");
-        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        final IndexShard indexShard = indexService.getShard(0);
-
-        final int maxScrollContexts = SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY);
-        final SearchService searchService = getInstanceFromNode(SearchService.class);
-        Thread[] threads = new Thread[randomIntBetween(2, 8)];
-        CountDownLatch latch = new CountDownLatch(threads.length);
-        for (int i = 0; i < threads.length; i++) {
-            threads[i] = new Thread(() -> {
-                latch.countDown();
-                try {
-                    latch.await();
-                    for (;;) {
-                        final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier();
-                        try {
-                            final ShardScrollRequestTest request = new ShardScrollRequestTest(indexShard.shardId());
-                            searchService.createAndPutReaderContext(
-                                request,
-                                indexService,
-                                indexShard,
-                                reader,
-                                SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis()
-                            );
-                        } catch (ElasticsearchException e) {
-                            assertThat(
-                                e.getMessage(),
-                                equalTo(
-                                    "Trying to create too many scroll contexts. Must be less than or equal to: "
-                                        + "["
-                                        + maxScrollContexts
-                                        + "]. "
-                                        + "This limit can be set by changing the [search.max_open_scroll_context] setting."
-                                )
-                            );
-                            return;
-                        }
-                    }
-                } catch (Exception e) {
-                    throw new AssertionError(e);
-                }
-            });
-            threads[i].setName("elasticsearch[node_s_0][search]");
-            threads[i].start();
-        }
-        for (Thread thread : threads) {
-            thread.join();
-        }
-        assertThat(searchService.getActiveContexts(), equalTo(maxScrollContexts));
-        searchService.freeAllScrollContexts();
-    }
-
-    public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin {
-        @Override
-        public List<QuerySpec<?>> getQueries() {
-            return singletonList(new QuerySpec<>("fail_on_rewrite_query", FailOnRewriteQueryBuilder::new, parseContext -> {
-                throw new UnsupportedOperationException("No query parser for this plugin");
-            }));
-        }
-    }
-
-    public static class FailOnRewriteQueryBuilder extends DummyQueryBuilder {
-
-        public FailOnRewriteQueryBuilder(StreamInput in) throws IOException {
-            super(in);
-        }
-
-        public FailOnRewriteQueryBuilder() {}
-
-        @Override
-        protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) {
-            if (queryRewriteContext.convertToSearchExecutionContext() != null) {
-                throw new IllegalStateException("Fail on rewrite phase");
-            }
-            return this;
-        }
-    }
-
-    private static class ShardScrollRequestTest extends ShardSearchRequest {
-        private Scroll scroll;
-
-        ShardScrollRequestTest(ShardId shardId) {
-            super(
-                OriginalIndices.NONE,
-                new SearchRequest().allowPartialSearchResults(true),
-                shardId,
-                0,
-                1,
-                AliasFilter.EMPTY,
-                1f,
-                -1,
-                null
-            );
-            this.scroll = new Scroll(TimeValue.timeValueMinutes(1));
-        }
-
-        @Override
-        public Scroll scroll() {
-            return this.scroll;
-        }
-    }
-
-    public void testCanMatch() throws Exception {
-        createIndex("index");
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        final IndexShard indexShard = indexService.getShard(0);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
-        assertTrue(
-            service.canMatch(
-                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
-            ).canMatch()
-        );
-
-        searchRequest.source(new SearchSourceBuilder());
-        assertTrue(
-            service.canMatch(
-                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
-            ).canMatch()
-        );
-
-        searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()));
-        assertTrue(
-            service.canMatch(
-                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
-            ).canMatch()
-        );
-
-        searchRequest.source(
-            new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
-                .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(0))
-        );
-        assertTrue(
-            service.canMatch(
-                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
-            ).canMatch()
-        );
-        searchRequest.source(
-            new SearchSourceBuilder().query(new MatchNoneQueryBuilder()).aggregation(new GlobalAggregationBuilder("test"))
-        );
-        assertTrue(
-            service.canMatch(
-                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
-            ).canMatch()
-        );
-
-        searchRequest.source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder()));
-        assertFalse(
-            service.canMatch(
-                new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null)
-            ).canMatch()
-        );
-        assertEquals(5, numWrapInvocations.get());
-
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            indexShard.shardId(),
-            0,
-            1,
-            AliasFilter.EMPTY,
-            1.0f,
-            -1,
-            null
-        );
-
-        /*
-         * Checks that canMatch takes into account the alias filter
-         */
-        // the source cannot be rewritten to a match_none
-        searchRequest.indices("alias").source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()));
-        assertFalse(
-            service.canMatch(
-                new ShardSearchRequest(
-                    OriginalIndices.NONE,
-                    searchRequest,
-                    indexShard.shardId(),
-                    0,
-                    1,
-                    AliasFilter.of(new TermQueryBuilder("foo", "bar"), "alias"),
-                    1f,
-                    -1,
-                    null
-                )
-            ).canMatch()
-        );
-        // the source can match and can be rewritten to a match_none, but not the alias filter
-        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
-        assertEquals(RestStatus.CREATED, response.status());
-        searchRequest.indices("alias").source(new SearchSourceBuilder().query(new TermQueryBuilder("id", "1")));
-        assertFalse(
-            service.canMatch(
-                new ShardSearchRequest(
-                    OriginalIndices.NONE,
-                    searchRequest,
-                    indexShard.shardId(),
-                    0,
-                    1,
-                    AliasFilter.of(new TermQueryBuilder("foo", "bar"), "alias"),
-                    1f,
-                    -1,
-                    null
-                )
-            ).canMatch()
-        );
-
-        CountDownLatch latch = new CountDownLatch(1);
-        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
-        // Because the foo field used in alias filter is unmapped the term query builder rewrite can resolve to a match no docs query,
-        // without acquiring a searcher and that means the wrapper is not called
-        assertEquals(5, numWrapInvocations.get());
-        service.executeQueryPhase(request, task, new ActionListener<>() {
-            @Override
-            public void onResponse(SearchPhaseResult searchPhaseResult) {
-                try {
-                    // make sure that the wrapper is called when the query is actually executed
-                    assertEquals(6, numWrapInvocations.get());
-                } finally {
-                    latch.countDown();
-                }
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                try {
-                    throw new AssertionError(e);
-                } finally {
-                    latch.countDown();
-                }
-            }
-        });
-        latch.await();
-    }
-
-    public void testCanRewriteToMatchNone() {
-        assertFalse(
-            SearchService.canRewriteToMatchNone(
-                new SearchSourceBuilder().query(new MatchNoneQueryBuilder()).aggregation(new GlobalAggregationBuilder("test"))
-            )
-        );
-        assertFalse(SearchService.canRewriteToMatchNone(new SearchSourceBuilder()));
-        assertFalse(SearchService.canRewriteToMatchNone(null));
-        assertFalse(
-            SearchService.canRewriteToMatchNone(
-                new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
-                    .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(0))
-            )
-        );
-        assertTrue(SearchService.canRewriteToMatchNone(new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar"))));
-        assertTrue(
-            SearchService.canRewriteToMatchNone(
-                new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
-                    .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(1))
-            )
-        );
-        assertFalse(
-            SearchService.canRewriteToMatchNone(
-                new SearchSourceBuilder().query(new MatchNoneQueryBuilder())
-                    .aggregation(new TermsAggregationBuilder("test").userValueTypeHint(ValueType.STRING).minDocCount(1))
-                    .suggest(new SuggestBuilder())
-            )
-        );
-        assertFalse(
-            SearchService.canRewriteToMatchNone(
-                new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")).suggest(new SuggestBuilder())
-            )
-        );
-    }
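[Note on the removed testCanRewriteToMatchNone: taken together, its assertions encode a small decision rule. A paraphrase of that rule as a sketch (not the production implementation in SearchService; `mustVisitAllDocs()` on AggregatorFactories.Builder is assumed to be the relevant check):

    // Sketch: a shard request can be rewritten to match_none only when it carries a
    // query, and neither suggesters nor aggregations that must see every document
    // (e.g. min_doc_count == 0 buckets, global aggs) still force shard execution.
    static boolean canRewriteToMatchNoneSketch(SearchSourceBuilder source) {
        if (source == null || source.query() == null || source.suggest() != null) {
            return false;
        }
        AggregatorFactories.Builder aggs = source.aggregations();
        return aggs == null || aggs.mustVisitAllDocs() == false;
    }
]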
-
-    public void testSetSearchThrottled() throws IOException {
-        createIndex("throttled_threadpool_index");
-        client().execute(
-            InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
-            new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(
-                "throttled_threadpool_index",
-                IndexSettings.INDEX_SEARCH_THROTTLED.getKey(),
-                "true"
-            )
-        ).actionGet();
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        Index index = resolveIndex("throttled_threadpool_index");
-        assertTrue(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled());
-        prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertSearchHits(
-            client().prepareSearch("throttled_threadpool_index")
-                .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED)
-                .setSize(1),
-            "1"
-        );
-        // we add a search action listener in a plugin above to assert that this is actually used
-        client().execute(
-            InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
-            new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(
-                "throttled_threadpool_index",
-                IndexSettings.INDEX_SEARCH_THROTTLED.getKey(),
-                "false"
-            )
-        ).actionGet();
-
-        IllegalArgumentException iae = expectThrows(
-            IllegalArgumentException.class,
-            () -> indicesAdmin().prepareUpdateSettings("throttled_threadpool_index")
-                .setSettings(Settings.builder().put(IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), false))
-                .get()
-        );
-        assertEquals("can not update private setting [index.search.throttled]; this setting is managed by Elasticsearch", iae.getMessage());
-        assertFalse(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled());
-    }
-
-    public void testAggContextGetsMatchAll() throws IOException {
-        createIndex("test");
-        withAggregationContext("test", context -> assertThat(context.query(), equalTo(new MatchAllDocsQuery())));
-    }
-
-    public void testAggContextGetsNestedFilter() throws IOException {
-        XContentBuilder mapping = JsonXContent.contentBuilder().startObject().startObject("properties");
-        mapping.startObject("nested").field("type", "nested").endObject();
-        mapping.endObject().endObject();
-
-        createIndex("test", Settings.EMPTY, mapping);
-        withAggregationContext("test", context -> assertThat(context.query(), equalTo(new MatchAllDocsQuery())));
-    }
-
-    /**
-     * Build an {@link AggregationContext} with the named index.
-     */
-    private void withAggregationContext(String index, Consumer<AggregationContext> check) throws IOException {
-        IndexService indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(resolveIndex(index));
-        ShardId shardId = new ShardId(indexService.index(), 0);
-
-        SearchRequest request = new SearchRequest().indices(index)
-            .source(new SearchSourceBuilder().aggregation(new FiltersAggregationBuilder("test", new MatchAllQueryBuilder())))
-            .allowPartialSearchResults(false);
-        ShardSearchRequest shardRequest = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            request,
-            shardId,
-            0,
-            1,
-            AliasFilter.EMPTY,
-            1,
-            0,
-            null
-        );
-
-        try (ReaderContext readerContext = createReaderContext(indexService, indexService.getShard(0))) {
-            try (
-                SearchContext context = getInstanceFromNode(SearchService.class).createContext(
-                    readerContext,
-                    shardRequest,
-                    mock(SearchShardTask.class),
-                    ResultsType.QUERY,
-                    true
-                )
-            ) {
-                check.accept(context.aggregations().factories().context());
-            }
-        }
-    }
-
-    public void testExpandSearchThrottled() {
-        createIndex("throttled_threadpool_index");
-        client().execute(
-            InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
-            new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(
-                "throttled_threadpool_index",
-                IndexSettings.INDEX_SEARCH_THROTTLED.getKey(),
-                "true"
-            )
-        ).actionGet();
-
-        prepareIndex("throttled_threadpool_index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertHitCount(client().prepareSearch(), 1L);
-        assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L);
-    }
-
-    public void testExpandSearchFrozen() {
-        String indexName = "frozen_index";
-        createIndex(indexName);
-        client().execute(
-            InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
-            new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request(indexName, "index.frozen", "true")
-        ).actionGet();
-
-        prepareIndex(indexName).setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        assertHitCount(client().prepareSearch(), 0L);
-        assertHitCount(client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED), 1L);
-        assertWarnings(TransportSearchAction.FROZEN_INDICES_DEPRECATION_MESSAGE.replace("{}", indexName));
-    }
-
-    public void testCreateReduceContext() {
-        SearchService service = getInstanceFromNode(SearchService.class);
-        AggregationReduceContext.Builder reduceContextBuilder = service.aggReduceContextBuilder(
-            () -> false,
-            new SearchRequest().source(new SearchSourceBuilder()).source().aggregations()
-        );
-        {
-            AggregationReduceContext reduceContext = reduceContextBuilder.forFinalReduction();
-            expectThrows(
-                MultiBucketConsumerService.TooManyBucketsException.class,
-                () -> reduceContext.consumeBucketsAndMaybeBreak(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS + 1)
-            );
-        }
-        {
-            AggregationReduceContext reduceContext = reduceContextBuilder.forPartialReduction();
-            reduceContext.consumeBucketsAndMaybeBreak(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS + 1);
-        }
-    }
-
-    public void testMultiBucketConsumerServiceCB() {
-        MultiBucketConsumerService service = new MultiBucketConsumerService(
-            getInstanceFromNode(ClusterService.class),
-            Settings.EMPTY,
-            new NoopCircuitBreaker("test") {
-
-                @Override
-                public void addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException {
-                    throw new CircuitBreakingException("tripped", getDurability());
-                }
-            }
-        );
-        // for partial
-        {
-            IntConsumer consumer = service.createForPartial();
-            for (int i = 0; i < 1023; i++) {
-                consumer.accept(0);
-            }
-            CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0));
-            assertThat(ex.getMessage(), equalTo("tripped"));
-        }
-        // for final
-        {
-            IntConsumer consumer = service.createForFinal();
-            for (int i = 0; i < 1023; i++) {
-                consumer.accept(0);
-            }
-            CircuitBreakingException ex = expectThrows(CircuitBreakingException.class, () -> consumer.accept(0));
-            assertThat(ex.getMessage(), equalTo("tripped"));
-        }
-    }
-
-    public void testCreateSearchContext() throws IOException {
-        String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT);
-        IndexService indexService = createIndex(index);
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        ShardId shardId = new ShardId(indexService.index(), 0);
-        long nowInMillis = System.currentTimeMillis();
-        String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10);
-        SearchRequest searchRequest = new SearchRequest();
-        searchRequest.allowPartialSearchResults(randomBoolean());
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            shardId,
-            0,
-            indexService.numberOfShards(),
-            AliasFilter.EMPTY,
-            1f,
-            nowInMillis,
-            clusterAlias
-        );
-        try (SearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) {
-            SearchShardTarget searchShardTarget = searchContext.shardTarget();
-            SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext();
-            String expectedIndexName = clusterAlias == null ? index : clusterAlias + ":" + index;
-            assertEquals(expectedIndexName, searchExecutionContext.getFullyQualifiedIndex().getName());
-            assertEquals(expectedIndexName, searchShardTarget.getFullyQualifiedIndexName());
-            assertEquals(clusterAlias, searchShardTarget.getClusterAlias());
-            assertEquals(shardId, searchShardTarget.getShardId());
-
-            assertNull(searchContext.dfsResult());
-            searchContext.addDfsResult();
-            assertSame(searchShardTarget, searchContext.dfsResult().getSearchShardTarget());
-
-            assertNull(searchContext.queryResult());
-            searchContext.addQueryResult();
-            assertSame(searchShardTarget, searchContext.queryResult().getSearchShardTarget());
-
-            assertNull(searchContext.fetchResult());
-            searchContext.addFetchResult();
-            assertSame(searchShardTarget, searchContext.fetchResult().getSearchShardTarget());
-        }
-    }
-
-    /**
-     * While we have no NPE in DefaultContext constructor anymore, we still want to guard against it (or other failures) in the future to
-     * avoid leaking searchers.
-     */
-    public void testCreateSearchContextFailure() throws Exception {
-        final String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT);
-        final IndexService indexService = createIndex(index);
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        final ShardId shardId = new ShardId(indexService.index(), 0);
-        final ShardSearchRequest request = new ShardSearchRequest(shardId, 0, null) {
             @Override
-            public SearchType searchType() {
-                // induce an artificial NPE
-                throw new NullPointerException("expected");
+            IndexService getIndexService() {
+                // it's ok to return null because the three above methods are overridden
+                return null;
             }
         };
-        try (ReaderContext reader = createReaderContext(indexService, indexService.getShard(shardId.id()))) {
-            NullPointerException e = expectThrows(
-                NullPointerException.class,
-                () -> service.createContext(reader, request, mock(SearchShardTask.class), ResultsType.NONE, randomBoolean())
-            );
-            assertEquals("expected", e.getMessage());
-        }
-        // Needs to busily assert because Engine#refreshNeeded can increase the refCount.
-        assertBusy(
-            () -> assertEquals("should have 2 store refs (IndexService + InternalEngine)", 2, indexService.getShard(0).store().refCount())
-        );
-    }
-
-    public void testMatchNoDocsEmptyResponse() throws InterruptedException {
-        createIndex("index");
-        Thread currentThread = Thread.currentThread();
-        SearchService service = getInstanceFromNode(SearchService.class);
-        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        IndexShard indexShard = indexService.getShard(0);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false)
-            .source(new SearchSourceBuilder().aggregation(AggregationBuilders.count("count").field("value")));
-        ShardSearchRequest shardRequest = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            indexShard.shardId(),
-            0,
-            5,
-            AliasFilter.EMPTY,
-            1.0f,
-            0,
-            null
-        );
-        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
-
-        {
-            CountDownLatch latch = new CountDownLatch(1);
-            shardRequest.source().query(new MatchAllQueryBuilder());
-            service.executeQueryPhase(shardRequest, task, new ActionListener<>() {
-                @Override
-                public void onResponse(SearchPhaseResult result) {
-                    try {
-                        assertNotSame(Thread.currentThread(), currentThread);
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
-                        assertThat(result, instanceOf(QuerySearchResult.class));
-                        assertFalse(result.queryResult().isNull());
-                        assertNotNull(result.queryResult().topDocs());
-                        assertNotNull(result.queryResult().aggregations());
-                    } finally {
-                        latch.countDown();
-                    }
-                }
-
-                @Override
-                public void onFailure(Exception exc) {
-                    try {
-                        throw new AssertionError(exc);
-                    } finally {
-                        latch.countDown();
-                    }
-                }
-            });
-            latch.await();
-        }
-
-        {
-            CountDownLatch latch = new CountDownLatch(1);
-            shardRequest.source().query(new MatchNoneQueryBuilder());
-            service.executeQueryPhase(shardRequest, task, new ActionListener<>() {
-                @Override
-                public void onResponse(SearchPhaseResult result) {
-                    try {
-                        assertNotSame(Thread.currentThread(), currentThread);
-                        assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
-                        assertThat(result, instanceOf(QuerySearchResult.class));
-                        assertFalse(result.queryResult().isNull());
-                        assertNotNull(result.queryResult().topDocs());
-                        assertNotNull(result.queryResult().aggregations());
-                    } finally {
-                        latch.countDown();
-                    }
-                }
-
-                @Override
-                public void onFailure(Exception exc) {
-                    try {
-                        throw new AssertionError(exc);
-                    } finally {
-                        latch.countDown();
-                    }
-                }
-            });
-            latch.await();
-        }
-
-        {
-            CountDownLatch latch = new CountDownLatch(1);
-            shardRequest.canReturnNullResponseIfMatchNoDocs(true);
-            service.executeQueryPhase(shardRequest, task, new ActionListener<>() {
-                @Override
-                public void onResponse(SearchPhaseResult result) {
-                    try {
-                        // make sure we don't use the search threadpool
-                        assertSame(Thread.currentThread(), currentThread);
-                        assertThat(result, instanceOf(QuerySearchResult.class));
-                        assertTrue(result.queryResult().isNull());
-                    } finally {
-                        latch.countDown();
-                    }
-                }
-
-                @Override
-                public void onFailure(Exception e) {
-                    try {
-                        throw new AssertionError(e);
-                    } finally {
-                        latch.countDown();
-                    }
-                }
-            });
-            latch.await();
-        }
-    }
-
-    public void testDeleteIndexWhileSearch() throws Exception {
-        createIndex("test");
-        int numDocs = randomIntBetween(1, 20);
-        for (int i = 0; i < numDocs; i++) {
-            prepareIndex("test").setSource("f", "v").get();
-        }
-        indicesAdmin().prepareRefresh("test").get();
-        AtomicBoolean stopped = new AtomicBoolean(false);
-        Thread[] searchers = new Thread[randomIntBetween(1, 4)];
-        CountDownLatch latch = new CountDownLatch(searchers.length);
-        for (int i = 0; i < searchers.length; i++) {
-            searchers[i] = new Thread(() -> {
-                latch.countDown();
-                while (stopped.get() == false) {
-                    try {
-                        client().prepareSearch("test").setRequestCache(false).get().decRef();
-                    } catch (Exception ignored) {
-                        return;
-                    }
-                }
-            });
-            searchers[i].start();
-        }
-        latch.await();
-        indicesAdmin().prepareDelete("test").get();
-        stopped.set(true);
-        for (Thread searcher : searchers) {
-            searcher.join();
-        }
-    }
-
-    public void testLookUpSearchContext() throws Exception {
-        createIndex("index");
-        SearchService searchService = getInstanceFromNode(SearchService.class);
-        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        IndexShard indexShard = indexService.getShard(0);
-        List<ShardSearchContextId> contextIds = new ArrayList<>();
-        int numContexts = randomIntBetween(1, 10);
-        CountDownLatch latch = new CountDownLatch(1);
-        indexShard.getThreadPool().executor(ThreadPool.Names.SEARCH).execute(() -> {
-            try {
-                for (int i = 0; i < numContexts; i++) {
-                    ShardSearchRequest request = new ShardSearchRequest(
-                        OriginalIndices.NONE,
-                        new SearchRequest().allowPartialSearchResults(true),
-                        indexShard.shardId(),
-                        0,
-                        1,
-                        AliasFilter.EMPTY,
-                        1.0f,
-                        -1,
-                        null
-                    );
-                    final ReaderContext context = searchService.createAndPutReaderContext(
-                        request,
-                        indexService,
-                        indexShard,
-                        indexShard.acquireSearcherSupplier(),
-                        SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis()
-                    );
-                    assertThat(context.id().getId(), equalTo((long) (i + 1)));
-                    contextIds.add(context.id());
-                }
-                assertThat(searchService.getActiveContexts(), equalTo(contextIds.size()));
-                while (contextIds.isEmpty() == false) {
-                    final ShardSearchContextId contextId = randomFrom(contextIds);
-                    assertFalse(searchService.freeReaderContext(new ShardSearchContextId(UUIDs.randomBase64UUID(), contextId.getId())));
-                    assertThat(searchService.getActiveContexts(), equalTo(contextIds.size()));
-                    if (randomBoolean()) {
-                        assertTrue(searchService.freeReaderContext(contextId));
-                    } else {
-                        assertTrue(
-                            searchService.freeReaderContext((new ShardSearchContextId(contextId.getSessionId(), contextId.getId())))
-                        );
-                    }
-                    contextIds.remove(contextId);
-                    assertThat(searchService.getActiveContexts(), equalTo(contextIds.size()));
-                    assertFalse(searchService.freeReaderContext(contextId));
-                    assertThat(searchService.getActiveContexts(), equalTo(contextIds.size()));
-                }
-            } finally {
-                latch.countDown();
-            }
-        });
-        latch.await();
-    }
-
-    public void testOpenReaderContext() {
-        createIndex("index");
-        SearchService searchService = getInstanceFromNode(SearchService.class);
-        PlainActionFuture<ShardSearchContextId> future = new PlainActionFuture<>();
-        searchService.openReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future);
-        future.actionGet();
-        assertThat(searchService.getActiveContexts(), equalTo(1));
-        assertTrue(searchService.freeReaderContext(future.actionGet()));
-    }
-
-    public void testCancelQueryPhaseEarly() throws Exception {
-        createIndex("index");
-        final MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class);
-        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        final IndexShard indexShard = indexService.getShard(0);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            indexShard.shardId(),
-            0,
-            1,
-            AliasFilter.EMPTY,
-            1.0f,
-            -1,
-            null
-        );
-
-        CountDownLatch latch1 = new CountDownLatch(1);
-        SearchShardTask task = new SearchShardTask(1, "", "", "", TaskId.EMPTY_TASK_ID, emptyMap());
-        service.executeQueryPhase(request, task, new ActionListener<>() {
-            @Override
-            public void onResponse(SearchPhaseResult searchPhaseResult) {
-                service.freeReaderContext(searchPhaseResult.getContextId());
-                latch1.countDown();
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                try {
-                    fail("Search should not be cancelled");
-                } finally {
-                    latch1.countDown();
-                }
-            }
-        });
-        latch1.await();
-
-        CountDownLatch latch2 = new CountDownLatch(1);
-        service.executeDfsPhase(request, task, new ActionListener<>() {
-            @Override
-            public void onResponse(SearchPhaseResult searchPhaseResult) {
-                service.freeReaderContext(searchPhaseResult.getContextId());
-                latch2.countDown();
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                try {
-                    fail("Search should not be cancelled");
-                } finally {
-                    latch2.countDown();
-                }
-            }
-        });
-        latch2.await();
-
-        AtomicBoolean searchContextCreated = new AtomicBoolean(false);
-        service.setOnCreateSearchContext(c -> searchContextCreated.set(true));
-        CountDownLatch latch3 = new CountDownLatch(1);
-        TaskCancelHelper.cancel(task, "simulated");
-        service.executeQueryPhase(request, task, new ActionListener<>() {
-            @Override
-            public void onResponse(SearchPhaseResult searchPhaseResult) {
-                try {
-                    fail("Search not cancelled early");
-                } finally {
-                    service.freeReaderContext(searchPhaseResult.getContextId());
-                    searchPhaseResult.decRef();
-                    latch3.countDown();
-                }
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                assertThat(e, is(instanceOf(TaskCancelledException.class)));
-                assertThat(e.getMessage(), is("task cancelled [simulated]"));
-                assertThat(((TaskCancelledException) e).status(), is(RestStatus.BAD_REQUEST));
-                assertThat(searchContextCreated.get(), is(false));
-                latch3.countDown();
-            }
-        });
-        latch3.await();
-
-        searchContextCreated.set(false);
-        CountDownLatch latch4 = new CountDownLatch(1);
-        service.executeDfsPhase(request, task, new ActionListener<>() {
-            @Override
-            public void onResponse(SearchPhaseResult searchPhaseResult) {
-                try {
-                    fail("Search not cancelled early");
-                } finally {
-                    service.freeReaderContext(searchPhaseResult.getContextId());
-                    latch4.countDown();
-                }
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                assertThat(e, is(instanceOf(TaskCancelledException.class)));
-                assertThat(e.getMessage(), is("task cancelled [simulated]"));
-                assertThat(((TaskCancelledException) e).status(), is(RestStatus.BAD_REQUEST));
-                assertThat(searchContextCreated.get(), is(false));
-                latch4.countDown();
-            }
-        });
-        latch4.await();
-    }
-
-    public void testCancelFetchPhaseEarly() throws Exception {
-        createIndex("index");
-        final MockSearchService service = (MockSearchService) getInstanceFromNode(SearchService.class);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
-
-        AtomicBoolean searchContextCreated = new AtomicBoolean(false);
-        service.setOnCreateSearchContext(c -> searchContextCreated.set(true));
-
-        // Test fetch phase is cancelled early
-        String scrollId;
-        var searchResponse = client().search(searchRequest.allowPartialSearchResults(false).scroll(TimeValue.timeValueMinutes(10))).get();
-        try {
-            scrollId = searchResponse.getScrollId();
-        } finally {
-            searchResponse.decRef();
-        }
-
-        client().searchScroll(new SearchScrollRequest(scrollId)).get().decRef();
-        assertThat(searchContextCreated.get(), is(true));
-
-        ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
-        clearScrollRequest.addScrollId(scrollId);
-        client().clearScroll(clearScrollRequest);
-
-        searchResponse = client().search(searchRequest.allowPartialSearchResults(false).scroll(TimeValue.timeValueMinutes(10))).get();
-        try {
-            scrollId = searchResponse.getScrollId();
-        } finally {
-            searchResponse.decRef();
-        }
-        searchContextCreated.set(false);
-        service.setOnCheckCancelled(t -> {
-            SearchShardTask task = new SearchShardTask(randomLong(), "transport", "action", "", TaskId.EMPTY_TASK_ID, emptyMap());
-            TaskCancelHelper.cancel(task, "simulated");
-            return task;
-        });
-        CountDownLatch latch = new CountDownLatch(1);
-        client().searchScroll(new SearchScrollRequest(scrollId), new ActionListener<>() {
-            @Override
-            public void onResponse(SearchResponse searchResponse) {
-                try {
-                    fail("Search not cancelled early");
-                } finally {
-                    latch.countDown();
-                }
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                Throwable cancelledExc = e.getCause().getCause();
-                assertThat(cancelledExc, is(instanceOf(TaskCancelledException.class)));
-                assertThat(cancelledExc.getMessage(), is("task cancelled [simulated]"));
-                assertThat(((TaskCancelledException) cancelledExc).status(), is(RestStatus.BAD_REQUEST));
-                latch.countDown();
-            }
-        });
-        latch.await();
-        assertThat(searchContextCreated.get(), is(false));
-
-        clearScrollRequest.setScrollIds(singletonList(scrollId));
-        client().clearScroll(clearScrollRequest);
-    }
-
-    public void testWaitOnRefresh() throws ExecutionException, InterruptedException {
-        createIndex("index");
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        final IndexShard indexShard = indexService.getShard(0);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
-        searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30));
-        searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 }));
-
-        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
-        assertEquals(RestStatus.CREATED, response.status());
-
-        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            indexShard.shardId(),
-            0,
-            1,
-            AliasFilter.EMPTY,
-            1.0f,
-            -1,
-            null,
-            null,
-            null
-        );
-        PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>();
-        service.executeQueryPhase(request, task, future.delegateFailure((l, r) -> {
-            assertEquals(1, r.queryResult().getTotalHits().value());
-            l.onResponse(null);
-        }));
-        future.get();
-    }
-
-    public void testWaitOnRefreshFailsWithRefreshesDisabled() {
-        createIndex("index", Settings.builder().put("index.refresh_interval", "-1").build());
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        final IndexShard indexShard = indexService.getShard(0);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
-        searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30));
-        searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 }));
-
-        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
-        assertEquals(RestStatus.CREATED, response.status());
-
-        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
-        PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>();
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            indexShard.shardId(),
-            0,
-            1,
-            AliasFilter.EMPTY,
-            1.0f,
-            -1,
-            null,
-            null,
-            null
-        );
-        service.executeQueryPhase(request, task, future);
-        IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, future::actionGet);
-        assertThat(
-            illegalArgumentException.getMessage(),
-            containsString("Cannot use wait_for_checkpoints with [index.refresh_interval=-1]")
-        );
-    }
-
-    public void testWaitOnRefreshFailsIfCheckpointNotIndexed() {
-        createIndex("index");
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        final IndexShard indexShard = indexService.getShard(0);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
-        // Increased timeout to avoid cancelling the search task prior to its completion,
-        // as we expect to raise an Exception. Timeout itself is tested on the following `testWaitOnRefreshTimeout` test.
-        searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(200, 300)));
-        searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 1 }));
-
-        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
-        assertEquals(RestStatus.CREATED, response.status());
-
-        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
-        PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>();
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            indexShard.shardId(),
-            0,
-            1,
-            AliasFilter.EMPTY,
-            1.0f,
-            -1,
-            null,
-            null,
-            null
-        );
-        service.executeQueryPhase(request, task, future);
-
-        IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, future::actionGet);
-        assertThat(
-            ex.getMessage(),
-            containsString("Cannot wait for unissued seqNo checkpoint [wait_for_checkpoint=1, max_issued_seqNo=0]")
-        );
-    }
-
-    public void testWaitOnRefreshTimeout() {
-        createIndex("index", Settings.builder().put("index.refresh_interval", "60s").build());
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        final IndexShard indexShard = indexService.getShard(0);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
-        searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueMillis(randomIntBetween(10, 100)));
-        searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 }));
-
-        final DocWriteResponse response = prepareIndex("index").setSource("id", "1").get();
-        assertEquals(RestStatus.CREATED, response.status());
-
-        SearchShardTask task = new SearchShardTask(123L, "", "", "", null, emptyMap());
-        PlainActionFuture<SearchPhaseResult> future = new PlainActionFuture<>();
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            indexShard.shardId(),
-            0,
-            1,
-            AliasFilter.EMPTY,
-            1.0f,
-            -1,
-            null,
-            null,
-            null
-        );
-        service.executeQueryPhase(request, task, future);
-
-        SearchTimeoutException ex = expectThrows(SearchTimeoutException.class, future::actionGet);
-        assertThat(ex.getMessage(), containsString("Wait for seq_no [0] refreshed timed out ["));
-    }
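[Note on the removed wait-on-refresh tests: all four wire the same two request options. A minimal sketch of that wiring, with names taken directly from the deleted code (timeout values here are illustrative):

    // Sketch: hold the shard query until seq_no 0 is visible on "index",
    // giving up after 30 seconds.
    SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
    searchRequest.setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30));
    searchRequest.setWaitForCheckpoints(Collections.singletonMap("index", new long[] { 0 }));
]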
-
-    public void testMinimalSearchSourceInShardRequests() {
-        createIndex("test");
-        int numDocs = between(0, 10);
-        for (int i = 0; i < numDocs; i++) {
-            prepareIndex("test").setSource("id", Integer.toString(i)).get();
-        }
-        indicesAdmin().prepareRefresh("test").get();
-
-        BytesReference pitId = client().execute(
-            TransportOpenPointInTimeAction.TYPE,
-            new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(10))
-        ).actionGet().getPointInTimeId();
-        final MockSearchService searchService = (MockSearchService) getInstanceFromNode(SearchService.class);
-        final List<ShardSearchRequest> shardRequests = new CopyOnWriteArrayList<>();
-        searchService.setOnCreateSearchContext(ctx -> shardRequests.add(ctx.request()));
-        try {
-            assertHitCount(
-                client().prepareSearch()
-                    .setSource(
-                        new SearchSourceBuilder().size(between(numDocs, numDocs * 2)).pointInTimeBuilder(new PointInTimeBuilder(pitId))
-                    ),
-                numDocs
-            );
-        } finally {
-            client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet();
-        }
-        assertThat(shardRequests, not(emptyList()));
-        for (ShardSearchRequest shardRequest : shardRequests) {
-            assertNotNull(shardRequest.source());
-            assertNotNull(shardRequest.source().pointInTimeBuilder());
-            assertThat(shardRequest.source().pointInTimeBuilder().getEncodedId(), equalTo(BytesArray.EMPTY));
-        }
-    }
-
-    public void testDfsQueryPhaseRewrite() {
-        createIndex("index");
-        prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-        final SearchService service = getInstanceFromNode(SearchService.class);
-        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-        final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
-        final IndexShard indexShard = indexService.getShard(0);
-        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
-        searchRequest.source(SearchSourceBuilder.searchSource().query(new TestRewriteCounterQueryBuilder()));
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            searchRequest,
-            indexShard.shardId(),
-            0,
-            1,
-            AliasFilter.EMPTY,
-            1.0f,
-            -1,
-            null
-        );
-        final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier();
-        ReaderContext context = service.createAndPutReaderContext(
-            request,
-            indexService,
-            indexShard,
-            reader,
-            SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis()
-        );
-        PlainActionFuture<QuerySearchResult> plainActionFuture = new PlainActionFuture<>();
-        service.executeQueryPhase(
-            new QuerySearchRequest(null, context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)),
-            new SearchShardTask(42L, "", "", "", null, emptyMap()),
-            plainActionFuture
-        );
-
-        plainActionFuture.actionGet();
-        assertThat(((TestRewriteCounterQueryBuilder) request.source().query()).asyncRewriteCount, equalTo(1));
-        final ShardSearchContextId contextId = context.id();
-        assertTrue(service.freeReaderContext(contextId));
-    }
-
-    public void testEnableSearchWorkerThreads() throws IOException {
-        IndexService indexService = createIndex("index", Settings.EMPTY);
-        IndexShard indexShard = indexService.getShard(0);
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            new SearchRequest().allowPartialSearchResults(randomBoolean()),
-            indexShard.shardId(),
-            0,
-            indexService.numberOfShards(),
-            AliasFilter.EMPTY,
-            1f,
-            System.currentTimeMillis(),
-            null
-        );
-        try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) {
-            SearchService service = getInstanceFromNode(SearchService.class);
-            SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap());
-
-            try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) {
-                assertTrue(searchContext.searcher().hasExecutor());
-            }
-
-            try {
-                ClusterUpdateSettingsResponse response = client().admin()
-                    .cluster()
-                    .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
-                    .setPersistentSettings(Settings.builder().put(SEARCH_WORKER_THREADS_ENABLED.getKey(), false).build())
-                    .get();
-                assertTrue(response.isAcknowledged());
-                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) {
-                    assertFalse(searchContext.searcher().hasExecutor());
-                }
-            } finally {
-                // reset original default setting
-                client().admin()
-                    .cluster()
-                    .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
-                    .setPersistentSettings(Settings.builder().putNull(SEARCH_WORKER_THREADS_ENABLED.getKey()).build())
-                    .get();
-                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) {
-                    assertTrue(searchContext.searcher().hasExecutor());
-                }
-            }
-        }
-    }
-
-    /**
-     * Verify that a single slice is created for requests that don't support parallel collection, while an executor is still
-     * provided to the searcher to parallelize other operations. Also ensure multiple slices are created for requests that do support
-     * parallel collection.
-     */
-    public void testSlicingBehaviourForParallelCollection() throws Exception {
-        IndexService indexService = createIndex("index", Settings.EMPTY);
-        ThreadPoolExecutor executor = (ThreadPoolExecutor) indexService.getThreadPool().executor(ThreadPool.Names.SEARCH);
-        final int configuredMaxPoolSize = 10;
-        executor.setMaximumPoolSize(configuredMaxPoolSize); // We set this explicitly to be independent of CPU cores.
-        int numDocs = randomIntBetween(50, 100);
-        for (int i = 0; i < numDocs; i++) {
-            prepareIndex("index").setId(String.valueOf(i)).setSource("field", "value").get();
-            if (i % 5 == 0) {
-                indicesAdmin().prepareRefresh("index").get();
-            }
-        }
-        final IndexShard indexShard = indexService.getShard(0);
-        ShardSearchRequest request = new ShardSearchRequest(
-            OriginalIndices.NONE,
-            new SearchRequest().allowPartialSearchResults(randomBoolean()),
-            indexShard.shardId(),
-            0,
-            indexService.numberOfShards(),
-            AliasFilter.EMPTY,
-            1f,
-            System.currentTimeMillis(),
-            null
-        );
-        SearchService service = getInstanceFromNode(SearchService.class);
-        NonCountingTermQuery termQuery = new NonCountingTermQuery(new Term("field", "value"));
-        assertEquals(0, executor.getCompletedTaskCount());
-        try (ReaderContext readerContext = createReaderContext(indexService, indexShard)) {
-            SearchShardTask task = new SearchShardTask(0, "type", "action", "description", null, emptyMap());
-            {
-                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, true)) {
-                    ContextIndexSearcher searcher = searchContext.searcher();
-                    assertTrue(searcher.hasExecutor());
-
-                    final int maxPoolSize = executor.getMaximumPoolSize();
-                    assertEquals(
-                        "Sanity check to ensure this isn't the default of 1 when pool size is unset",
-                        configuredMaxPoolSize,
-                        maxPoolSize
-                    );
-
-                    final int expectedSlices = ContextIndexSearcher.computeSlices(
-                        searcher.getIndexReader().leaves(),
-                        maxPoolSize,
-                        1
-                    ).length;
-                    assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices);
-
-                    final long priorExecutorTaskCount = executor.getCompletedTaskCount();
-                    searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices()));
-                    assertBusy(
-                        () -> assertEquals(
-                            "DFS supports parallel collection, so the number of slices should be > 1.",
-                            expectedSlices - 1, // one slice executes on the calling thread
-                            executor.getCompletedTaskCount() - priorExecutorTaskCount
-                        )
-                    );
-                }
-            }
-            {
-                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) {
-                    ContextIndexSearcher searcher = searchContext.searcher();
-                    assertTrue(searcher.hasExecutor());
-
-                    final int maxPoolSize = executor.getMaximumPoolSize();
-                    assertEquals(
-                        "Sanity check to ensure this isn't the default of 1 when pool size is unset",
-                        configuredMaxPoolSize,
-                        maxPoolSize
-                    );
-
-                    final int expectedSlices = ContextIndexSearcher.computeSlices(
-                        searcher.getIndexReader().leaves(),
-                        maxPoolSize,
-                        1
-                    ).length;
-                    assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices);
-
-                    final long priorExecutorTaskCount = executor.getCompletedTaskCount();
-                    searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices()));
-                    assertBusy(
-                        () -> assertEquals(
-                            "QUERY supports parallel collection when enabled, so the number of slices should be > 1.",
-                            expectedSlices - 1, // one slice executes on the calling thread
-                            executor.getCompletedTaskCount() - priorExecutorTaskCount
-                        )
-                    );
-                }
-            }
-            {
-                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.FETCH, true)) {
-                    ContextIndexSearcher searcher = searchContext.searcher();
-                    assertFalse(searcher.hasExecutor());
-                    final long priorExecutorTaskCount = executor.getCompletedTaskCount();
-                    searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices()));
-                    assertBusy(
-                        () -> assertEquals(
-                            "The number of slices should be 1 as FETCH does not support parallel collection and thus runs on the calling"
-                                + " thread.",
-                            0,
-                            executor.getCompletedTaskCount() - priorExecutorTaskCount
-                        )
-                    );
-                }
-            }
-            {
-                try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.NONE, true)) {
-                    ContextIndexSearcher searcher = searchContext.searcher();
-                    assertFalse(searcher.hasExecutor());
-                    final long priorExecutorTaskCount = executor.getCompletedTaskCount();
-                    searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices()));
-                    assertBusy(
-                        () -> assertEquals(
-                            "The number of slices should be 1 as NONE does not support parallel collection.",
-                            0, // zero since one slice executes on the calling thread
-                            executor.getCompletedTaskCount() - priorExecutorTaskCount
-                        )
-                    );
-                }
-            }
-
-            try {
-                ClusterUpdateSettingsResponse response = client().admin()
-                    .cluster()
-                    .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
-                    .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build())
-                    .get();
-                assertTrue(response.isAcknowledged());
-                {
-                    try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) {
-                        ContextIndexSearcher searcher = searchContext.searcher();
-                        assertFalse(searcher.hasExecutor());
-                        final long priorExecutorTaskCount = executor.getCompletedTaskCount();
-                        searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices()));
-                        assertBusy(
-                            () -> assertEquals(
-                                "The number of slices should be 1 when QUERY parallel collection is disabled.",
-                                0, // zero since one slice executes on the calling thread
-                                executor.getCompletedTaskCount() - priorExecutorTaskCount
-                            )
-                        );
-                    }
-                }
-            } finally {
-                // Reset to the original default setting and check to ensure it takes effect.
-                client().admin()
-                    .cluster()
-                    .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
-                    .setPersistentSettings(Settings.builder().putNull(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey()).build())
-                    .get();
-                {
-                    try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true)) {
-                        ContextIndexSearcher searcher = searchContext.searcher();
-                        assertTrue(searcher.hasExecutor());
-
-                        final int maxPoolSize = executor.getMaximumPoolSize();
-                        assertEquals(
-                            "Sanity check to ensure this isn't the default of 1 when pool size is unset",
-                            configuredMaxPoolSize,
-                            maxPoolSize
-                        );
-
-                        final int expectedSlices = ContextIndexSearcher.computeSlices(
-                            searcher.getIndexReader().leaves(),
-                            maxPoolSize,
-                            1
-                        ).length;
-                        assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices);
-
-                        final long priorExecutorTaskCount = executor.getCompletedTaskCount();
-                        searcher.search(termQuery, new TotalHitCountCollectorManager(searcher.getSlices()));
-                        assertBusy(
-                            () -> assertEquals(
-                                "QUERY supports parallel collection when enabled, so the number of slices should be > 1.",
-                                expectedSlices - 1, // one slice executes on the calling thread
-                                executor.getCompletedTaskCount() - priorExecutorTaskCount
-                            )
-                        );
-                    }
-                }
-            }
-        }
-    }
-
-    private static ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) {
-        return new ReaderContext(
-            new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()),
-            indexService,
-            indexShard,
-            indexShard.acquireSearcherSupplier(),
-            randomNonNegativeLong(),
-            false
-        );
-    }
-
-    private static class TestRewriteCounterQueryBuilder extends AbstractQueryBuilder<TestRewriteCounterQueryBuilder> {
-
-        final int asyncRewriteCount;
-        final Supplier<Boolean> fetched;
-
-        TestRewriteCounterQueryBuilder() {
-            asyncRewriteCount = 0;
-            fetched = null;
-        }
-
-        private TestRewriteCounterQueryBuilder(int asyncRewriteCount, Supplier<Boolean> fetched) {
-            this.asyncRewriteCount = asyncRewriteCount;
-            this.fetched = fetched;
-        }
-
-        @Override
-        public String getWriteableName() {
-            return "test_query";
-        }
-
-        @Override
-        public TransportVersion getMinimalSupportedVersion() {
-            return TransportVersions.ZERO;
-        }
-
-        @Override
-        protected void doWriteTo(StreamOutput out) throws IOException {}
-
-        @Override
-        protected void doXContent(XContentBuilder builder, Params params) throws IOException {}
-
-        @Override
-        protected Query doToQuery(SearchExecutionContext context) throws IOException {
-            return new MatchAllDocsQuery();
-        }
-
-        @Override
-        protected boolean doEquals(TestRewriteCounterQueryBuilder other) {
-            return true;
-        }
-
-        @Override
-        protected int doHashCode() {
-            return 42;
-        }
-
-        @Override
-        protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException {
-            if (asyncRewriteCount > 0) {
-                return this;
-            }
-            if (fetched != null) {
-                if (fetched.get() == null) {
-                    return this;
-                }
-                assert fetched.get();
-                return new TestRewriteCounterQueryBuilder(1, null);
-            }
-            if (queryRewriteContext.convertToDataRewriteContext() != null) {
-                SetOnce<Boolean> awaitingFetch = new SetOnce<>();
-                queryRewriteContext.registerAsyncAction((c, l) -> {
-                    awaitingFetch.set(true);
-                    l.onResponse(null);
-                });
-                return new TestRewriteCounterQueryBuilder(0, awaitingFetch::get);
-            }
-            return this;
-        }
     }
 }
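[Note on this file's replacement code: the new createCanMatchContext helper shown near the top of the hunk lets the remaining tests drive can-match decisions without a full node. A hypothetical call site, sketched from the helper's signature in the diff (the surrounding variables are assumed to exist in the calling test):

    // Sketch: build a can-match context whose query rewrite throws, to verify
    // that exceptions in the can-match phase are surfaced instead of swallowed.
    SearchService.CanMatchContext ctx = createCanMatchContext(
        shardRequest,
        indexShard,
        searchExecutionContext,
        parserConfig,
        true // throwException
    );
]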
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShutdownProgressTrackerTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShutdownProgressTrackerTests.java
index fbf742ae2ea57..8adcb3eb9d5f4 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShutdownProgressTrackerTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShutdownProgressTrackerTests.java
@@ -110,8 +110,10 @@ void simulateShardSnapshotsCompleting(SnapshotShutdownProgressTracker tracker, i
     }
 
     public void testTrackerLogsStats() {
+        final String dummyStatusMsg = "Dummy log message for index shard snapshot statuses";
         SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
             getLocalNodeIdSupplier,
+            (callerLogger) -> callerLogger.info(dummyStatusMsg),
             clusterSettings,
             testThreadPool
         );
@@ -144,6 +146,14 @@ public void testTrackerLogsStats() {
                 "*Shard snapshot completion stats since shutdown began: Done [2]; Failed [1]; Aborted [1]; Paused [1]*"
             )
         );
+        mockLog.addExpectation(
+            new MockLog.SeenEventExpectation(
+                "index shard snapshot statuses",
+                SnapshotShutdownProgressTracker.class.getCanonicalName(),
+                Level.INFO,
+                dummyStatusMsg
+            )
+        );
 
         // Simulate updating the shard snapshot completion stats.
         simulateShardSnapshotsCompleting(tracker, 5);
@@ -171,6 +181,7 @@ public void testTrackerProgressLoggingIntervalSettingCanBeDisabled() {
         );
         SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
             getLocalNodeIdSupplier,
+            (callerLogger) -> {},
             clusterSettingsDisabledLogging,
             testThreadPool
         );
@@ -214,6 +225,7 @@ public void testTrackerIntervalSettingDynamically() {
         );
         SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
             getLocalNodeIdSupplier,
+            (callerLogger) -> {},
             clusterSettingsDisabledLogging,
             testThreadPool
         );
@@ -253,6 +265,7 @@ public void testTrackerIntervalSettingDynamically() {
     public void testTrackerPauseTimestamp() {
         SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
             getLocalNodeIdSupplier,
+            (callerLogger) -> {},
             clusterSettings,
             testThreadPool
         );
@@ -263,7 +276,7 @@ public void testTrackerPauseTimestamp() {
                 "pausing timestamp should be set",
                 SnapshotShutdownProgressTracker.class.getName(),
                 Level.INFO,
-                "*Finished signalling shard snapshots to pause at [" + testThreadPool.relativeTimeInMillis() + "]*"
+                "*Finished signalling shard snapshots to pause at [" + testThreadPool.relativeTimeInMillis() + " millis]*"
             )
         );
@@ -283,6 +296,7 @@ public void testTrackerPauseTimestamp() {
     public void testTrackerRequestsToMaster() {
         SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
             getLocalNodeIdSupplier,
+            (callerLogger) -> {},
             clusterSettings,
             testThreadPool
         );
@@ -335,6 +349,7 @@ public void testTrackerRequestsToMaster() {
     public void testTrackerClearShutdown() {
         SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
             getLocalNodeIdSupplier,
+            (callerLogger) -> {},
             clusterSettings,
             testThreadPool
         );
@@ -345,7 +360,7 @@ public void testTrackerClearShutdown() {
                 "pausing timestamp should be unset",
                 SnapshotShutdownProgressTracker.class.getName(),
                 Level.INFO,
-                "*Finished signalling shard snapshots to pause at [-1]*"
+                "*Finished signalling shard snapshots to pause at [-1 millis]*"
            )
        );
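[Note on the tracker diff above: every constructor call now threads through a new logging callback. A sketch of the pattern, using only names that appear in the hunks (the callback type is assumed to be a Consumer of the caller's Logger):

    // Sketch: callers inject how per-index-shard snapshot statuses get logged
    // on each progress tick; tests that don't care pass a no-op lambda.
    SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
        getLocalNodeIdSupplier,
        (callerLogger) -> callerLogger.info("Dummy log message for index shard snapshot statuses"),
        clusterSettings,
        testThreadPool
    );
]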
diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
index 102797a963840..0ef16e7e9f555 100644
--- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
+++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
@@ -23,6 +23,7 @@ import org.elasticsearch.core.PathUtils;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.jdk.JarHell;
+import org.elasticsearch.jdk.RuntimeVersionFeature;
 import org.elasticsearch.plugins.PluginDescriptor;
 import org.elasticsearch.secure_sm.SecureSM;
 import org.elasticsearch.test.ESTestCase;
@@ -118,8 +119,8 @@ public class BootstrapForTesting {
         // Log ifconfig output before SecurityManager is installed
         IfConfig.logIfNecessary();
 
-        // install security manager if requested
-        if (systemPropertyAsBoolean("tests.security.manager", true)) {
+        // install security manager if available and requested
+        if (RuntimeVersionFeature.isSecurityManagerAvailable() && systemPropertyAsBoolean("tests.security.manager", true)) {
             try {
                 // initialize paths the same exact way as bootstrap
                 Permissions perms = new Permissions();
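[Note on the BootstrapForTesting change: SecurityManager installation is now gated on runtime support, so the same test bootstrap runs on newer JDKs where the Security Manager was removed. A sketch of the guard in isolation (assuming RuntimeVersionFeature.isSecurityManagerAvailable() reports false on such runtimes, as the hunk implies):

    // Sketch: bail out of SecurityManager-dependent setup when the runtime
    // no longer supports installing one; everything else proceeds as before.
    if (RuntimeVersionFeature.isSecurityManagerAvailable() == false) {
        return; // nothing to install on this runtime
    }
]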
primaryTranslogDir = createTempDir("translog-primary"); - translogHandler = createTranslogHandler(defaultSettings); - engine = createEngine(store, primaryTranslogDir); + mapperService = createMapperService(defaultSettings.getSettings(), defaultMapping()); + translogHandler = createTranslogHandler(mapperService); + engine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy()); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName()); @@ -230,7 +254,7 @@ public void setUp() throws Exception { engine.config().setEnableGcDeletes(false); } replicaTranslogDir = createTempDir("translog-replica"); - replicaEngine = createEngine(storeReplica, replicaTranslogDir); + replicaEngine = createEngine(defaultSettings, storeReplica, replicaTranslogDir, newMergePolicy()); currentIndexWriterConfig = replicaEngine.getCurrentIndexWriterConfig(); assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); @@ -433,37 +457,9 @@ protected static ParsedDocument testParsedDocument( ); } - public static CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory() throws Exception { - final MapperService mapperService = createMapperService(); - final String nestedMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested_field") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - ); - final DocumentMapper nestedMapper = mapperService.merge( - "type", - new CompressedXContent(nestedMapping), - MapperService.MergeReason.MAPPING_UPDATE - ); - return (docId, nestedFieldValues) -> { - final XContentBuilder source = XContentFactory.jsonBuilder().startObject().field("field", "value"); - if (nestedFieldValues > 0) { - XContentBuilder nestedField = source.startObject("nested_field"); - for (int i = 0; i < nestedFieldValues; i++) { - nestedField.field("field-" + i, "value-" + i); - } - source.endObject(); - } - source.endObject(); - return nestedMapper.parse(new SourceToParse(docId, BytesReference.bytes(source), XContentType.JSON)); - }; + public static ParsedDocument parseDocument(MapperService mapperService, String id, String routing) { SourceToParse sourceToParse = new SourceToParse(id, new BytesArray("{ \"value\" : \"test\" }"), XContentType.JSON, routing); return mapperService.documentMapper().parse(sourceToParse); } protected Store createStore() throws IOException { @@ -500,8 +496,8 @@ protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSup ); } - protected TranslogHandler createTranslogHandler(IndexSettings indexSettings) { - return new TranslogHandler(xContentRegistry(), indexSettings); + protected TranslogHandler createTranslogHandler(MapperService mapperService) { + return new TranslogHandler(mapperService); } protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { @@ -857,7 +853,7 @@ public EngineConfig config( this::relativeTimeInNanos, indexCommitListener, true, - null + mapperService ); } @@ -1031,6 +1027,22 @@ public static List<Engine.Operation> generateSingleDocHistory( return ops; } + private CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory(MapperService mapperService) { + final DocumentMapper nestedMapper = mapperService.documentMapper(); + return (docId, nestedFieldValues) -> { + final XContentBuilder source = XContentFactory.jsonBuilder().startObject().field("value", "test"); + if
(nestedFieldValues > 0) { + XContentBuilder nestedField = source.startObject("nested_field"); + for (int i = 0; i < nestedFieldValues; i++) { + nestedField.field("field-" + i, "value-" + i); + } + source.endObject(); + } + source.endObject(); + return nestedMapper.parse(new SourceToParse(docId, BytesReference.bytes(source), XContentType.JSON)); + }; + } + public List<Engine.Operation> generateHistoryOnReplica( int numOps, boolean allowGapInSeqNo, @@ -1050,7 +1062,9 @@ public List<Engine.Operation> generateHistoryOnReplica( long seqNo = startingSeqNo; final int maxIdValue = randomInt(numOps * 2); final List<Engine.Operation> operations = new ArrayList<>(numOps); - CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory = nestedParsedDocFactory(); + CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory = nestedParsedDocFactory( + engine.engineConfig.getMapperService() + ); for (int i = 0; i < numOps; i++) { final String id = Integer.toString(randomInt(maxIdValue)); final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values()); @@ -1059,7 +1073,9 @@ public List<Engine.Operation> generateHistoryOnReplica( final long startTime = threadPool.relativeTimeInNanos(); final int copies = allowDuplicate && rarely() ? between(2, 4) : 1; for (int copy = 0; copy < copies; copy++) { - final ParsedDocument doc = isNestedDoc ? nestedParsedDocFactory.apply(id, nestedValues) : createParsedDoc(id, null); + final ParsedDocument doc = isNestedDoc + ? nestedParsedDocFactory.apply(id, nestedValues) + : parseDocument(engine.engineConfig.getMapperService(), id, null); switch (opType) { case INDEX -> operations.add( new Engine.Index( @@ -1274,7 +1290,17 @@ public static List<DocIdSeqNoAndSource> getDocIds(Engine engine, boolean refresh */ public static List<Translog.Operation> readAllOperationsInLucene(Engine engine) throws IOException { final List<Translog.Operation> operations = new ArrayList<>(); - try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), randomBoolean())) { + try ( + Translog.Snapshot snapshot = engine.newChangesSnapshot( + "test", + 0, + Long.MAX_VALUE, + false, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) + ) { Translog.Operation op; while ((op = snapshot.next()) != null) { operations.add(op); @@ -1345,7 +1371,15 @@ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine e assertThat(luceneOp.toString(), luceneOp.primaryTerm(), equalTo(translogOp.primaryTerm())); assertThat(luceneOp.opType(), equalTo(translogOp.opType())); if (luceneOp.opType() == Translog.Operation.Type.INDEX) { - assertThat(((Translog.Index) luceneOp).source(), equalTo(((Translog.Index) translogOp).source())); + if (engine.engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) { + assertToXContentEquivalent( + ((Translog.Index) luceneOp).source(), + ((Translog.Index) translogOp).source(), + XContentFactory.xContentType(((Translog.Index) luceneOp).source().array()) + ); + } else { + assertThat(((Translog.Index) luceneOp).source(), equalTo(((Translog.Index) translogOp).source())); + } } } } @@ -1401,15 +1435,19 @@ public static void assertAtMostOneLuceneDocumentPerSequenceNumber(IndexSettings } public static MapperService createMapperService() throws IOException { - IndexMetadata indexMetadata = IndexMetadata.builder("test") - .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) - .putMapping("{\"properties\": {}}") + return createMapperService(Settings.EMPTY, "{}"); + } + + public static MapperService createMapperService(Settings settings, String mappings) throws IOException { +
IndexMetadata indexMetadata = IndexMetadata.builder("index") + .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).put(settings)) + .putMapping(mappings) .build(); MapperService mapperService = MapperTestUtils.newMapperService( new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), createTempDir(), - Settings.EMPTY, - "test" + indexMetadata.getSettings(), + "index" ); mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE); return mapperService; diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 57cca12f99c41..33c745de25438 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -43,6 +43,10 @@ long appliedOperations() { return appliedOperations.get(); } + public TranslogHandler(MapperService mapperService) { + this.mapperService = mapperService; + } + public TranslogHandler(NamedXContentRegistry xContentRegistry, IndexSettings indexSettings) { SimilarityService similarityService = new SimilarityService(indexSettings, null, emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(emptyList()).getMapperRegistry(); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 29bb3b15a9f86..7dcbbce9fa8e0 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -9,9 +9,12 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.index.LeafReader; @@ -20,7 +23,11 @@ import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.index.RandomIndexWriter; @@ -30,11 +37,14 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshot; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import 
org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -43,6 +53,7 @@ import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.termvectors.TermVectorsService; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; @@ -1130,6 +1141,11 @@ public final void testSyntheticSource() throws IOException { assertSyntheticSource(syntheticSourceSupport(shouldUseIgnoreMalformed()).example(5)); } + public final void testSyntheticSourceWithTranslogSnapshot() throws IOException { + assertSyntheticSourceWithTranslogSnapshot(syntheticSourceSupport(shouldUseIgnoreMalformed()), true); + assertSyntheticSourceWithTranslogSnapshot(syntheticSourceSupport(shouldUseIgnoreMalformed()), false); + } + public void testSyntheticSourceIgnoreMalformedExamples() throws IOException { assumeTrue("type doesn't support ignore_malformed", supportsIgnoreMalformed()); // We need to call this in order to hit the assumption inside so that @@ -1155,6 +1171,71 @@ private void assertSyntheticSource(SyntheticSourceExample example) throws IOExce assertThat(syntheticSource(mapper, example::buildInput), equalTo(example.expected())); } + private void assertSyntheticSourceWithTranslogSnapshot(SyntheticSourceSupport support, boolean doIndexSort) throws IOException { + var firstExample = support.example(1); + int maxDocs = randomIntBetween(20, 50); + var settings = Settings.builder() + .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC) + .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true) + .build(); + var mapperService = createMapperService(getVersion(), settings, () -> true, mapping(b -> { + b.startObject("field"); + firstExample.mapping().accept(b); + b.endObject(); + })); + var docMapper = mapperService.documentMapper(); + try (var directory = newDirectory()) { + List examples = new ArrayList<>(); + IndexWriterConfig config = newIndexWriterConfig(random(), new StandardAnalyzer()); + config.setIndexSort(new Sort(new SortField("sort", SortField.Type.LONG))); + try (var iw = new RandomIndexWriter(random(), directory, config)) { + for (int seqNo = 0; seqNo < maxDocs; seqNo++) { + var example = support.example(randomIntBetween(1, 5)); + examples.add(example); + var doc = docMapper.parse(source(example::buildInput)); + assertNull(doc.dynamicMappingsUpdate()); + doc.updateSeqID(seqNo, 1); + doc.version().setLongValue(0); + if (doIndexSort) { + doc.rootDoc().add(new NumericDocValuesField("sort", randomLong())); + } + iw.addDocuments(doc.docs()); + if (frequently()) { + iw.flush(); + } + } + } + try (var indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) { + int start = randomBoolean() ? 
0 : randomIntBetween(1, maxDocs - 10); + var snapshot = new LuceneSyntheticSourceChangesSnapshot( + mapperService.mappingLookup(), + new Engine.Searcher( + "recovery", + indexReader, + new BM25Similarity(), + null, + new UsageTrackingQueryCachingPolicy(), + () -> {} + ), + randomIntBetween(1, maxDocs), + randomLongBetween(0, ByteSizeValue.ofBytes(Integer.MAX_VALUE).getBytes()), + start, + maxDocs, + true, + randomBoolean(), + IndexVersion.current() + ); + for (int i = start; i < maxDocs; i++) { + var example = examples.get(i); + var op = snapshot.next(); + if (op instanceof Translog.Index opIndex) { + assertThat(opIndex.source().utf8ToString(), equalTo(example.expected())); + } + } + } + } + } + protected boolean supportsEmptyInputArray() { return true; } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index ec85feb200984..59c44925f920f 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -26,7 +26,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.snapshots.SnapshotState; @@ -75,7 +74,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList( MockTransportService.TestPlugin.class, MockFSIndexStore.TestPlugin.class, - RecoverySettingsChunkSizePlugin.class, InternalSettingsPlugin.class, MockEngineFactoryPlugin.class ); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index d3bfacdf7691a..510aa25f9b98e 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -28,7 +28,6 @@ import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.MockPluginsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsLoader; @@ -194,16 +193,6 @@ protected TransportService newTransportService( } } - @Override - void processRecoverySettings(PluginsService pluginsService, ClusterSettings clusterSettings, RecoverySettings recoverySettings) { - if (pluginsService.filterPlugins(RecoverySettingsChunkSizePlugin.class).findAny().isEmpty() == false) { - clusterSettings.addSettingsUpdateConsumer( - RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING, - recoverySettings::setChunkSize - ); - } - } - @Override protected ClusterInfoService newClusterInfoService( PluginsService pluginsService, diff --git a/test/framework/src/main/java/org/elasticsearch/node/RecoverySettingsChunkSizePlugin.java b/test/framework/src/main/java/org/elasticsearch/node/RecoverySettingsChunkSizePlugin.java deleted file mode 100644 index 489c9f704f419..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/node/RecoverySettingsChunkSizePlugin.java +++ /dev/null @@ -1,40 +0,0 @@
-/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.node; - -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.plugins.Plugin; - -import java.util.List; - -import static java.util.Collections.singletonList; - -/** - * Marker plugin that will trigger {@link MockNode} making {@link #CHUNK_SIZE_SETTING} dynamic. - */ -public class RecoverySettingsChunkSizePlugin extends Plugin { - /** - * The chunk size. Only exposed by tests. - */ - public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting( - "indices.recovery.chunk_size", - RecoverySettings.DEFAULT_CHUNK_SIZE, - Property.Dynamic, - Property.NodeScope - ); - - @Override - public List<Setting<?>> getSettings() { - return singletonList(CHUNK_SIZE_SETTING); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index e869fc0836ba6..6612f0da0c43f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -118,6 +118,7 @@ import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.jdk.RuntimeVersionFeature; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; @@ -505,8 +506,10 @@ protected void afterIfSuccessful() throws Exception {} @BeforeClass public static void maybeStashClassSecurityManager() { - if (getTestClass().isAnnotationPresent(WithoutSecurityManager.class)) { - securityManagerRestorer = BootstrapForTesting.disableTestSecurityManager(); + if (RuntimeVersionFeature.isSecurityManagerAvailable()) { + if (getTestClass().isAnnotationPresent(WithoutSecurityManager.class)) { + securityManagerRestorer = BootstrapForTesting.disableTestSecurityManager(); + } } } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java index 5630c33ad559c..896b245c8e920 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java @@ -19,7 +19,8 @@ public enum FeatureFlag { TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null), FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null), SUB_OBJECTS_AUTO_ENABLED("es.sub_objects_auto_feature_flag_enabled=true", Version.fromString("8.16.0"), null), - INFERENCE_UNIFIED_API_ENABLED("es.inference_unified_feature_flag_enabled=true", Version.fromString("8.18.0"), null); +
INFERENCE_UNIFIED_API_ENABLED("es.inference_unified_feature_flag_enabled=true", Version.fromString("8.18.0"), null), + MIGRATION_REINDEX_ENABLED("es.reindex_data_stream_feature_flag_enabled=true", Version.fromString("8.18.0"), null); public final String systemProperty; public final Version from; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java index a4b978e64da9f..08f2011e94e83 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java @@ -133,6 +133,6 @@ protected boolean overrideBucketsPath() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 167c5f66300a9..b35dca2881455 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -268,11 +267,7 @@ public long getTookInMillis() { super(in); mappingVersion = in.readVLong(); settingsVersion = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - aliasesVersion = in.readVLong(); - } else { - aliasesVersion = 0; - } + aliasesVersion = in.readVLong(); globalCheckpoint = in.readZLong(); maxSeqNo = in.readZLong(); maxSeqNoOfUpdatesOrDeletes = in.readZLong(); @@ -304,9 +299,7 @@ public long getTookInMillis() { public void writeTo(final StreamOutput out) throws IOException { out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - out.writeVLong(aliasesVersion); - } + out.writeVLong(aliasesVersion); out.writeZLong(globalCheckpoint); out.writeZLong(maxSeqNo); out.writeZLong(maxSeqNoOfUpdatesOrDeletes); @@ -564,7 +557,17 @@ static Translog.Operation[] getOperations( long toSeqNo = Math.min(globalCheckpoint, (fromSeqNo + maxOperationCount) - 1); assert fromSeqNo <= toSeqNo : "invalid range from_seqno[" + fromSeqNo + "] > to_seqno[" + toSeqNo + "]"; final List operations = new ArrayList<>(); - try (Translog.Snapshot snapshot = indexShard.newChangesSnapshot("ccr", fromSeqNo, toSeqNo, true, true, false)) { + try ( + Translog.Snapshot snapshot = indexShard.newChangesSnapshot( + "ccr", + fromSeqNo, + toSeqNo, + true, + true, + false, + maxBatchSize.getBytes() + ) + ) { Translog.Operation op; while ((op = snapshot.next()) != null) { operations.add(op); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 5cd9f8bc5b78c..573c66cbb614a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -755,7 +755,15 @@ private void assertConsistentHistoryBetweenLeaderAndFollower( final Map operationsOnLeader = new HashMap<>(); try ( Translog.Snapshot snapshot = leader.getPrimary() - .newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), randomBoolean()) + .newChangesSnapshot( + "test", + 0, + Long.MAX_VALUE, + false, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) ) { Translog.Operation op; while ((op = snapshot.next()) != null) { @@ -780,7 +788,8 @@ private void assertConsistentHistoryBetweenLeaderAndFollower( Long.MAX_VALUE, false, randomBoolean(), - randomBoolean() + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) ) ) { Translog.Operation op; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java index 4e3aea2cad205..e3f26eed0c2e9 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.translog.Translog; @@ -84,7 +85,15 @@ public void testPrimaryTermFromFollower() throws IOException { boolean accessStats = randomBoolean(); try ( - Translog.Snapshot snapshot = followerPrimary.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), accessStats) + Translog.Snapshot snapshot = followerPrimary.newChangesSnapshot( + "test", + 0, + Long.MAX_VALUE, + false, + randomBoolean(), + accessStats, + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) ) { if (accessStats) { assertThat(snapshot.totalOperations(), equalTo(operations.size())); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 150eddf039cec..62dc3313a1172 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -15,7 +15,11 @@ import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; 
@@ -31,7 +35,10 @@ import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.TranslogHandler; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; @@ -44,6 +51,9 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.nio.file.Path; @@ -94,7 +104,7 @@ public void tearDown() throws Exception { super.tearDown(); } - public void testFollowingEngineRejectsNonFollowingIndex() { + public void testFollowingEngineRejectsNonFollowingIndex() throws IOException { final Settings.Builder builder = indexSettings(IndexVersion.current(), 1, 0); if (randomBoolean()) { builder.put("index.xpack.ccr.following_index", false); @@ -212,7 +222,7 @@ private EngineConfig engineConfig( final IndexSettings indexSettings, final ThreadPool threadPool, final Store store - ) { + ) throws IOException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final Path translogPath = createTempDir("translog"); final TranslogConfig translogConfig = new TranslogConfig( @@ -221,6 +231,7 @@ private EngineConfig engineConfig( indexSettings, BigArrays.NON_RECYCLING_INSTANCE ); + final MapperService mapperService = EngineTestCase.createMapperService(); return new EngineConfig( shardIdValue, threadPool, @@ -253,7 +264,7 @@ public void onFailedEngine(String reason, Exception e) { System::nanoTime, null, true, - null + mapperService ); } @@ -641,7 +652,15 @@ private void fetchOperations(AtomicBoolean stopped, AtomicLong lastFetchedSeqNo, final long toSeqNo = randomLongBetween(nextSeqNo, Math.min(nextSeqNo + 5, checkpoint)); try ( Translog.Snapshot snapshot = shuffleSnapshot( - leader.newChangesSnapshot("test", fromSeqNo, toSeqNo, true, randomBoolean(), randomBoolean()) + leader.newChangesSnapshot( + "test", + fromSeqNo, + toSeqNo, + true, + randomBoolean(), + randomBoolean(), + randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()) + ) ) ) { follower.advanceMaxSeqNoOfUpdatesOrDeletes(leader.getMaxSeqNoOfUpdatesOrDeletes()); @@ -689,6 +708,39 @@ public void close() throws IOException { }; } + private CheckedBiFunction nestedParsedDocFactory() throws Exception { + final MapperService mapperService = EngineTestCase.createMapperService(); + final String nestedMapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested_field") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + ); + final DocumentMapper nestedMapper = mapperService.merge( + "type", + new CompressedXContent(nestedMapping), + MapperService.MergeReason.MAPPING_UPDATE + ); + return (docId, nestedFieldValues) -> { + final XContentBuilder source = XContentFactory.jsonBuilder().startObject().field("field", "value"); + if (nestedFieldValues > 0) { + XContentBuilder nestedField = source.startObject("nested_field"); + for (int i = 0; i < nestedFieldValues; i++) { 
+ nestedField.field("field-" + i, "value-" + i); + } + source.endObject(); + } + source.endObject(); + return nestedMapper.parse(new SourceToParse(docId, BytesReference.bytes(source), XContentType.JSON)); + }; + } + public void testProcessOnceOnPrimary() throws Exception { final Settings.Builder settingsBuilder = indexSettings(IndexVersion.current(), 1, 0).put("index.xpack.ccr.following_index", true); switch (indexMode) { @@ -709,7 +761,7 @@ public void testProcessOnceOnPrimary() throws Exception { final Settings settings = settingsBuilder.build(); final IndexMetadata indexMetadata = IndexMetadata.builder(index.getName()).settings(settings).build(); final IndexSettings indexSettings = new IndexSettings(indexMetadata, settings); - final CheckedBiFunction nestedDocFunc = EngineTestCase.nestedParsedDocFactory(); + final CheckedBiFunction nestedDocFunc = nestedParsedDocFactory(); int numOps = between(10, 100); List operations = new ArrayList<>(numOps); for (int i = 0; i < numOps; i++) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java index b20f1a9d9ce23..972fbde8ccf3c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java @@ -328,27 +328,15 @@ public FeatureSet(String name, boolean available, boolean enabled) { } public FeatureSet(StreamInput in) throws IOException { - this(in.readString(), readAvailable(in), in.readBoolean()); + this(in.readString(), in.readBoolean(), in.readBoolean()); if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readGenericMap(); // backcompat reading native code info, but no longer used here } } - // this is separated out so that the removed description can be read from the stream on construction - // TODO: remove this for 8.0 - private static boolean readAvailable(StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersions.V_7_3_0)) { - in.readOptionalString(); - } - return in.readBoolean(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getTransportVersion().before(TransportVersions.V_7_3_0)) { - out.writeOptionalString(null); - } out.writeBoolean(available); out.writeBoolean(enabled); if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java index 8d2fd2ecc0870..84ada6b985af8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java @@ -48,7 +48,7 @@ protected void innerXContent(XContentBuilder builder, Params params) throws IOEx @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java index e984b4363b4df..2afd231f535f0 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.ccr; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -413,11 +412,7 @@ public ShardFollowNodeTaskStatus(final StreamInput in) throws IOException { this.writeBufferSizeInBytes = in.readVLong(); this.followerMappingVersion = in.readVLong(); this.followerSettingsVersion = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - this.followerAliasesVersion = in.readVLong(); - } else { - this.followerAliasesVersion = 0L; - } + this.followerAliasesVersion = in.readVLong(); this.totalReadTimeMillis = in.readVLong(); this.totalReadRemoteExecTimeMillis = in.readVLong(); this.successfulReadRequests = in.readVLong(); @@ -457,9 +452,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeVLong(writeBufferSizeInBytes); out.writeVLong(followerMappingVersion); out.writeVLong(followerSettingsVersion); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - out.writeVLong(followerAliasesVersion); - } + out.writeVLong(followerAliasesVersion); out.writeVLong(totalReadTimeMillis); out.writeVLong(totalReadRemoteExecTimeMillis); out.writeVLong(successfulReadRequests); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java index b8b6877e877fa..c3c1b1f925087 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java @@ -28,7 +28,7 @@ public FrozenIndicesFeatureSetUsage(StreamInput input) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java index fb7a564674534..6a72af5bce5e9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncWaitStep.java @@ -20,7 +20,7 @@ */ public abstract class AsyncWaitStep extends Step { - private Client client; + private final Client client; public AsyncWaitStep(StepKey key, StepKey nextStepKey, Client client) { super(key, nextStepKey); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/BranchingStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/BranchingStep.java index db9e9a881b2f9..5dd5ab8e54f6a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/BranchingStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/BranchingStep.java @@ -71,7 +71,7 @@ public ClusterState performAction(Index index, ClusterState clusterState) { * This method returns the next step to execute based on the predicate. 
If * the predicate returned true, then nextStepKeyOnTrue is the key of the * next step to run, otherwise nextStepKeyOnFalse is. - * + *
* <p>
* throws {@link UnsupportedOperationException} if performAction was not called yet * * @return next step to execute diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckNotDataStreamWriteIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckNotDataStreamWriteIndexStep.java index 28b04bc9614bb..fcb9c78ebefd7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckNotDataStreamWriteIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckNotDataStreamWriteIndexStep.java @@ -13,11 +13,10 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; -import java.util.Locale; - /** * Some actions cannot be executed on a data stream's write index (eg. `searchable-snapshot`). This step checks if the managed index is * part of a data stream, in which case it will check it's not the write index. If the managed index is the write index of a data stream @@ -46,8 +45,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { String indexName = index.getName(); if (indexMetadata == null) { - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "[%s] lifecycle action for index [%s] executed but index no longer exists", getKey().action(), indexName @@ -64,8 +62,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { if (dataStream != null) { boolean isFailureStoreWriteIndex = index.equals(dataStream.getFailureStoreWriteIndex()); if (isFailureStoreWriteIndex || dataStream.getWriteIndex().equals(index)) { - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "index [%s] is the%s write index for data stream [%s], pausing " + "ILM execution of lifecycle [%s] until this index is no longer the write index for the data stream via manual or " + "automated rollover", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStep.java index b731dc0b6c2c8..3760684467dfb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStep.java @@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; import java.io.IOException; -import java.util.Locale; import java.util.Objects; /** @@ -158,8 +157,7 @@ public Info(String nodeId, long expectedShards, long numberShardsLeftToAllocate) if (numberShardsLeftToAllocate < 0) { this.message = "Waiting for all shards to become active"; } else { - this.message = String.format( - Locale.ROOT, + this.message = Strings.format( "Waiting for node [%s] to contain [%d] shards, found [%d], remaining [%d]", nodeId, expectedShards, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStep.java index 61e4446c5c522..af4b6fd4291ce 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStep.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStep.java @@ -10,11 +10,10 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; -import java.util.Locale; - /** * This step checks whether the new shrunken index's shards count is a factor of the source index's shards count. */ @@ -53,8 +52,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { int sourceNumberOfShards = indexMetadata.getNumberOfShards(); if (sourceNumberOfShards % numberOfShards != 0) { String policyName = indexMetadata.getLifecyclePolicyName(); - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "lifecycle action of policy [%s] for index [%s] cannot make progress " + "because the target shards count [%d] must be a factor of the source index's shards count [%d]", policyName, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupShrinkIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupShrinkIndexStep.java index 5bb226eec936a..509f96bf1bc35 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupShrinkIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupShrinkIndexStep.java @@ -66,27 +66,24 @@ void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentCl } getClient().admin() .indices() - .delete( - new DeleteIndexRequest(shrinkIndexName).masterNodeTimeout(TimeValue.MAX_VALUE), - new ActionListener() { - @Override - public void onResponse(AcknowledgedResponse acknowledgedResponse) { - // even if not all nodes acked the delete request yet we can consider this operation as successful as - // we'll generate a new index name and attempt to shrink into the newly generated name - listener.onResponse(null); - } + .delete(new DeleteIndexRequest(shrinkIndexName).masterNodeTimeout(TimeValue.MAX_VALUE), new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + // even if not all nodes acked the delete request yet we can consider this operation as successful as + // we'll generate a new index name and attempt to shrink into the newly generated name + listener.onResponse(null); + } - @Override - public void onFailure(Exception e) { - if (e instanceof IndexNotFoundException) { - // we can move on if the index was deleted in the meantime - listener.onResponse(null); - } else { - listener.onFailure(e); - } + @Override + public void onFailure(Exception e) { + if (e instanceof IndexNotFoundException) { + // we can move on if the index was deleted in the meantime + listener.onResponse(null); + } else { + listener.onFailure(e); } } - ); + }); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitStep.java index bc76d53226ee0..d1dbfede63c60 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitStep.java @@ -12,7 +12,7 @@ /** * Checks whether a condition has been met based on the cluster state. - * + *
* <p>
* If checking a condition not based on the cluster state, or which may take time to evaluate, use {@link AsyncWaitStep}. */ public abstract class ClusterStateWaitStep extends Step { @@ -35,19 +35,19 @@ public boolean isCompletable() { public static class Result { private final boolean complete; - private final ToXContentObject infomationContext; + private final ToXContentObject informationContext; - public Result(boolean complete, ToXContentObject infomationContext) { + public Result(boolean complete, ToXContentObject informationContext) { this.complete = complete; - this.infomationContext = infomationContext; + this.informationContext = informationContext; } public boolean isComplete() { return complete; } - public ToXContentObject getInfomationContext() { - return infomationContext; + public ToXContentObject getInformationContext() { + return informationContext; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStep.java index fc30ae07e8889..5e30baa6b9669 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStep.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; import java.time.Clock; -import java.util.Locale; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; @@ -30,7 +29,7 @@ * If the action response is complete, the {@link ClusterStateWaitUntilThresholdStep}'s nextStepKey will be the nextStepKey of the * wrapped action. When the threshold level is surpassed, if the underlying step's condition was not met, the nextStepKey will be changed to * the provided {@link #nextKeyOnThresholdBreach} and this step will stop waiting. - * + *
* <p>
* Failures encountered whilst executing the wrapped action will be propagated directly. */ public class ClusterStateWaitUntilThresholdStep extends ClusterStateWaitStep { @@ -72,14 +71,13 @@ public Result isConditionMet(Index index, ClusterState clusterState) { // we may not have passed the time threshold, but the step is not completable due to a different reason thresholdPassed.set(true); - String message = String.format( - Locale.ROOT, + String message = Strings.format( "[%s] lifecycle step, as part of [%s] action, for index [%s] Is not " + "completable, reason: [%s]. Abandoning execution and moving to the next fallback step [%s]", getKey().name(), getKey().action(), idxMeta.getIndex().getName(), - Strings.toString(stepResult.getInfomationContext()), + Strings.toString(stepResult.getInformationContext()), nextKeyOnThresholdBreach ); logger.debug(message); @@ -90,8 +88,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { // we retried this step enough, next step will be the configured to {@code nextKeyOnThresholdBreach} thresholdPassed.set(true); - String message = String.format( - Locale.ROOT, + String message = Strings.format( "[%s] lifecycle step, as part of [%s] action, for index [%s] executed for" + " more than [%s]. Abandoning execution and moving to the next fallback step [%s]", getKey().name(), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStep.java index 6b39258bcc77f..35818943017b5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStep.java @@ -23,7 +23,7 @@ * Copies the execution state data from one index to another, typically after a * new index has been created. As part of the execution state copy it will set the target index * "current step" to the provided target next step {@link org.elasticsearch.xpack.core.ilm.Step.StepKey}. - * + *
* <p>
* Useful for actions such as shrink. */ public class CopyExecutionStateStep extends ClusterStateActionStep { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopySettingsStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopySettingsStep.java index eddeb1a4cb1b2..00180f95fb269 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopySettingsStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopySettingsStep.java @@ -12,11 +12,11 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.LifecycleExecutionState; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import java.util.Arrays; -import java.util.Locale; import java.util.Objects; import java.util.function.BiFunction; @@ -78,9 +78,8 @@ public ClusterState performAction(Index index, ClusterState clusterState) { String targetIndexName = targetIndexNameSupplier.apply(sourceIndexName, sourceIndexMetadata.getLifecycleExecutionState()); IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndexName); if (targetIndexMetadata == null) { - String errorMessage = String.format( - Locale.ROOT, - "index [%s] is being referenced by ILM action [%s] on step [%s] but " + "it doesn't exist", + String errorMessage = Strings.format( + "index [%s] is being referenced by ILM action [%s] on step [%s] but it doesn't exist", targetIndexName, getKey().action(), getKey().name() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStep.java index 855b579e8843b..b15fece92d4e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStep.java @@ -13,12 +13,12 @@ import org.elasticsearch.cluster.metadata.DesiredNodes; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.cluster.routing.allocation.DataTierAllocationDecider; import org.elasticsearch.xpack.core.ilm.step.info.AllocationInfo; import java.util.List; -import java.util.Locale; import java.util.Optional; import static org.elasticsearch.xpack.core.ilm.AllocationRoutedStep.getPendingAllocations; @@ -103,8 +103,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { if (allocationPendingAllShards > 0) { String statusMessage = availableDestinationTier.map( - s -> String.format( - Locale.ROOT, + s -> Strings.format( "[%s] lifecycle action [%s] waiting for [%s] shards to be moved to the [%s] tier (tier " + "migration preference configuration is %s)", index.getName(), @@ -115,9 +114,8 @@ public Result isConditionMet(Index index, ClusterState clusterState) { ) ) .orElseGet( - () -> String.format( - Locale.ROOT, - "index [%s] has a preference for tiers %s, but no nodes for any of those tiers are " + "available in the cluster", + () -> Strings.format( + "index [%s] has a preference for tiers %s, but no nodes for any of those tiers are available in the cluster", index.getName(), preferredTierConfiguration ) 
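The ILM hunks above repeatedly swap String.format(Locale.ROOT, ...) for the Strings.format(...) utility imported from org.elasticsearch.common. A minimal standalone sketch of the pattern follows; hypotheticalFormat is a stand-in for Strings.format, under the assumption that the helper formats with the root locale so the explicit Locale.ROOT argument becomes redundant at call sites.

// Sketch of the Locale.ROOT -> Strings.format cleanup applied across the ILM steps.
import java.util.Locale;

class FormatMigrationSketch {
    // Before: every call site passes Locale.ROOT explicitly.
    static String before(String index) {
        return String.format(Locale.ROOT, "index [%s] doesn't exist", index);
    }

    // After: the helper owns the locale choice, so call sites shrink by one argument.
    static String after(String index) {
        return hypotheticalFormat("index [%s] doesn't exist", index);
    }

    // Stand-in for Strings.format so this sketch compiles on its own.
    static String hypotheticalFormat(String format, Object... args) {
        return String.format(Locale.ROOT, format, args);
    }
}

The gain is less noise at each call site and a single place that keeps locale handling consistent for log and error messages.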
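Several earlier hunks (EngineTestCase.readAllOperationsInLucene, ShardChangesAction.getOperations, and the CCR tests) thread a new trailing long argument through newChangesSnapshot, bounding how many bytes of operations one batch may load. The sketch below mirrors only that call shape with local stand-in types; it is not the real Engine API, and the parameter name maxChunkSizeBytes is an assumption.

// Hedged sketch of the new byte-budget parameter on changes snapshots.
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

class ChangesSnapshotSketch {
    // Stand-in for Translog.Snapshot: yields operations until exhausted.
    interface OpSnapshot extends Closeable {
        String next() throws IOException;
    }

    // Stand-in for the engine method, with the added trailing byte budget.
    interface EngineLike {
        OpSnapshot newChangesSnapshot(
            String source,
            long fromSeqNo,
            long toSeqNo,
            boolean requiredFullRange,
            boolean singleConsumer,
            boolean accessStats,
            long maxChunkSizeBytes
        ) throws IOException;
    }

    // Drain a snapshot the way the updated callers do: try-with-resources
    // around a next() loop, with the budget passed as the last argument.
    static List<String> readAll(EngineLike engine) throws IOException {
        List<String> ops = new ArrayList<>();
        long budget = 32L * 1024 * 1024; // the tests above randomize up to 32mb
        try (OpSnapshot snapshot = engine.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, true, true, budget)) {
            String op;
            while ((op = snapshot.next()) != null) {
                ops.add(op);
            }
        }
        return ops;
    }
}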
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteStep.java index d8f12edbe3333..7fb350f13a850 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteStep.java @@ -17,11 +17,10 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; -import java.util.Locale; - /** * Deletes a single index. */ @@ -64,8 +63,7 @@ public void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState cu ); return; } else if (isFailureStoreWriteIndex || dataStream.getWriteIndex().getName().equals(indexName)) { - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "index [%s] is the%s write index for data stream [%s]. " + "stopping execution of lifecycle [%s] as a data stream's write index cannot be deleted. manually rolling over the" + " index will resume the execution of the policy as the index will not be the data stream's write index anymore", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java index 59ff38b317327..697f948e47832 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java @@ -213,7 +213,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { // upgrade was performed resume the ILM execution and complete the downsample action after upgrade.) NoopStep cleanupDownsampleIndexStep = new NoopStep(cleanupDownsampleIndexKey, downsampleKey); - // Prepare the lifecycleState by generating the name of the target index, that subsequest steps will use. + // Prepare the lifecycleState by generating the name of the target index, that subsequent steps will use. DownsamplePrepareLifeCycleStateStep generateDownsampleIndexNameStep = new DownsamplePrepareLifeCycleStateStep( generateDownsampleIndexNameKey, downsampleKey, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsamplePrepareLifeCycleStateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsamplePrepareLifeCycleStateStep.java index a5d8ef175d27d..c9f568683e3dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsamplePrepareLifeCycleStateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsamplePrepareLifeCycleStateStep.java @@ -20,7 +20,7 @@ /** * An ILM step that sets the target index to use in the {@link DownsampleStep}. - * The reason why this is done in a seperate step and stored in {@link LifecycleExecutionState}, + * The reason why this is done in a separate step and stored in {@link LifecycleExecutionState}, * is because other steps after downsampling also depend on the target index generated here. 
*/ public class DownsamplePrepareLifeCycleStateStep extends ClusterStateActionStep { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleRequest.java index 00bb4e6f70702..5c607335bff39 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleRequest.java @@ -18,7 +18,7 @@ /** * The request object used by the Explain Lifecycle API. - * + *
<p>
* Multiple indices may be queried in the same request using the * {@link #indices(String...)} method */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponse.java index 755851b2ec88c..914a025e35c21 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ExplainLifecycleResponse.java @@ -22,7 +22,7 @@ /** * The response object returned by the Explain Lifecycle API. - * + *
<p>
* Since the API can be run over multiple indices the response provides a map of * index to the explanation of the lifecycle status for that index. */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java index 693631a7ffcd3..f3afe9e4d52cc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ForceMergeStep.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.Strings; import java.util.Arrays; -import java.util.Locale; import java.util.Objects; import java.util.stream.Collectors; @@ -62,8 +61,7 @@ public void performAction( } else { DefaultShardOperationFailedException[] failures = response.getShardFailures(); String policyName = indexMetadata.getLifecyclePolicyName(); - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "index [%s] in policy [%s] encountered failures [%s] on step [%s]", indexName, policyName, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/InitializePolicyException.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/InitializePolicyException.java index f42b20aa501e4..c8b80bc79c9e8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/InitializePolicyException.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/InitializePolicyException.java @@ -7,8 +7,7 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.ElasticsearchException; - -import java.util.Locale; +import org.elasticsearch.common.Strings; /** * Exception thrown when a problem is encountered while initialising an ILM policy for an index. @@ -16,6 +15,6 @@ public class InitializePolicyException extends ElasticsearchException { public InitializePolicyException(String policy, String index, Throwable cause) { - super(String.format(Locale.ROOT, "unable to initialize policy [%s] for index [%s]", policy, index), cause); + super(Strings.format("unable to initialize policy [%s] for index [%s]", policy, index), cause); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionStateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionStateUtils.java index 4456350f3f92a..ab445ed279ea8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionStateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionStateUtils.java @@ -24,10 +24,10 @@ private LifecycleExecutionStateUtils() {} /** * Given a cluster state, index, and lifecycle state, returns a cluster state where * the lifecycle state will be associated with the given index. - * + *
<p>
* The passed-in index must already be present in the cluster state, this method cannot * be used to add an index. - * + *
<p>
* See also {@link Metadata#withLifecycleState}. */ public static ClusterState newClusterStateWithLifecycleState( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java index 529eb16b668c3..8517635da977c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java @@ -38,7 +38,6 @@ public class LifecycleOperationMetadata implements Metadata.Custom { public static final ParseField SLM_OPERATION_MODE_FIELD = new ParseField("slm_operation_mode"); public static final LifecycleOperationMetadata EMPTY = new LifecycleOperationMetadata(OperationMode.RUNNING, OperationMode.RUNNING); - @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( TYPE, a -> new LifecycleOperationMetadata(OperationMode.valueOf((String) a[0]), OperationMode.valueOf((String) a[1])) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java index ee42eb8b3fce3..e7021d22de47e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java @@ -220,16 +220,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * This method is used to compile this policy into its execution plan built out * of {@link Step} instances. The order of the {@link Phase}s and {@link LifecycleAction}s is * determined by the {@link LifecycleType} associated with this policy. - * + *
<p>
* The order of the policy will have this structure: - * + *
<p>
* - initialize policy context step * - phase-1 phase-after-step * - ... phase-1 action steps * - phase-2 phase-after-step * - ... * - terminal policy step - * + *
<p>
* We first initialize the policy's context and ensure that the index has proper settings set. * Then we begin each phase's after-step along with all its actions as steps. Finally, we have * a terminal step to inform us that this policy's steps are all complete. Each phase's `after` diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java index e8f76b655b70e..8a64dee53792f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java @@ -21,16 +21,16 @@ * This class wraps a client and calls the client using the headers provided in * constructor. The intent is to abstract away the fact that there are headers * so {@link Step}s etc. can call this client as if it was a normal client. - * + *
<p>
* Note: This client will not close the wrapped {@link Client} instance since * the intent is that the wrapped client is shared between multiple instances of * this class. */ public class LifecyclePolicySecurityClient extends AbstractClient { - private Client client; - private Map headers; - private String origin; + private final Client client; + private final Map headers; + private final String origin; public LifecyclePolicySecurityClient(Client client, String origin, Map headers) { super(client.settings(), client.threadPool()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java index 8fe8c8835b98d..1a64e589d20b5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java @@ -33,7 +33,7 @@ */ public class LifecyclePolicyUtils { - private LifecyclePolicyUtils() {}; + private LifecyclePolicyUtils() {} /** * Loads a built-in index lifecycle policy and returns its source. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java index 53247d6428bfb..e3719d57ca25c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java @@ -23,7 +23,7 @@ /** * This task updates the operation mode state for ILM. - * + *
<p>
* As stopping ILM proved to be an action we want to sometimes take in order to allow clusters to stabilise when under heavy load this * task might run at {@link Priority#IMMEDIATE} priority so please make sure to keep this task as lightweight as possible. */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagement.java index 26966195989bb..c3e8cbb6af3bd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagement.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PhaseCacheManagement.java @@ -230,11 +230,7 @@ public static boolean isIndexPhaseDefinitionUpdatable( final Set newPhaseStepKeys = readStepKeys(xContentRegistry, client, peiJson, currentPhase, licenseState); if (newPhaseStepKeys == null) { logger.debug( - () -> format( - "[%s] unable to parse phase definition for policy [%s] " + "to determine if it could be refreshed", - index, - policyId - ) + () -> format("[%s] unable to parse phase definition for policy [%s] to determine if it could be refreshed", index, policyId) ); return false; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReplaceDataStreamBackingIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReplaceDataStreamBackingIndexStep.java index 3962768e94212..8bbe68513f425 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReplaceDataStreamBackingIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ReplaceDataStreamBackingIndexStep.java @@ -14,9 +14,9 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.LifecycleExecutionState; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.Index; -import java.util.Locale; import java.util.Objects; import java.util.function.BiFunction; @@ -75,8 +75,7 @@ public ClusterState performAction(Index index, ClusterState clusterState) { assert indexAbstraction != null : "invalid cluster metadata. index [" + index.getName() + "] was not found"; DataStream dataStream = indexAbstraction.getParentDataStream(); if (dataStream == null) { - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "index [%s] is not part of a data stream. stopping execution of lifecycle " + "[%s] until the index is added to a data stream", originalIndex, @@ -88,8 +87,7 @@ public ClusterState performAction(Index index, ClusterState clusterState) { boolean isFailureStoreWriteIndex = index.equals(dataStream.getFailureStoreWriteIndex()); if (isFailureStoreWriteIndex || dataStream.getWriteIndex().equals(index)) { - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "index [%s] is the%s write index for data stream [%s], pausing " + "ILM execution of lifecycle [%s] until this index is no longer the write index for the data stream via manual or " + "automated rollover", @@ -104,9 +102,8 @@ public ClusterState performAction(Index index, ClusterState clusterState) { IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndexName); if (targetIndexMetadata == null) { - String errorMessage = String.format( - Locale.ROOT, - "target index [%s] doesn't exist. 
stopping execution of lifecycle [%s] for" + " index [%s]", + String errorMessage = Strings.format( + "target index [%s] doesn't exist. stopping execution of lifecycle [%s] for index [%s]", targetIndexName, policyName, originalIndex diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java index 3d140f5a9d764..b2ca71936ca33 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; -import java.util.Locale; import java.util.Objects; /** @@ -51,7 +50,7 @@ public void performAction( String indexName = indexMetadata.getIndex().getName(); boolean indexingComplete = LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.get(indexMetadata.getSettings()); if (indexingComplete) { - logger.trace(indexMetadata.getIndex() + " has lifecycle complete set, skipping " + RolloverStep.NAME); + logger.trace("{} has lifecycle complete set, skipping {}", indexMetadata.getIndex(), RolloverStep.NAME); listener.onResponse(null); return; } @@ -81,8 +80,7 @@ public void performAction( if (Strings.isNullOrEmpty(rolloverAlias)) { listener.onFailure( new IllegalArgumentException( - String.format( - Locale.ROOT, + Strings.format( "setting [%s] for index [%s] is empty or not defined, it must be set to the name of the alias " + "pointing to the group of indices being rolled over", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, @@ -106,8 +104,7 @@ public void performAction( if (indexMetadata.getAliases().containsKey(rolloverAlias) == false) { listener.onFailure( new IllegalArgumentException( - String.format( - Locale.ROOT, + Strings.format( "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java index da64df2672bdb..f585575534b76 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java @@ -470,5 +470,5 @@ static SearchableSnapshotMetadata extractSearchableSnapshotFromSettings(IndexMet return new SearchableSnapshotMetadata(indexName, repo, snapshotName, partial); } - record SearchableSnapshotMetadata(String sourceIndex, String repositoryName, String snapshotName, boolean partial) {}; + record SearchableSnapshotMetadata(String sourceIndex, String repositoryName, String snapshotName, boolean partial) {} } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java index 82f502c96e8b5..800ea603ede8c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java @@ -62,7 +62,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, if (idxSegments == null || (response.getShardFailures() != null && response.getShardFailures().length > 0)) { final DefaultShardOperationFailedException[] failures = 
response.getShardFailures(); logger.info( - "[{}] retrieval of segment counts after force merge did not succeed, " + "there were {} shard failures. failures: {}", + "[{}] retrieval of segment counts after force merge did not succeed, there were {} shard failures. failures: {}", index.getName(), response.getFailedShards(), failures == null diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java index c94e881ad407e..376567bc2004c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetPriorityAction.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Objects; /** * A {@link LifecycleAction} which sets the index's priority. The higher the priority, the faster the recovery. @@ -31,7 +32,6 @@ public class SetPriorityAction implements LifecycleAction { public static final String NAME = "set_priority"; public static final ParseField RECOVERY_PRIORITY_FIELD = new ParseField("priority"); - @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( NAME, a -> new SetPriorityAction((Integer) a[0]) @@ -108,15 +108,13 @@ public List toSteps(Client client, String phase, StepKey nextStepKey) { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - SetPriorityAction that = (SetPriorityAction) o; - - return recoveryPriority != null ? recoveryPriority.equals(that.recoveryPriority) : that.recoveryPriority == null; + return Objects.equals(recoveryPriority, that.recoveryPriority); } @Override public int hashCode() { - return recoveryPriority != null ? 
recoveryPriority.hashCode() : 0; + return Objects.hash(recoveryPriority); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java index 1744d0e0384fd..379888493c894 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStep.java @@ -120,7 +120,7 @@ public void performAction( // No nodes currently match the allocation rules, so report this as an error and we'll retry logger.debug("could not find any nodes to allocate index [{}] onto prior to shrink", indexName); listener.onFailure( - new NoNodeAvailableException("could not find any nodes to allocate index [" + indexName + "] onto" + " prior to shrink") + new NoNodeAvailableException("could not find any nodes to allocate index [" + indexName + "] onto prior to shrink") ); } } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java index 9cbcd6c62dc3b..401d87f853360 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java @@ -69,9 +69,9 @@ public class ShrinkAction implements LifecycleAction { .put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), (String) null) .build(); - private Integer numberOfShards; - private ByteSizeValue maxPrimaryShardSize; - private boolean allowWriteAfterShrink; + private final Integer numberOfShards; + private final ByteSizeValue maxPrimaryShardSize; + private final boolean allowWriteAfterShrink; public static ShrinkAction parse(XContentParser parser) throws IOException { return PARSER.parse(parser, null); @@ -89,11 +89,13 @@ public ShrinkAction(@Nullable Integer numberOfShards, @Nullable ByteSizeValue ma throw new IllegalArgumentException("[max_primary_shard_size] must be greater than 0"); } this.maxPrimaryShardSize = maxPrimaryShardSize; + this.numberOfShards = null; } else { if (numberOfShards <= 0) { throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0"); } this.numberOfShards = numberOfShards; + this.maxPrimaryShardSize = null; } this.allowWriteAfterShrink = allowWriteAfterShrink; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkStep.java index 0628e60a7f39f..2ada24eef7cf6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkStep.java @@ -30,8 +30,8 @@ public class ShrinkStep extends AsyncActionStep { public static final String NAME = "shrink"; private static final Logger logger = LogManager.getLogger(ShrinkStep.class); - private Integer numberOfShards; - private ByteSizeValue maxPrimaryShardSize; + private final Integer numberOfShards; + private final ByteSizeValue maxPrimaryShardSize; public ShrinkStep(StepKey key, StepKey nextStepKey, Client client, Integer numberOfShards, ByteSizeValue maxPrimaryShardSize) { super(key, nextStepKey, client); diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java index 82e4280dcc4cc..a44b717b0a457 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java @@ -15,9 +15,9 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.LifecycleExecutionState; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; -import java.util.Locale; import java.util.Objects; import java.util.function.BiFunction; @@ -83,9 +83,8 @@ public void performAction( if (targetIndexMetadata == null) { String policyName = indexMetadata.getLifecyclePolicyName(); - String errorMessage = String.format( - Locale.ROOT, - "target index [%s] doesn't exist. stopping execution of lifecycle [%s] for" + " index [%s]", + String errorMessage = Strings.format( + "target index [%s] doesn't exist. stopping execution of lifecycle [%s] for index [%s]", targetIndexName, policyName, originalIndex diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java index 3203f85b2a7eb..48a0e65bddf22 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java @@ -197,9 +197,7 @@ public void validate(Collection phases) { } phase.getActions().forEach((actionName, action) -> { if (ALLOWED_ACTIONS.get(phase.getName()).contains(actionName) == false) { - throw new IllegalArgumentException( - "invalid action [" + actionName + "] " + "defined in phase [" + phase.getName() + "]" - ); + throw new IllegalArgumentException("invalid action [" + actionName + "] defined in phase [" + phase.getName() + "]"); } }); }); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java index ef2e807f2c247..31aaba551a3f3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowAction.java @@ -23,7 +23,7 @@ /** * Converts a CCR following index into a normal, standalone index, once the index is ready to be safely separated. - * + *
<p>
* "Readiness" is composed of two conditions: * 1) The index must have {@link LifecycleSettings#LIFECYCLE_INDEXING_COMPLETE} set to {@code true}, which is * done automatically by {@link RolloverAction} (or manually). diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStep.java index 6e07d4e6ac823..b896196185b37 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UnfollowFollowerIndexStep.java @@ -43,7 +43,7 @@ void innerPerformAction(String followerIndex, ClusterState currentClusterState, if (exception instanceof ElasticsearchException e && e.getMetadata("es.failed_to_remove_retention_leases") != null) { List leasesNotRemoved = e.getMetadata("es.failed_to_remove_retention_leases"); logger.debug( - "failed to remove leader retention lease(s) {} while unfollowing index [{}], " + "continuing with lifecycle execution", + "failed to remove leader retention lease(s) {} while unfollowing index [{}], continuing with lifecycle execution", leasesNotRemoved, followerIndex ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStep.java index fe9101ea6edef..c0209ba7f6cc6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/UpdateRolloverLifecycleDateStep.java @@ -46,7 +46,7 @@ public ClusterState performAction(Index index, ClusterState currentState) { boolean indexingComplete = LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.get(indexMetadata.getSettings()); if (indexingComplete) { - logger.trace(indexMetadata.getIndex() + " has lifecycle complete set, skipping " + UpdateRolloverLifecycleDateStep.NAME); + logger.trace("{} has lifecycle complete set, skipping {}", indexMetadata.getIndex(), UpdateRolloverLifecycleDateStep.NAME); // The index won't have RolloverInfo if this is a Following index and indexing_complete was set by CCR, // so just use the current time. 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsStep.java index 71c99d7f21848..989223ef48da7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsStep.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.List; -import java.util.Locale; import java.util.Objects; import static org.elasticsearch.cluster.metadata.IndexMetadata.parseIndexNameCounter; @@ -54,8 +53,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { IndexMetadata originalIndexMeta = metadata.index(index); if (originalIndexMeta == null) { - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "[%s] lifecycle action for index [%s] executed but index no longer exists", getKey().action(), index.getName() @@ -67,8 +65,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { boolean indexingComplete = LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.get(originalIndexMeta.getSettings()); if (indexingComplete) { - String message = String.format( - Locale.ROOT, + String message = Strings.format( "index [%s] has lifecycle complete set, skipping [%s]", originalIndexMeta.getIndex().getName(), WaitForActiveShardsStep.NAME @@ -148,8 +145,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) { } private static Result getErrorResultOnNullMetadata(StepKey key, Index originalIndex) { - String errorMessage = String.format( - Locale.ROOT, + String errorMessage = Strings.format( "unable to find the index that was rolled over from [%s] as part of lifecycle action [%s]", originalIndex.getName(), key.action() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java index 9140ff5549e15..b4c66c2f5ac22 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java @@ -15,11 +15,11 @@ import org.elasticsearch.cluster.metadata.LifecycleExecutionState; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; -import java.util.Locale; import java.util.Objects; import java.util.function.BiFunction; @@ -89,9 +89,8 @@ public Result isConditionMet(Index index, ClusterState clusterState) { IndexMetadata indexMetadata = clusterState.metadata().index(indexName); // check if the (potentially) derived index exists if (indexMetadata == null) { - String errorMessage = String.format( - Locale.ROOT, - "[%s] lifecycle action for index [%s] executed but the target index [%s] " + "does not exist", + String errorMessage = Strings.format( + "[%s] lifecycle action for index [%s] executed but the target index [%s] does not exist", getKey().action(), index.getName(), indexName diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java index aa20e33a3fbf2..7e074e8caea5b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java @@ -24,7 +24,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.ilm.step.info.EmptyInfo; -import java.util.Locale; import java.util.Objects; /** @@ -108,8 +107,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, if (Strings.isNullOrEmpty(rolloverAlias)) { listener.onFailure( new IllegalArgumentException( - String.format( - Locale.ROOT, + Strings.format( "setting [%s] for index [%s] is empty or not defined", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, index.getName() @@ -147,7 +145,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, boolean indexingComplete = LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING.get(indexMetadata.getSettings()); if (indexingComplete) { - logger.trace(index + " has lifecycle complete set, skipping " + WaitForRolloverReadyStep.NAME); + logger.trace("{} has lifecycle complete set, skipping {}", index, WaitForRolloverReadyStep.NAME); // If this index is still the write index for this alias, skipping rollover and continuing with the policy almost certainly // isn't what we want, as something likely still expects to be writing to this index. // If the alias doesn't point to this index, that's okay as that will be the result if this index is using a @@ -155,8 +153,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, if (aliasPointsToThisIndex && Boolean.TRUE.equals(isWriteIndex)) { listener.onFailure( new IllegalStateException( - String.format( - Locale.ROOT, + Strings.format( "index [%s] has [%s] set to [true], but is still the write index for alias [%s]", index.getName(), LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, @@ -175,8 +172,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, if (aliasPointsToThisIndex == false) { listener.onFailure( new IllegalArgumentException( - String.format( - Locale.ROOT, + Strings.format( "%s [%s] does not point to index [%s]", RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias, @@ -191,7 +187,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, if (Boolean.FALSE.equals(isWriteIndex)) { listener.onFailure( new IllegalArgumentException( - String.format(Locale.ROOT, "index [%s] is not the write index for alias [%s]", index.getName(), rolloverAlias) + Strings.format("index [%s] is not the write index for alias [%s]", index.getName(), rolloverAlias) ) ); return; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java index 7ce81fa90a557..74ab24445f74f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java @@ -13,6 +13,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import 
org.elasticsearch.xcontent.ToXContentObject; @@ -21,7 +22,6 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; import java.util.Date; -import java.util.Locale; import java.util.Objects; /*** @@ -87,7 +87,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, logger.debug("skipping ILM policy execution because no last snapshot start date, action time: {}", actionTime); } else { logger.debug( - "skipping ILM policy execution because snapshot start time {} is before action time {}, snapshot timestamp " + "is {}", + "skipping ILM policy execution because snapshot start time {} is before action time {}, snapshot timestamp is {}", snapPolicyMeta.getLastSuccess().getSnapshotStartTimestamp(), actionTime, snapPolicyMeta.getLastSuccess().getSnapshotFinishTimestamp() @@ -134,14 +134,14 @@ public boolean isRetryable() { private ToXContentObject notExecutedMessage(long time) { return (builder, params) -> { builder.startObject(); - builder.field(MESSAGE_FIELD, String.format(Locale.ROOT, POLICY_NOT_EXECUTED_MESSAGE, policy, new Date(time))); + builder.field(MESSAGE_FIELD, Strings.format(POLICY_NOT_EXECUTED_MESSAGE, policy, new Date(time))); builder.endObject(); return builder; }; } private static IllegalStateException error(String message, Object... args) { - return new IllegalStateException(String.format(Locale.ROOT, message, args)); + return new IllegalStateException(Strings.format(message, args)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStep.java index 38cc39c3bfd55..50a7d48672c8e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStep.java @@ -9,6 +9,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; @@ -17,14 +18,13 @@ import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo; import java.time.Instant; -import java.util.Locale; import java.util.function.Supplier; /** * This {@link Step} waits until the {@link org.elasticsearch.index.IndexSettings#TIME_SERIES_END_TIME} passes for time series indices. * For regular indices this step doesn't wait at all and the condition is evaluated to true immediately. - * - * Note that this step doens't execute an async/transport action and is able to evaluate its condition based on the local information + *
<p>
+ * Note that this step doesn't execute an async/transport action and is able to evaluate its condition based on the local information * available however, we want this step to be executed periodically using the `AsyncWaitStep` infrastructure. * The condition will be evaluated every {@link LifecycleSettings#LIFECYCLE_POLL_INTERVAL}. */ @@ -47,7 +47,7 @@ public boolean isRetryable() { public void evaluateCondition(Metadata metadata, Index index, Listener listener, TimeValue masterTimeout) { IndexMetadata indexMetadata = metadata.index(index); assert indexMetadata != null - : "the index metadata for index [" + index.getName() + "] must exist in the cluster state for step " + "[" + NAME + "]"; + : "the index metadata for index [" + index.getName() + "] must exist in the cluster state for step [" + NAME + "]"; if (IndexSettings.MODE.get(indexMetadata.getSettings()) != IndexMode.TIME_SERIES) { // this index is not a time series index so no need to wait @@ -60,8 +60,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, listener.onResponse( false, new SingleMessageFieldInfo( - String.format( - Locale.ROOT, + Strings.format( "The [%s] setting for index [%s] is [%s]. Waiting until the index's time series end time lapses before" + " proceeding with action [%s] as the index can still accept writes.", IndexSettings.TIME_SERIES_END_TIME.getKey(), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java index 00e6a546be5a4..c21a4bfe03156 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java @@ -149,7 +149,6 @@ public String toString() { public static class TaskParams implements PersistentTaskParams, MlTaskParams { public static final MlConfigVersion VERSION_INTRODUCED = MlConfigVersion.V_7_3_0; - public static final TransportVersion TRANSPORT_VERSION_INTRODUCED = TransportVersions.V_7_3_0; public static final MlConfigVersion VERSION_DESTINATION_INDEX_MAPPINGS_CHANGED = MlConfigVersion.V_7_10_0; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -207,7 +206,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TRANSPORT_VERSION_INTRODUCED; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java index 3f1160df95e22..564dfbdf65e03 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core.security.action.oidc; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; @@ -54,10 +53,7 @@ public OpenIdConnectAuthenticateRequest(StreamInput in) throws IOException { redirectUri = in.readString(); state = 
in.readString(); nonce = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_4_0)) { - realm = in.readOptionalString(); - } - + realm = in.readOptionalString(); } public String getRedirectUri() { @@ -113,9 +109,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(redirectUri); out.writeString(state); out.writeString(nonce); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_4_0)) { - out.writeOptionalString(realm); - } + out.writeOptionalString(realm); } public String toString() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java index 0d6aba9406a3f..7a5bf9f9e1aea 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java @@ -131,7 +131,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } @Override @@ -226,7 +226,7 @@ static Diff readLifecyclePolicyDiffFrom(StreamI @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java index ac495ddebab3c..f6aa8b884105f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java @@ -38,7 +38,7 @@ public SpatialFeatureSetUsage(StreamInput input) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_4_0; + return TransportVersions.ZERO; } SpatialStatsAction.Response statsResponse() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java index 9164fd88b6395..08e89a0fcab00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java @@ -492,7 +492,7 @@ private boolean hasMatchAllEquivalent( return false; } - private boolean canMatchShard(ShardId shardId, NodeTermsEnumRequest req) throws IOException { + private boolean canMatchShard(ShardId shardId, NodeTermsEnumRequest req) { if (req.indexFilter() == null || req.indexFilter() instanceof MatchAllQueryBuilder) { return true; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java index b2f7ac28210e2..3b5ed564bcb47 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java @@ -131,20 +131,11 @@ public TransformState( public 
TransformState(StreamInput in) throws IOException { taskState = TransformTaskState.fromStream(in); indexerState = IndexerState.fromStream(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - position = in.readOptionalWriteable(TransformIndexerPosition::new); - } else { - Map pos = in.readGenericMap(); - position = new TransformIndexerPosition(pos, null); - } + position = in.readOptionalWriteable(TransformIndexerPosition::new); checkpoint = in.readLong(); reason = in.readOptionalString(); progress = in.readOptionalWriteable(TransformProgress::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - node = in.readOptionalWriteable(NodeAttributes::new); - } else { - node = null; - } + node = in.readOptionalWriteable(NodeAttributes::new); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { shouldStopAtNextCheckpoint = in.readBoolean(); } else { @@ -241,17 +232,11 @@ public String getWriteableName() { public void writeTo(StreamOutput out) throws IOException { taskState.writeTo(out); indexerState.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - out.writeOptionalWriteable(position); - } else { - out.writeGenericMap(position != null ? position.getIndexerPosition() : null); - } + out.writeOptionalWriteable(position); out.writeLong(checkpoint); out.writeOptionalString(reason); out.writeOptionalWriteable(progress); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - out.writeOptionalWriteable(node); - } + out.writeOptionalWriteable(node); if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeBoolean(shouldStopAtNextCheckpoint); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java index 6d8ed0e33d7d8..49106c01b96b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java @@ -25,7 +25,7 @@ public VotingOnlyNodeFeatureSetUsage() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_3_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java index 06a3c0da856aa..415014623f340 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java @@ -182,7 +182,7 @@ public void testClusterExcludeFiltersConditionMetOnlyOneCopyAllocated() { Result expectedResult = new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(1, 1)); assertEquals(expectedResult.isComplete(), actualResult.isComplete()); - assertEquals(expectedResult.getInfomationContext(), actualResult.getInfomationContext()); + assertEquals(expectedResult.getInformationContext(), actualResult.getInformationContext()); } public void testExcludeConditionMetOnlyOneCopyAllocated() { @@ -496,7 +496,7 @@ public void testExecuteIndexMissing() throws Exception { Result actualResult = step.isConditionMet(index, clusterState); assertFalse(actualResult.isComplete()); - 
assertNull(actualResult.getInfomationContext()); + assertNull(actualResult.getInformationContext()); } private void assertAllocateStatus( @@ -538,6 +538,6 @@ private void assertAllocateStatus( .build(); Result actualResult = step.isConditionMet(index, clusterState); assertEquals(expectedResult.isComplete(), actualResult.isComplete()); - assertEquals(expectedResult.getInfomationContext(), actualResult.getInfomationContext()); + assertEquals(expectedResult.getInformationContext(), actualResult.getInformationContext()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java index e0957239e33a8..af9aa0982d61d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckNoDataStreamWriteIndexStepTests.java @@ -60,7 +60,7 @@ public void testStepCompleteIfIndexIsNotPartOfDataStream() { ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), is(nullValue())); + assertThat(result.getInformationContext(), is(nullValue())); } public void testStepIncompleteIfIndexIsTheDataStreamWriteIndex() { @@ -95,7 +95,7 @@ public void testStepIncompleteIfIndexIsTheDataStreamWriteIndex() { String expectedIndexName = indexToOperateOn.getIndex().getName(); ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(indexToOperateOn.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); assertThat( info.getMessage(), is( @@ -162,6 +162,6 @@ public void testStepCompleteIfPartOfDataStreamButNotWriteIndex() { IndexMetadata indexToOperateOn = useFailureStore ? 
failureIndexMetadata : indexMetadata; ClusterStateWaitStep.Result result = createRandomInstance().isConditionMet(indexToOperateOn.getIndex(), clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), is(nullValue())); + assertThat(result.getInformationContext(), is(nullValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java index e46c40ca96ff7..371f7def67c52 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java @@ -418,7 +418,7 @@ public void testExecuteIndexMissing() throws Exception { ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); assertFalse(actualResult.isComplete()); - assertNull(actualResult.getInfomationContext()); + assertNull(actualResult.getInformationContext()); } public void testStepCompletableIfAllShardsActive() { @@ -576,7 +576,7 @@ public void testStepBecomesUncompletable() { ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); assertFalse(actualResult.isComplete()); assertThat( - Strings.toString(actualResult.getInfomationContext()), + Strings.toString(actualResult.getInformationContext()), containsString("node with id [node1] is currently marked as shutting down") ); assertFalse(step.isCompletable()); @@ -626,7 +626,7 @@ private void assertAllocateStatus( .build(); ClusterStateWaitStep.Result actualResult = step.isConditionMet(index, clusterState); assertEquals(expectedResult.isComplete(), actualResult.isComplete()); - assertEquals(expectedResult.getInfomationContext(), actualResult.getInfomationContext()); + assertEquals(expectedResult.getInformationContext(), actualResult.getInformationContext()); } public static UnassignedInfo randomUnassignedInfo(String message) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java index 2b5ad6fac6921..8eb8d0f395aba 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckTargetShardsCountStepTests.java @@ -76,7 +76,7 @@ public void testStepIncompleteIfTargetShardsCountNotValid() { ClusterStateWaitStep.Result result = checkTargetShardsCountStep.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); assertThat( info.getMessage(), is( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java index eec2acf6da824..ea583b51c4c28 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ClusterStateWaitUntilThresholdStepTests.java @@ -71,7 +71,7 @@ public void 
testIndexIsMissingReturnsIncompleteResult() { ClusterState.EMPTY_STATE ); assertThat(result.isComplete(), is(false)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); } public void testIsConditionMetForUnderlyingStep() { @@ -96,7 +96,7 @@ public void testIsConditionMetForUnderlyingStep() { ClusterStateWaitStep.Result result = underTest.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); } { @@ -121,9 +121,9 @@ public void testIsConditionMetForUnderlyingStep() { ClusterStateWaitStep.Result result = underTest.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - assertThat(result.getInfomationContext(), notNullValue()); + assertThat(result.getInformationContext(), notNullValue()); WaitForIndexingCompleteStep.IndexingNotCompleteInfo info = (WaitForIndexingCompleteStep.IndexingNotCompleteInfo) result - .getInfomationContext(); + .getInformationContext(); assertThat( info.getMessage(), equalTo( @@ -155,7 +155,7 @@ public void testIsConditionMetForUnderlyingStep() { ClusterStateWaitStep.Result result = underTest.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); assertThat(underTest.getNextStepKey(), is(not(nextKeyOnThresholdBreach))); assertThat(underTest.getNextStepKey(), is(stepToExecute.getNextStepKey())); } @@ -185,8 +185,8 @@ public void testIsConditionMetForUnderlyingStep() { ClusterStateWaitStep.Result result = underTest.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), notNullValue()); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + assertThat(result.getInformationContext(), notNullValue()); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); assertThat( info.getMessage(), equalTo( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStepTests.java index 8b05a3156ed04..95c1f5c4aa96b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStepTests.java @@ -90,7 +90,7 @@ public void testExecuteWithUnassignedShard() { Result actualResult = step.isConditionMet(index, clusterState); assertThat(actualResult.isComplete(), is(false)); - assertThat(actualResult.getInfomationContext(), is(expectedResult.getInfomationContext())); + assertThat(actualResult.getInformationContext(), is(expectedResult.getInformationContext())); } public void testExecuteWithPendingShards() { @@ -130,7 +130,7 @@ public void testExecuteWithPendingShards() { Result actualResult = step.isConditionMet(index, clusterState); assertThat(actualResult.isComplete(), is(false)); - assertThat(actualResult.getInfomationContext(), is(expectedResult.getInfomationContext())); + assertThat(actualResult.getInformationContext(), is(expectedResult.getInformationContext())); } public void 
testExecuteWithPendingShardsAndTargetRoleNotPresentInCluster() { @@ -164,7 +164,7 @@ public void testExecuteWithPendingShardsAndTargetRoleNotPresentInCluster() { Result actualResult = step.isConditionMet(index, clusterState); assertThat(actualResult.isComplete(), is(false)); - assertThat(actualResult.getInfomationContext(), is(expectedResult.getInfomationContext())); + assertThat(actualResult.getInformationContext(), is(expectedResult.getInformationContext())); } public void testExecuteIndexMissing() { @@ -175,7 +175,7 @@ public void testExecuteIndexMissing() { Result actualResult = step.isConditionMet(index, clusterState); assertThat(actualResult.isComplete(), is(false)); - assertThat(actualResult.getInfomationContext(), is(nullValue())); + assertThat(actualResult.getInformationContext(), is(nullValue())); } public void testExecuteIsComplete() { @@ -200,7 +200,7 @@ public void testExecuteIsComplete() { DataTierMigrationRoutedStep step = createRandomInstance(); Result result = step.isConditionMet(index, clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), is(nullValue())); + assertThat(result.getInformationContext(), is(nullValue())); } public void testExecuteWithGenericDataNodes() { @@ -221,7 +221,7 @@ public void testExecuteWithGenericDataNodes() { DataTierMigrationRoutedStep step = createRandomInstance(); Result result = step.isConditionMet(index, clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), is(nullValue())); + assertThat(result.getInformationContext(), is(nullValue())); } public void testExecuteForIndexWithoutTierRoutingInformationWaitsForReplicasToBeActive() { @@ -246,7 +246,7 @@ public void testExecuteForIndexWithoutTierRoutingInformationWaitsForReplicasToBe Result result = step.isConditionMet(index, clusterState); assertThat(result.isComplete(), is(false)); - assertThat(result.getInfomationContext(), is(expectedResult.getInfomationContext())); + assertThat(result.getInformationContext(), is(expectedResult.getInformationContext())); } { @@ -267,7 +267,7 @@ public void testExecuteForIndexWithoutTierRoutingInformationWaitsForReplicasToBe Result result = step.isConditionMet(index, clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), is(nullValue())); + assertThat(result.getInformationContext(), is(nullValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java index c4857a31b7a7a..dd82a648f0436 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java @@ -16,7 +16,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.core.Strings; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.rest.RestStatus; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkShardsAllocatedStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkShardsAllocatedStepTests.java index 3f4b1adf8253b..59eff971c1643 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkShardsAllocatedStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkShardsAllocatedStepTests.java @@ -95,7 +95,7 @@ public void testConditionMet() { Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); assertTrue(result.isComplete()); - assertNull(result.getInfomationContext()); + assertNull(result.getInformationContext()); } public void testConditionNotMetBecauseOfActive() { @@ -138,7 +138,7 @@ public void testConditionNotMetBecauseOfActive() { Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); assertFalse(result.isComplete()); - assertEquals(new ShrunkShardsAllocatedStep.Info(true, shrinkNumberOfShards, false), result.getInfomationContext()); + assertEquals(new ShrunkShardsAllocatedStep.Info(true, shrinkNumberOfShards, false), result.getInformationContext()); } public void testConditionNotMetBecauseOfShrunkIndexDoesntExistYet() { @@ -167,6 +167,6 @@ public void testConditionNotMetBecauseOfShrunkIndexDoesntExistYet() { Result result = step.isConditionMet(originalIndexMetadata.getIndex(), clusterState); assertFalse(result.isComplete()); - assertEquals(new ShrunkShardsAllocatedStep.Info(false, -1, false), result.getInfomationContext()); + assertEquals(new ShrunkShardsAllocatedStep.Info(false, -1, false), result.getInformationContext()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkenIndexCheckStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkenIndexCheckStepTests.java index 3c5be3c0d337b..523404a00a0c5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkenIndexCheckStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrunkenIndexCheckStepTests.java @@ -60,7 +60,7 @@ public void testConditionMet() { ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertTrue(result.isComplete()); - assertNull(result.getInfomationContext()); + assertNull(result.getInformationContext()); } public void testConditionNotMetBecauseNotSameShrunkenIndex() { @@ -78,7 +78,7 @@ public void testConditionNotMetBecauseNotSameShrunkenIndex() { ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState); assertFalse(result.isComplete()); - assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInfomationContext()); + assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInformationContext()); } public void testConditionNotMetBecauseSourceIndexExists() { @@ -102,7 +102,7 @@ public void testConditionNotMetBecauseSourceIndexExists() { ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); Result result = step.isConditionMet(shrinkIndexMetadata.getIndex(), clusterState); assertFalse(result.isComplete()); - assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInfomationContext()); + assertEquals(new ShrunkenIndexCheckStep.Info(sourceIndex), result.getInformationContext()); } public void testIllegalState() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java index f5f36781e011b..e12bae3b92f80 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForActiveShardsTests.java @@ -250,7 +250,7 @@ public void testResultEvaluatedOnDataStream() throws IOException { JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS ); - String actualResultAsString = Strings.toString(result.getInfomationContext()); + String actualResultAsString = Strings.toString(result.getInformationContext()); assertThat(actualResultAsString, is(Strings.toString(expected))); assertThat(actualResultAsString, containsString("waiting for [3] shards to become active, but only [2] are active")); } @@ -294,7 +294,7 @@ public void testResultReportsMeaningfulMessage() throws IOException { JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS ); - String actualResultAsString = Strings.toString(result.getInfomationContext()); + String actualResultAsString = Strings.toString(result.getInformationContext()); assertThat(actualResultAsString, is(Strings.toString(expected))); assertThat(actualResultAsString, containsString("waiting for [3] shards to become active, but only [2] are active")); } @@ -318,7 +318,7 @@ public void testResultReportsErrorMessage() { ClusterStateWaitStep.Result result = step.isConditionMet(new Index("index-000000", UUID.randomUUID().toString()), clusterState); assertThat(result.isComplete(), is(false)); - String actualResultAsString = Strings.toString(result.getInfomationContext()); + String actualResultAsString = Strings.toString(result.getInformationContext()); assertThat( actualResultAsString, containsString( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java index 3c68e929df980..3247c02cd9bac 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStepTests.java @@ -81,9 +81,9 @@ private void verify(WaitForDataTierStep step, ClusterState state, boolean comple ClusterStateWaitStep.Result result = step.isConditionMet(null, state); assertThat(result.isComplete(), is(complete)); if (message != null) { - assertThat(Strings.toString(result.getInfomationContext()), containsString(message)); + assertThat(Strings.toString(result.getInformationContext()), containsString(message)); } else { - assertThat(result.getInfomationContext(), is(nullValue())); + assertThat(result.getInformationContext(), is(nullValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java index 3e3952fa11619..0ae7b02c7400a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java @@ -94,7 +94,7 @@ public void testConditionMetForGreen() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); 
assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); } public void testConditionNotMetForGreen() { @@ -120,7 +120,7 @@ public void testConditionNotMetForGreen() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); assertThat(info, notNullValue()); assertThat(info.getMessage(), equalTo("index is not green; not all shards are active")); } @@ -140,7 +140,7 @@ public void testConditionNotMetNoIndexRoutingTable() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); assertThat(info, notNullValue()); assertThat(info.getMessage(), equalTo("index is red; no indexRoutingTable")); } @@ -168,7 +168,7 @@ public void testConditionMetForYellow() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); } public void testConditionNotMetForYellow() { @@ -194,7 +194,7 @@ public void testConditionNotMetForYellow() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); assertThat(info, notNullValue()); assertThat(info.getMessage(), equalTo("index is red; not all primary shards are active")); } @@ -214,7 +214,7 @@ public void testConditionNotMetNoIndexRoutingTableForYellow() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.YELLOW); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); assertThat(info, notNullValue()); assertThat(info.getMessage(), equalTo("index is red; no indexRoutingTable")); } @@ -244,7 +244,7 @@ public void testStepReturnsFalseIfTargetIndexIsMissing() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN, indexPrefix); ClusterStateWaitStep.Result result = step.isConditionMet(originalIndex.getIndex(), clusterState); assertThat(result.isComplete(), 
is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); String targetIndex = indexPrefix + originalIndex.getIndex().getName(); assertThat( info.getMessage(), @@ -304,7 +304,7 @@ public void testStepWaitsForTargetIndexHealthWhenPrefixConfigured() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(originalIndex.getIndex(), clusterTargetInitializing); assertThat(result.isComplete(), is(false)); - SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInfomationContext(); + SingleMessageFieldInfo info = (SingleMessageFieldInfo) result.getInformationContext(); assertThat(info.getMessage(), is("index is not green; not all shards are active")); } @@ -327,7 +327,7 @@ public void testStepWaitsForTargetIndexHealthWhenPrefixConfigured() { WaitForIndexColorStep step = new WaitForIndexColorStep(randomStepKey(), randomStepKey(), ClusterHealthStatus.GREEN); ClusterStateWaitStep.Result result = step.isConditionMet(originalIndex.getIndex(), clusterTargetInitializing); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java index 098c609cbbd33..ad5e4c9533c99 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexingCompleteStepTests.java @@ -66,7 +66,7 @@ public void testConditionMet() { WaitForIndexingCompleteStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); } public void testConditionMetNotAFollowerIndex() { @@ -83,7 +83,7 @@ public void testConditionMetNotAFollowerIndex() { WaitForIndexingCompleteStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(true)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); } public void testConditionNotMet() { @@ -105,9 +105,9 @@ public void testConditionNotMet() { WaitForIndexingCompleteStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(indexMetadata.getIndex(), clusterState); assertThat(result.isComplete(), is(false)); - assertThat(result.getInfomationContext(), notNullValue()); + assertThat(result.getInformationContext(), notNullValue()); WaitForIndexingCompleteStep.IndexingNotCompleteInfo info = (WaitForIndexingCompleteStep.IndexingNotCompleteInfo) result - .getInfomationContext(); + .getInformationContext(); assertThat( info.getMessage(), equalTo( @@ -123,6 +123,6 @@ public void testIndexDeleted() { WaitForIndexingCompleteStep step = createRandomInstance(); ClusterStateWaitStep.Result result = step.isConditionMet(new Index("this-index-doesnt-exist", "uuid"), clusterState); 
assertThat(result.isComplete(), is(false)); - assertThat(result.getInfomationContext(), nullValue()); + assertThat(result.getInformationContext(), nullValue()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java index 01a12fb795316..b67404956deb2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java @@ -86,9 +86,9 @@ public void testConditionMetWhenCCREnabled() { final SetOnce<ToXContentObject> stepInfoHolder = new SetOnce<>(); step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean conditionMet, ToXContentObject infomationContext) { + public void onResponse(boolean conditionMet, ToXContentObject informationContext) { conditionMetHolder.set(conditionMet); - stepInfoHolder.set(infomationContext); + stepInfoHolder.set(informationContext); } @Override @@ -120,9 +120,9 @@ public void testConditionMetWhenCCRDisabled() { final SetOnce<ToXContentObject> stepInfoHolder = new SetOnce<>(); step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean conditionMet, ToXContentObject infomationContext) { + public void onResponse(boolean conditionMet, ToXContentObject informationContext) { conditionMetHolder.set(conditionMet); - stepInfoHolder.set(infomationContext); + stepInfoHolder.set(informationContext); } @Override @@ -154,9 +154,9 @@ public void testConditionNotMet() { final SetOnce<ToXContentObject> stepInfoHolder = new SetOnce<>(); step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean conditionMet, ToXContentObject infomationContext) { + public void onResponse(boolean conditionMet, ToXContentObject informationContext) { conditionMetHolder.set(conditionMet); - stepInfoHolder.set(infomationContext); + stepInfoHolder.set(informationContext); } @Override @@ -195,9 +195,9 @@ public void testNoShardStats() { final SetOnce<ToXContentObject> stepInfoHolder = new SetOnce<>(); step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean conditionMet, ToXContentObject infomationContext) { + public void onResponse(boolean conditionMet, ToXContentObject informationContext) { conditionMetHolder.set(conditionMet); - stepInfoHolder.set(infomationContext); + stepInfoHolder.set(informationContext); } @Override @@ -235,12 +235,12 @@ public void testFailure() { final SetOnce<Exception> exceptionHolder = new SetOnce<>(); step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean conditionMet, ToXContentObject infomationContext) { + public void onResponse(boolean conditionMet, ToXContentObject informationContext) { fail( "onResponse should not be called in this test, called with conditionMet: " + conditionMet + " and stepInfo: " - + Strings.toString(infomationContext) + + Strings.toString(informationContext) ); } @Override @@ diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java index afb17644303bb..0264f7b09c6fd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java @@ -237,7 +237,7 @@ public void testEvaluateCondition() { step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { conditionsMet.set(complete); } @@ -289,7 +289,7 @@ public void testEvaluateConditionOnDataStreamTarget() { step.evaluateCondition(metadata, indexToOperateOn.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { conditionsMet.set(complete); } @@ -359,7 +359,7 @@ public void testSkipRolloverIfDataStreamIsAlreadyRolledOver() { step.evaluateCondition(metadata, indexToOperateOn.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { conditionsMet.set(complete); } @@ -471,7 +471,7 @@ public void testPerformActionWriteIndexIsFalse() { step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { fail("expecting failure as the write index must be set to true or null"); } @@ -512,7 +512,7 @@ public void testPerformActionWithIndexingComplete() { step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { conditionsMet.set(complete); } @@ -543,7 +543,7 @@ public void testPerformActionWithIndexingCompleteStillWriteIndex() { step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { throw new AssertionError("Should have failed with indexing_complete but index is not write index"); } @@ -573,7 +573,7 @@ public void testPerformActionNotComplete() { step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { actionCompleted.set(complete); } @@ -615,7 +615,7 @@ public void testPerformActionFailure() { step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new 
AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { throw new AssertionError("Unexpected method call"); } @@ -645,7 +645,7 @@ public void testPerformActionInvalidNullOrEmptyAlias() { SetOnce<Exception> exceptionThrown = new SetOnce<>(); step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { throw new AssertionError("Unexpected method call"); } @@ -680,7 +680,7 @@ public void testPerformActionAliasDoesNotPointToIndex() { SetOnce<Exception> exceptionThrown = new SetOnce<>(); step.evaluateCondition(Metadata.builder().put(indexMetadata, true).build(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { throw new AssertionError("Unexpected method call"); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java index 50ed7ddcc3f33..8ca6c0016a791 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitUntilTimeSeriesEndTimePassesStepTests.java @@ -78,7 +78,7 @@ public void testEvaluateCondition() { step.evaluateCondition(clusterState.metadata(), previousGeneration, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { assertThat(complete, is(true)); } @@ -96,9 +96,9 @@ public void onFailure(Exception e) { step.evaluateCondition(clusterState.metadata(), writeIndex, new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { assertThat(complete, is(false)); - String information = Strings.toString(infomationContext); + String information = Strings.toString(informationContext); assertThat( information, containsString( @@ -130,7 +130,7 @@ public void onFailure(Exception e) { step.evaluateCondition(newMetadata, indexMeta.getIndex(), new AsyncWaitStep.Listener() { @Override - public void onResponse(boolean complete, ToXContentObject infomationContext) { + public void onResponse(boolean complete, ToXContentObject informationContext) { assertThat(complete, is(true)); } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java index 81aea221ebb4f..50149ec2cbe58 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java @@ -386,7 +386,7 @@ public void collect(int docId, long owningBucketOrd) throws IOException { if 
(logger.isTraceEnabled()) { logger.trace( - "Doc: [{}] - _tsid: [{}], @timestamp: [{}}] -> downsample bucket ts: [{}]", + "Doc: [{}] - _tsid: [{}], @timestamp: [{}] -> downsample bucket ts: [{}]", docId, DocValueFormat.TIME_SERIES_ID.format(tsidHash), timestampFormat.format(timestamp), diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java index d46639d700420..ef455acb645d9 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java @@ -172,7 +172,7 @@ public EnrichPlugin(final Settings settings) { if (settings.hasValue(CACHE_SIZE_SETTING_NAME)) { throw new IllegalArgumentException( Strings.format( - "Both [{}] and [{}] are set, please use [{}]", + "Both [%s] and [%s] are set, please use [%s]", CACHE_SIZE_SETTING_NAME, CACHE_SIZE_SETTING_BWC_NAME, CACHE_SIZE_SETTING_NAME diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index f8f1fe872711d..9566aeb8f28dc 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -34,6 +34,7 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.hamcrest.Matchers.containsString; @@ -56,6 +57,11 @@ public class EsqlSecurityIT extends ESRestTestCase { .user("metadata1_read2", "x-pack-test-password", "metadata1_read2", false) .user("alias_user1", "x-pack-test-password", "alias_user1", false) .user("alias_user2", "x-pack-test-password", "alias_user2", false) + .user("logs_foo_all", "x-pack-test-password", "logs_foo_all", false) + .user("logs_foo_16_only", "x-pack-test-password", "logs_foo_16_only", false) + .user("logs_foo_after_2021", "x-pack-test-password", "logs_foo_after_2021", false) + .user("logs_foo_after_2021_pattern", "x-pack-test-password", "logs_foo_after_2021_pattern", false) + .user("logs_foo_after_2021_alias", "x-pack-test-password", "logs_foo_after_2021_alias", false) .build(); @Override @@ -342,6 +348,14 @@ public void testDocumentLevelSecurity() throws Exception { assertThat(respMap.get("values"), equalTo(List.of(List.of(10.0)))); } + public void testDocumentLevelSecurityFromStar() throws Exception { + Response resp = runESQLCommand("user3", "from in*x | stats sum=sum(value)"); + assertOK(resp); + Map<String, Object> respMap = entityAsMap(resp); + assertThat(respMap.get("columns"), equalTo(List.of(Map.of("name", "sum", "type", "double")))); + assertThat(respMap.get("values"), equalTo(List.of(List.of(10.0)))); + } + public void testFieldLevelSecurityAllow() throws Exception { Response resp = runESQLCommand("fls_user", "FROM index* | SORT value | LIMIT 1"); assertOK(resp); @@ -545,6 +559,22 @@ private void removeEnrichPolicy() throws Exception { client().performRequest(new Request("DELETE", "_enrich/policy/songs")); } + public void testDataStream() throws IOException { + createDataStream(); + MapMatcher twoResults = matchesMap().extraOk().entry("values", 
matchesList().item(matchesList().item(2))); + MapMatcher oneResult = matchesMap().extraOk().entry("values", matchesList().item(matchesList().item(1))); + assertMap(entityAsMap(runESQLCommand("logs_foo_all", "FROM logs-foo | STATS COUNT(*)")), twoResults); + assertMap(entityAsMap(runESQLCommand("logs_foo_16_only", "FROM logs-foo | STATS COUNT(*)")), oneResult); + assertMap(entityAsMap(runESQLCommand("logs_foo_after_2021", "FROM logs-foo | STATS COUNT(*)")), oneResult); + assertMap(entityAsMap(runESQLCommand("logs_foo_after_2021_pattern", "FROM logs-foo | STATS COUNT(*)")), oneResult); + assertMap(entityAsMap(runESQLCommand("logs_foo_after_2021_alias", "FROM alias-foo | STATS COUNT(*)")), oneResult); + assertMap(entityAsMap(runESQLCommand("logs_foo_all", "FROM logs-* | STATS COUNT(*)")), twoResults); + assertMap(entityAsMap(runESQLCommand("logs_foo_16_only", "FROM logs-* | STATS COUNT(*)")), oneResult); + assertMap(entityAsMap(runESQLCommand("logs_foo_after_2021", "FROM logs-* | STATS COUNT(*)")), oneResult); + assertMap(entityAsMap(runESQLCommand("logs_foo_after_2021_pattern", "FROM logs-* | STATS COUNT(*)")), oneResult); + assertMap(entityAsMap(runESQLCommand("logs_foo_after_2021_alias", "FROM alias-* | STATS COUNT(*)")), oneResult); + } + protected Response runESQLCommand(String user, String command) throws IOException { if (command.toLowerCase(Locale.ROOT).contains("limit") == false) { // add a (high) limit to avoid warnings on default limit @@ -592,4 +622,103 @@ static Settings randomPragmas() { } return settings.build(); } + + private void createDataStream() throws IOException { + createDataStreamPolicy(); + createDataStreamComponentTemplate(); + createDataStreamIndexTemplate(); + createDataStreamDocuments(); + createDataStreamAlias(); + } + + private void createDataStreamPolicy() throws IOException { + Request request = new Request("PUT", "_ilm/policy/my-lifecycle-policy"); + request.setJsonEntity(""" + { + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_primary_shard_size": "50gb" + } + } + }, + "delete": { + "min_age": "735d", + "actions": { + "delete": {} + } + } + } + } + }"""); + client().performRequest(request); + } + + private void createDataStreamComponentTemplate() throws IOException { + Request request = new Request("PUT", "_component_template/my-template"); + request.setJsonEntity(""" + { + "template": { + "settings": { + "index.lifecycle.name": "my-lifecycle-policy" + }, + "mappings": { + "properties": { + "@timestamp": { + "type": "date", + "format": "date_optional_time||epoch_millis" + }, + "data_stream": { + "properties": { + "namespace": {"type": "keyword"} + } + } + } + } + } + }"""); + client().performRequest(request); + } + + private void createDataStreamIndexTemplate() throws IOException { + Request request = new Request("PUT", "_index_template/my-index-template"); + request.setJsonEntity(""" + { + "index_patterns": ["logs-*"], + "data_stream": {}, + "composed_of": ["my-template"], + "priority": 500 + }"""); + client().performRequest(request); + } + + private void createDataStreamDocuments() throws IOException { + Request request = new Request("POST", "logs-foo/_bulk"); + request.addParameter("refresh", ""); + request.setJsonEntity(""" + { "create" : {} } + { "@timestamp": "2099-05-06T16:21:15.000Z", "data_stream": {"namespace": "16"} } + { "create" : {} } + { "@timestamp": "2001-05-06T16:21:15.000Z", "data_stream": {"namespace": "17"} } + """); + assertMap(entityAsMap(client().performRequest(request)), 
matchesMap().extraOk().entry("errors", false)); + } + + private void createDataStreamAlias() throws IOException { + Request request = new Request("PUT", "_alias"); + request.setJsonEntity(""" + { + "actions": [ + { + "add": { + "index": "logs-foo", + "alias": "alias-foo" + } + } + ] + }"""); + assertMap(entityAsMap(client().performRequest(request)), matchesMap().extraOk().entry("errors", false)); + } } diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml b/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml index 5c0164782d181..365a072edb74e 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml @@ -92,3 +92,57 @@ fls_user: privileges: [ 'read' ] field_security: grant: [ value ] + +logs_foo_all: + cluster: [] + indices: + - names: [ 'logs-foo' ] + privileges: [ 'read' ] + +logs_foo_16_only: + cluster: [] + indices: + - names: [ 'logs-foo' ] + privileges: [ 'read' ] + query: | + { + "term": { + "data_stream.namespace": "16" + } + } + +logs_foo_after_2021: + cluster: [] + indices: + - names: [ 'logs-foo' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } + +logs_foo_after_2021_pattern: + cluster: [] + indices: + - names: [ 'logs-*' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } + +logs_foo_after_2021_alias: + cluster: [] + indices: + - names: [ 'alias-foo' ] + privileges: [ 'read' ] + query: | + { + "range": { + "@timestamp": {"gte": "2021-01-01T00:00:00"} + } + } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java index 4c01d326ed7bc..6014e24e39c5f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java @@ -118,6 +118,11 @@ public IndicesRequest indices(String... 
indices) { return this; } + @Override + public boolean includeDataStreams() { + return true; + } + @Override public IndicesOptions indicesOptions() { return indicesOptions; diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index 8ba88865e361a..89d80cf34aec5 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.SearchContextMissingException; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -469,27 +468,25 @@ public void testCanMatch() throws IOException { ).canMatch() ); - expectThrows(SearchContextMissingException.class, () -> { - ShardSearchContextId withoutCommitId = new ShardSearchContextId(contextId.getSessionId(), contextId.getId(), null); - sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00")); - assertFalse( - searchService.canMatch( - new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - shard.shardId(), - 0, - 1, - AliasFilter.EMPTY, - 1f, - -1, - null, - withoutCommitId, - null - ) - ).canMatch() - ); - }); + ShardSearchContextId withoutCommitId = new ShardSearchContextId(contextId.getSessionId(), contextId.getId(), null); + sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00")); + assertTrue( + searchService.canMatch( + new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1f, + -1, + null, + withoutCommitId, + null + ) + ).canMatch() + ); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java index 8129fd84d222c..77b143f93576b 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTask.java @@ -180,7 +180,7 @@ public ClusterState doExecute(final ClusterState currentState) throws IOExceptio ); } } else { - final ToXContentObject stepInfo = result.getInfomationContext(); + final ToXContentObject stepInfo = result.getInformationContext(); if (logger.isTraceEnabled()) { logger.trace( "[{}] condition not met ({}) [{}], returning existing state (info: {})", diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTask.java index e56495bd3be05..cec4402c151a2 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTask.java @@ -15,7 +15,7 @@ import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.IndexMetadata; import 
org.elasticsearch.cluster.metadata.LifecycleExecutionState; -import org.elasticsearch.core.Strings; +import org.elasticsearch.common.Strings; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.ilm.Step; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java index f9e403582a0ec..0f5b0c6e976d6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockBaseClient.java @@ -12,11 +12,13 @@ import org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockModel; import java.time.Clock; +import java.time.Instant; import java.util.Objects; public abstract class AmazonBedrockBaseClient implements AmazonBedrockClient { protected final Integer modelKeysAndRegionHashcode; protected Clock clock = Clock.systemUTC(); + protected volatile Instant expiryTimestamp; protected AmazonBedrockBaseClient(AmazonBedrockModel model, @Nullable TimeValue timeout) { Objects.requireNonNull(model); @@ -33,5 +35,10 @@ public final void setClock(Clock clock) { this.clock = clock; } + // used for testing + Instant getExpiryTimestamp() { + return this.expiryTimestamp; + } + abstract void close(); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java index bd03909db380c..be90fbbd214d0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClient.java @@ -61,7 +61,6 @@ public class AmazonBedrockInferenceClient extends AmazonBedrockBaseClient { private final BedrockRuntimeAsyncClient internalClient; private final ThreadPool threadPool; - private volatile Instant expiryTimestamp; public static AmazonBedrockBaseClient create(AmazonBedrockModel model, @Nullable TimeValue timeout, ThreadPool threadPool) { try { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java index 339673e1302ac..9dfcddf777d8b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCache.java @@ -35,20 +35,24 @@ public AmazonBedrockInferenceClientCache(BiFunction<AmazonBedrockModel, TimeValue, AmazonBedrockBaseClient> creator, Clock clock) { - return clientsCache.computeIfAbsent(modelHash, hashKey -> { - final AmazonBedrockBaseClient builtClient = creator.apply(model, timeout); - builtClient.setClock(clock); - builtClient.resetExpiration(); - return builtClient; + return clientsCache.compute(modelHash, (hashKey, client) -> { + AmazonBedrockBaseClient clientToUse = client; + if (clientToUse == null) { + clientToUse = creator.apply(model, timeout); + } + + // for testing - would be nice to refactor client 
factory in the future to take clock as parameter + clientToUse.setClock(clock); + clientToUse.resetExpiration(); + return clientToUse; }); } finally { cacheLock.readLock().unlock(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsModel.java index cc69df86933de..54728a92e6254 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsModel.java @@ -114,7 +114,7 @@ private URI createUri() throws URISyntaxException { } return new URI( - elasticInferenceServiceComponents().elasticInferenceServiceUrl() + "/api/v1/sparse-text-embedding/" + modelIdUriPath + elasticInferenceServiceComponents().elasticInferenceServiceUrl() + "/api/v1/sparse-text-embeddings/" + modelIdUriPath ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java index bb7c669cdf09b..f90b25ae51b4d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockInferenceClientCacheTests.java @@ -60,6 +60,36 @@ public void testCache_ReturnsSameObject() throws IOException { assertThat(cacheInstance.clientCount(), is(0)); } + public void testCache_ItUpdatesExpirationForExistingClients() throws IOException { + var clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()); + AmazonBedrockInferenceClientCache cacheInstance; + try (var cache = new AmazonBedrockInferenceClientCache(AmazonBedrockMockInferenceClient::create, clock)) { + cacheInstance = cache; + + var model = AmazonBedrockEmbeddingsModelTests.createModel( + "inferenceId", + "testregion", + "model", + AmazonBedrockProvider.AMAZONTITAN, + "access_key", + "secret_key" + ); + + var client = cache.getOrCreateClient(model, null); + var expiryTimestamp = client.getExpiryTimestamp(); + assertThat(cache.clientCount(), is(1)); + + // set clock to clock + 1 minute so cache hasn't expired + cache.setClock(Clock.fixed(clock.instant().plus(Duration.ofMinutes(1)), ZoneId.systemDefault())); + + var regetClient = cache.getOrCreateClient(model, null); + + assertThat(client, sameInstance(regetClient)); + assertNotEquals(expiryTimestamp, regetClient.getExpiryTimestamp()); + } + assertThat(cacheInstance.clientCount(), is(0)); + } + public void testCache_ItEvictsExpiredClients() throws IOException { var clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()); AmazonBedrockInferenceClientCache cacheInstance; @@ -76,6 +106,10 @@ public void testCache_ItEvictsExpiredClients() throws IOException { ); var client = cache.getOrCreateClient(model, null); + assertThat(cache.clientCount(), is(1)); + + // set clock to clock + 2 minutes + cache.setClock(Clock.fixed(clock.instant().plus(Duration.ofMinutes(2)), ZoneId.systemDefault())); var secondModel = AmazonBedrockEmbeddingsModelTests.createModel( "inferenceId_two", @@ -86,22 +120,25 @@ public void testCache_ItEvictsExpiredClients() throws IOException { "other_secret_key" ); - assertThat(cache.clientCount(), is(1)); - var secondClient = cache.getOrCreateClient(secondModel, null); assertThat(client, not(sameInstance(secondClient))); assertThat(cache.clientCount(), is(2)); - // set clock to after expiry + // set clock to after expiry of first client but not after expiry of second client cache.setClock(Clock.fixed(clock.instant().plus(Duration.ofMinutes(CLIENT_CACHE_EXPIRY_MINUTES + 1)), ZoneId.systemDefault())); - // get another client, this will ensure flushExpiredClients is called + // retrieve the second client, this will ensure flushExpiredClients is called var regetSecondClient = cache.getOrCreateClient(secondModel, null); assertThat(secondClient, sameInstance(regetSecondClient)); + // expired first client should have been flushed + assertThat(cache.clientCount(), is(1)); + var regetFirstClient = cache.getOrCreateClient(model, null); assertThat(client, not(sameInstance(regetFirstClient))); + + assertThat(cache.clientCount(), is(2)); } assertThat(cacheInstance.clientCount(), is(0)); } diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java index 62716e11f1720..7f2243ed76849 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.migrate.MigratePlugin; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamRequest; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamResponse; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamStatus; import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTask; import java.util.Collection; @@ -39,6 +40,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -50,6 +52,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { } public void testNonExistentDataStream() { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); String nonExistentDataStreamName = randomAlphaOfLength(50); ReindexDataStreamRequest reindexDataStreamRequest = new ReindexDataStreamRequest( ReindexDataStreamAction.Mode.UPGRADE, @@ -63,12 +66,13 @@ public void testNonExistentDataStream() { } public void testAlreadyUpToDateDataStream() throws Exception { + assumeTrue("requires the migration reindex feature flag", REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()); String dataStreamName = randomAlphaOfLength(50).toLowerCase(Locale.ROOT); ReindexDataStreamRequest reindexDataStreamRequest = new ReindexDataStreamRequest( ReindexDataStreamAction.Mode.UPGRADE, dataStreamName ); - createDataStream(dataStreamName); + final int backingIndexCount = createDataStream(dataStreamName); ReindexDataStreamResponse response = client().execute( new ActionType<ReindexDataStreamResponse>(ReindexDataStreamAction.NAME), reindexDataStreamRequest 
@@ -78,7 +82,6 @@ public void testAlreadyUpToDateDataStream() throws Exception { AtomicReference<ReindexDataStreamTask> runningTask = new AtomicReference<>(); for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { TaskManager taskManager = transportService.getTaskManager(); - Map<Long, CancellableTask> tasksMap = taskManager.getCancellableTasks(); Optional<Map.Entry<Long, CancellableTask>> optionalTask = taskManager.getCancellableTasks() .entrySet() .stream() @@ -99,9 +102,24 @@ public void testAlreadyUpToDateDataStream() throws Exception { assertThat(task.getStatus().pending(), equalTo(0)); assertThat(task.getStatus().inProgress(), equalTo(0)); assertThat(task.getStatus().errors().size(), equalTo(0)); + + assertBusy(() -> { + GetMigrationReindexStatusAction.Response statusResponse = client().execute( + new ActionType<GetMigrationReindexStatusAction.Response>(GetMigrationReindexStatusAction.NAME), + new GetMigrationReindexStatusAction.Request(dataStreamName) + ).actionGet(); + ReindexDataStreamStatus status = (ReindexDataStreamStatus) statusResponse.getTask().getTask().status(); + assertThat(status.complete(), equalTo(true)); + assertThat(status.errors(), equalTo(List.of())); + assertThat(status.exception(), equalTo(null)); + assertThat(status.pending(), equalTo(0)); + assertThat(status.inProgress(), equalTo(0)); + assertThat(status.totalIndices(), equalTo(backingIndexCount)); + assertThat(status.totalIndicesToBeUpgraded(), equalTo(0)); + }); } - private void createDataStream(String dataStreamName) { + private int createDataStream(String dataStreamName) { final TransportPutComposableIndexTemplateAction.Request putComposableTemplateRequest = new TransportPutComposableIndexTemplateAction.Request("my-template"); putComposableTemplateRequest.indexTemplate( @@ -125,10 +143,13 @@ private void createDataStream(String dataStreamName) { client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest) ); assertThat(createDataStreamResponse.isAcknowledged(), is(true)); - indexDocs(dataStreamName); - safeGet(new RolloverRequestBuilder(client()).setRolloverTarget(dataStreamName).lazy(false).execute()); - indexDocs(dataStreamName); - safeGet(new RolloverRequestBuilder(client()).setRolloverTarget(dataStreamName).lazy(false).execute()); + int backingIndices = 1; + for (int i = 0; i < randomIntBetween(2, 5); i++) { + indexDocs(dataStreamName); + safeGet(new RolloverRequestBuilder(client()).setRolloverTarget(dataStreamName).lazy(false).execute()); + backingIndices++; + } + return backingIndices; } private void indexDocs(String dataStreamName) { diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java index ac9e38da07421..1af66a2c61d56 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java @@ -32,8 +32,11 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction; +import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusTransportAction; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction; import org.elasticsearch.xpack.migrate.action.ReindexDataStreamTransportAction; +import org.elasticsearch.xpack.migrate.rest.RestGetMigrationReindexStatusAction; import 
org.elasticsearch.xpack.migrate.rest.RestMigrationReindexAction; import org.elasticsearch.xpack.migrate.task.ReindexDataStreamPersistentTaskExecutor; import org.elasticsearch.xpack.migrate.task.ReindexDataStreamPersistentTaskState; @@ -65,6 +68,7 @@ public List<RestHandler> getRestHandlers( List<RestHandler> handlers = new ArrayList<>(); if (REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()) { handlers.add(new RestMigrationReindexAction()); + handlers.add(new RestGetMigrationReindexStatusAction()); } return handlers; } @@ -74,6 +78,7 @@ public List<RestHandler> getRestHandlers( List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actions = new ArrayList<>(); if (REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()) { actions.add(new ActionHandler<>(ReindexDataStreamAction.INSTANCE, ReindexDataStreamTransportAction.class)); + actions.add(new ActionHandler<>(GetMigrationReindexStatusAction.INSTANCE, GetMigrationReindexStatusTransportAction.class)); } return actions; } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java new file mode 100644 index 0000000000000..68ccaef4bf02c --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusAction.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskResult; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +import static java.util.Objects.requireNonNull; + +public class GetMigrationReindexStatusAction extends ActionType<GetMigrationReindexStatusAction.Response> { + + public static final GetMigrationReindexStatusAction INSTANCE = new GetMigrationReindexStatusAction(); + public static final String NAME = "indices:admin/migration/reindex_status"; + + public GetMigrationReindexStatusAction() { + super(NAME); + } + + public static class Response extends ActionResponse implements ToXContentObject { + private final TaskResult task; + + public Response(TaskResult task) { + this.task = requireNonNull(task, "task is required"); + } + + public Response(StreamInput in) throws IOException { + super(in); + task = in.readOptionalWriteable(TaskResult::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(task); + } + + /** + * Get the actual result of the fetch. 
+ */ + public TaskResult getTask() { + return task; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Task.Status status = task.getTask().status(); + if (status != null) { + task.getTask().status().toXContent(builder, params); + } + return builder; + } + + @Override + public int hashCode() { + return Objects.hashCode(task); + } + + @Override + public boolean equals(Object other) { + return other instanceof Response && task.equals(((Response) other).task); + } + + @Override + public String toString() { + String toString = Strings.toString(this); + return toString.isEmpty() ? "unavailable" : toString; + } + + } + + public static class Request extends ActionRequest implements IndicesRequest { + private final String index; + + public Request(String index) { + super(); + this.index = index; + } + + public Request(StreamInput in) throws IOException { + super(in); + this.index = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public String getIndex() { + return index; + } + + @Override + public int hashCode() { + return Objects.hashCode(index); + } + + @Override + public boolean equals(Object other) { + return other instanceof Request && index.equals(((Request) other).index); + } + + public Request nodeRequest(String thisNodeId, long thisTaskId) { + Request copy = new Request(index); + copy.setParentTask(thisNodeId, thisTaskId); + return copy; + } + + @Override + public String[] indices() { + return new String[] { index }; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + } +} diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java new file mode 100644 index 0000000000000..f2a6e33f7cb05 --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java
new file mode 100644
index 0000000000000..f2a6e33f7cb05
--- /dev/null
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.migrate.action;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionListenerResponseHandler;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.Strings;
+import org.elasticsearch.injection.guice.Inject;
+import org.elasticsearch.persistent.AllocatedPersistentTask;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.tasks.TaskResult;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Request;
+import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Response;
+
+import java.util.Map;
+import java.util.Optional;
+
+public class GetMigrationReindexStatusTransportAction extends HandledTransportAction<Request, Response> {
+    private final ClusterService clusterService;
+    private final TransportService transportService;
+
+    @Inject
+    public GetMigrationReindexStatusTransportAction(
+        ClusterService clusterService,
+        TransportService transportService,
+        ActionFilters actionFilters
+    ) {
+        super(GetMigrationReindexStatusAction.NAME, transportService, actionFilters, Request::new, EsExecutors.DIRECT_EXECUTOR_SERVICE);
+        this.clusterService = clusterService;
+        this.transportService = transportService;
+    }
+
+    @Override
+    protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
+        String index = request.getIndex();
+        String persistentTaskId = ReindexDataStreamAction.TASK_ID_PREFIX + index;
+        PersistentTasksCustomMetadata persistentTasksCustomMetadata = clusterService.state()
+            .getMetadata()
+            .custom(PersistentTasksCustomMetadata.TYPE);
+        // custom() returns null when no persistent tasks exist in the cluster state, so guard against that
+        PersistentTasksCustomMetadata.PersistentTask<?> persistentTask = persistentTasksCustomMetadata == null
+            ? null
+            : persistentTasksCustomMetadata.getTask(persistentTaskId);
+        if (persistentTask == null) {
+            listener.onFailure(new ResourceNotFoundException("No migration reindex status found for [{}]", index));
+        } else if (persistentTask.isAssigned()) {
+            String nodeId = persistentTask.getExecutorNode();
+            if (clusterService.localNode().getId().equals(nodeId)) {
+                getRunningTaskFromNode(persistentTaskId, listener);
+            } else {
+                runOnNodeWithTaskIfPossible(task, request, nodeId, listener);
+            }
+        } else {
+            listener.onFailure(new ElasticsearchException("Persistent task with id [{}] is not assigned to a node", persistentTaskId));
+        }
+    }
+
+    private Task getRunningPersistentTaskFromTaskManager(String persistentTaskId) {
+        Optional<Map.Entry<Long, CancellableTask>> optionalTask = taskManager.getCancellableTasks()
+            .entrySet()
+            .stream()
+            .filter(entry -> entry.getValue().getType().equals("persistent"))
+            .filter(
+                entry -> entry.getValue() instanceof AllocatedPersistentTask
+                    && persistentTaskId.equals(((AllocatedPersistentTask) entry.getValue()).getPersistentTaskId())
+            )
+            .findAny();
+        return optionalTask.map(Map.Entry::getValue).orElse(null);
+    }
+
+    void getRunningTaskFromNode(String persistentTaskId, ActionListener<Response> listener) {
+        Task runningTask = getRunningPersistentTaskFromTaskManager(persistentTaskId);
+        if (runningTask == null) {
+            listener.onFailure(
+                new ResourceNotFoundException(
+                    Strings.format(
+                        "Persistent task [%s] is supposed to be running on node [%s], but the task is not found on that node",
+                        persistentTaskId,
+                        clusterService.localNode().getId()
+                    )
+                )
+            );
+        } else {
+            TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true);
+            listener.onResponse(new Response(new TaskResult(false, info)));
+        }
+    }
+
+    private void runOnNodeWithTaskIfPossible(Task thisTask, Request request, String nodeId, ActionListener<Response> listener) {
+        DiscoveryNode node = clusterService.state().nodes().get(nodeId);
+        if (node == null) {
+            listener.onFailure(
+                new ResourceNotFoundException(
+                    Strings.format(
+                        "Persistent task for index [%s] is supposed to be running on node [%s], but that node is not part of the cluster",
+                        request.getIndex(),
+                        nodeId
+                    )
+                )
+            );
+        } else {
+            Request nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId());
+            transportService.sendRequest(
+                node,
+                GetMigrationReindexStatusAction.NAME,
+                nodeRequest,
+                TransportRequestOptions.EMPTY,
+                new ActionListenerResponseHandler<>(listener, Response::new, EsExecutors.DIRECT_EXECUTOR_SERVICE)
+            );
+        }
+    }
+}
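The transport action above derives the persistent task id by prefixing the index name and, when necessary, forwards the request to the node running the task, so a caller only needs the data stream name to poll progress. A minimal sketch, assuming a NodeClient is in scope (the class and data stream name are hypothetical):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction;

public final class ReindexStatusPoller {

    // Polls the reindex status for the (hypothetical) data stream "my-data-stream";
    // the action resolves the persistent task named "reindex-data-stream-my-data-stream".
    public static void poll(NodeClient client) {
        GetMigrationReindexStatusAction.Request request = new GetMigrationReindexStatusAction.Request("my-data-stream");
        client.execute(GetMigrationReindexStatusAction.INSTANCE, request, ActionListener.wrap(response -> {
            // Response.getTask() exposes the TaskResult captured from the running task
            System.out.println("completed: " + response.getTask().isCompleted());
        }, e -> System.err.println("status fetch failed: " + e.getMessage())));
    }
}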
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java
index eb7a910df8c0c..9e4cbb1082215 100644
--- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java
@@ -31,6 +31,7 @@ public class ReindexDataStreamAction extends ActionType<ReindexDataStreamAction.ReindexDataStreamResponse> {
 
     public static final FeatureFlag REINDEX_DATA_STREAM_FEATURE_FLAG = new FeatureFlag("reindex_data_stream");
+    public static final String TASK_ID_PREFIX = "reindex-data-stream-";
 
     public static final ReindexDataStreamAction INSTANCE = new ReindexDataStreamAction();
     public static final String NAME = "indices:admin/data_stream/reindex";
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java
index 7f68007f821ba..95a078690a055 100644
--- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java
@@ -26,6 +26,8 @@
 import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTask;
 import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTaskParams;
 
+import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.TASK_ID_PREFIX;
+
 /*
  * This transport action creates a new persistent task for reindexing the source data stream given in the request. On successful creation
  * of the persistent task, it responds with the persistent task id so that the user can monitor the persistent task.
@@ -87,6 +89,6 @@ protected void doExecute(Task task, ReindexDataStreamRequest request, ActionList
     }
 
     private String getPersistentTaskId(String dataStreamName) throws ResourceAlreadyExistsException {
-        return "reindex-data-stream-" + dataStreamName;
+        return TASK_ID_PREFIX + dataStreamName;
     }
 }
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestGetMigrationReindexStatusAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestGetMigrationReindexStatusAction.java
new file mode 100644
index 0000000000000..759104dd6f100
--- /dev/null
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestGetMigrationReindexStatusAction.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.migrate.rest;
+
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.GET;
+
+public class RestGetMigrationReindexStatusAction extends BaseRestHandler {
+
+    @Override
+    public String getName() {
+        return "get_migration_reindex_status_action";
+    }
+
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(GET, "/_migration/reindex/{index}/_status"));
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
+        String index = request.param("index");
+        GetMigrationReindexStatusAction.Request getTaskRequest = new GetMigrationReindexStatusAction.Request(index);
+        return channel -> client.execute(GetMigrationReindexStatusAction.INSTANCE, getTaskRequest, new RestToXContentListener<>(channel));
+    }
+}
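This handler registers the status action at GET /_migration/reindex/{index}/_status, which is only reachable when the reindex_data_stream feature flag is enabled. A sketch of exercising that route with the low-level REST client; host, port, and data stream name are hypothetical:

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public final class StatusEndpointExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            // GET the reindex status for the (hypothetical) data stream "my-data-stream"
            Response response = client.performRequest(new Request("GET", "/_migration/reindex/my-data-stream/_status"));
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}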
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestMigrationReindexAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestMigrationReindexAction.java
index a7f630d68234d..19cb439495e9a 100644
--- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestMigrationReindexAction.java
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/rest/RestMigrationReindexAction.java
@@ -20,11 +20,16 @@
 import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamResponse;
 
 import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import static org.elasticsearch.rest.RestRequest.Method.POST;
+import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG;
 
 public class RestMigrationReindexAction extends BaseRestHandler {
+    public static final String MIGRATION_REINDEX_CAPABILITY = "migration_reindex";
 
     @Override
     public String getName() {
@@ -49,6 +54,15 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
         );
     }
 
+    @Override
+    public Set<String> supportedCapabilities() {
+        Set<String> capabilities = new HashSet<>();
+        if (REINDEX_DATA_STREAM_FEATURE_FLAG.isEnabled()) {
+            capabilities.add(MIGRATION_REINDEX_CAPABILITY);
+        }
+        return Collections.unmodifiableSet(capabilities);
+    }
+
     static class ReindexDataStreamRestToXContentListener extends RestBuilderListener<ReindexDataStreamResponse> {
 
         ReindexDataStreamRestToXContentListener(RestChannel channel) {
diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionRequestTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionRequestTests.java
new file mode 100644
index 0000000000000..6943cf26f2b5e
--- /dev/null
+++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionRequestTests.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.migrate.action;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Request;
+
+import java.io.IOException;
+
+public class GetMigrationReindexStatusActionRequestTests extends AbstractWireSerializingTestCase<Request> {
+    @Override
+    protected Writeable.Reader<Request> instanceReader() {
+        return Request::new;
+    }
+
+    @Override
+    protected Request createTestInstance() {
+        return new Request(randomAlphaOfLength(100));
+    }
+
+    @Override
+    protected Request mutateInstance(Request instance) throws IOException {
+        return createTestInstance(); // There's only one field
+    }
+}
diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java
new file mode 100644
index 0000000000000..a18030edbf42c
--- /dev/null
+++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusActionResponseTests.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.migrate.action;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.tasks.RawTaskStatus;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.tasks.TaskResult;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xcontent.ToXContent;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xpack.migrate.action.GetMigrationReindexStatusAction.Response;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class GetMigrationReindexStatusActionResponseTests extends AbstractWireSerializingTestCase<Response> {
+    @Override
+    protected Writeable.Reader<Response> instanceReader() {
+        return Response::new;
+    }
+
+    @Override
+    protected Response createTestInstance() {
+        try {
+            return new Response(randomTaskResult());
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
+    protected Response mutateInstance(Response instance) throws IOException {
+        return createTestInstance(); // There's only one field
+    }
+
+    private static TaskResult randomTaskResult() throws IOException {
+        return switch (between(0, 2)) {
+            case 0 -> new TaskResult(randomBoolean(), randomTaskInfo());
+            case 1 -> new TaskResult(randomTaskInfo(), new RuntimeException("error"));
+            case 2 -> new TaskResult(randomTaskInfo(), randomTaskResponse());
+            default -> throw new UnsupportedOperationException("Unsupported random TaskResult constructor");
+        };
+    }
+
+    static TaskInfo randomTaskInfo() {
+        String nodeId = randomAlphaOfLength(5);
+        TaskId taskId = randomTaskId(nodeId);
+        String type = randomAlphaOfLength(5);
+        String action = randomAlphaOfLength(5);
+        Task.Status status = randomBoolean() ? randomRawTaskStatus() : null;
+        String description = randomBoolean() ? randomAlphaOfLength(5) : null;
+        long startTime = randomLong();
+        long runningTimeNanos = randomNonNegativeLong();
+        boolean cancellable = randomBoolean();
+        boolean cancelled = cancellable && randomBoolean();
+        TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId(randomAlphaOfLength(5));
+        Map<String, String> headers = randomBoolean()
+            ? Collections.emptyMap()
+            : Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5));
+        return new TaskInfo(
+            taskId,
+            type,
+            nodeId,
+            action,
+            description,
+            status,
+            startTime,
+            runningTimeNanos,
+            cancellable,
+            cancelled,
+            parentTaskId,
+            headers
+        );
+    }
+
+    private static TaskId randomTaskId(String nodeId) {
+        return new TaskId(nodeId, randomLong());
+    }
+
+    private static RawTaskStatus randomRawTaskStatus() {
+        try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) {
+            builder.startObject();
+            int fields = between(0, 10);
+            for (int f = 0; f < fields; f++) {
+                builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5));
+            }
+            builder.endObject();
+            return new RawTaskStatus(BytesReference.bytes(builder));
+        } catch (IOException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    private static ToXContent randomTaskResponse() {
+        Map<String, String> result = new TreeMap<>();
+        int fields = between(0, 10);
+        for (int f = 0; f < fields; f++) {
+            result.put(randomAlphaOfLength(5), randomAlphaOfLength(5));
+        }
+        return (builder, params) -> {
+            for (Map.Entry<String, String> entry : result.entrySet()) {
+                builder.field(entry.getKey(), entry.getValue());
+            }
+            return builder;
+        };
+    }
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(NetworkModule.getNamedWriteables());
+        // return new NamedWriteableRegistry(List.of(new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME,
+        // RawTaskStatus::new)));
+    }
+}
diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java
index 0ef7eebffadaf..4638ca1a21b01 100644
--- a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java
+++ b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java
@@ -297,6 +297,6 @@ protected boolean doEquals(PinnedQueryBuilder other) {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersions.V_7_4_0;
+        return TransportVersions.ZERO;
     }
 }
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java
index 3ee49cce85a8a..6115bec91ad62 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java
@@ -355,6 +355,18 @@ protected void assertExecutorIsIdle(String executorName) throws Exception {
         });
     }
 
+    protected static void waitUntilRecoveryIsDone(String index) throws Exception {
+        assertBusy(() -> {
+            RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries(index).get();
+            assertThat(recoveryResponse.hasRecoveries(), equalTo(true));
+            for (List<RecoveryState> value : recoveryResponse.shardRecoveryStates().values()) {
+                for (RecoveryState recoveryState : value) {
+                    assertThat(recoveryState.getStage(), equalTo(RecoveryState.Stage.DONE));
+                }
+            }
+        });
+    }
+
     public static class LicensedSnapshotBasedRecoveriesPlugin extends SnapshotBasedRecoveriesPlugin {
 
         public LicensedSnapshotBasedRecoveriesPlugin(Settings settings) {
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java
index 23e414c0dc1bf..291161c090c27 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.searchablesnapshots;
 
-import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequest;
@@ -36,7 +35,6 @@
 import org.elasticsearch.index.shard.IndexLongFieldRange;
 import org.elasticsearch.indices.DateFieldRangeInfo;
 import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.snapshots.SnapshotId;
@@ -1324,18 +1322,6 @@ private static IndexMetadata getIndexMetadata(String indexName) {
             .index(indexName);
     }
 
-    private static void waitUntilRecoveryIsDone(String index) throws Exception {
-        assertBusy(() -> {
-            RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries(index).get();
-            assertThat(recoveryResponse.hasRecoveries(), equalTo(true));
-            for (List<RecoveryState> value : recoveryResponse.shardRecoveryStates().values()) {
-                for (RecoveryState recoveryState : value) {
-                    assertThat(recoveryState.getStage(), equalTo(RecoveryState.Stage.DONE));
-                }
-            }
-        });
-    }
-
     private void waitUntilAllShardsAreUnassigned(Index index) throws Exception {
         awaitClusterState(state -> state.getRoutingTable().index(index).allPrimaryShardsUnassigned());
     }
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsSearchIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsSearchIntegTests.java
new file mode 100644
index 0000000000000..f77a3f5698c98
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsSearchIntegTests.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.searchablesnapshots;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.snapshots.SnapshotId;
+import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction;
+import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING;
+import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SearchableSnapshotsSearchIntegTests extends BaseFrozenSearchableSnapshotsIntegTestCase {
+
+    /**
+     * Tests basic search functionality with a query sorted by field against partially mounted indices.
+     * The can match phase is always executed against read-only indices, and for sorted queries it extracts the min and max range from
+     * each shard. This happens not only in the can match phase, but optionally also in the query phase.
+     * See {@link org.elasticsearch.search.internal.ShardSearchRequest#canReturnNullResponseIfMatchNoDocs()}.
+     * For keyword fields, it is not possible to retrieve min and max from the index reader on frozen indices, hence we need to make
+     * sure that although that lookup fails, the query goes ahead and does not return shard failures.
+     */
+    public void testKeywordSortedQueryOnFrozen() throws Exception {
+        internalCluster().startMasterOnlyNode();
+        internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
+        final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode();
+        String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode();
+
+        String[] indices = new String[] { "index-0001", "index-0002" };
+        for (String index : indices) {
+            Settings extraSettings = Settings.builder()
+                .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex)
+                .build();
+            // we use a high number of shards because that's more likely to trigger can match as part of query phase:
+            // see ShardSearchRequest#canReturnNullResponseIfMatchNoDocs
+            assertAcked(
+                indicesAdmin().prepareCreate(index)
+                    .setSettings(indexSettingsNoReplicas(10).put(INDEX_SOFT_DELETES_SETTING.getKey(), true).put(extraSettings))
+            );
+        }
+        ensureGreen(indices);
+
+        for (String index : indices) {
+            final List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
+            indexRequestBuilders.add(prepareIndex(index).setSource("keyword", "value1"));
+            indexRequestBuilders.add(prepareIndex(index).setSource("keyword", "value2"));
+            indexRandom(true, false, indexRequestBuilders);
+            assertThat(
+                indicesAdmin().prepareForceMerge(index).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(),
+                equalTo(0)
+            );
+            refresh(index);
+            forceMerge();
+        }
+
+        final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        createRepository(repositoryName, "mock");
+
+        final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indices[0])).snapshotId();
+        assertAcked(indicesAdmin().prepareDelete(indices[0]));
+
+        // Block the repository for the node holding the searchable snapshot shards
+        // to delay its restore
+        blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot);
+
+        // Force the searchable snapshot to be allocated in a particular node
+        Settings restoredIndexSettings = Settings.builder()
+            .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot)
+            .build();
+
+        String[] mountedIndices = new String[indices.length];
+        for (int i = 0; i < indices.length; i++) {
+            String index = indices[i];
+            String mountedIndex = index + "-mounted";
+            mountedIndices[i] = mountedIndex;
+            final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest(
+                TEST_REQUEST_TIMEOUT,
+                mountedIndex,
+                repositoryName,
+                snapshotId.getName(),
+                indices[0],
+                restoredIndexSettings,
+                Strings.EMPTY_ARRAY,
+                false,
+                randomFrom(MountSearchableSnapshotRequest.Storage.values())
+            );
+            client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet();
+        }
+
+        // Allow the searchable snapshots to be finally mounted
+        unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot);
+        for (String mountedIndex : mountedIndices) {
+            waitUntilRecoveryIsDone(mountedIndex);
+        }
+        ensureGreen(mountedIndices);
+
+        SearchRequest request = new SearchRequest(mountedIndices).searchType(SearchType.QUERY_THEN_FETCH)
+            .source(SearchSourceBuilder.searchSource().sort("keyword.keyword"))
+            .allowPartialSearchResults(false);
+        if (randomBoolean()) {
+            request.setPreFilterShardSize(100);
+        }
+
+        assertResponse(client().search(request), searchResponse -> {
+            assertThat(searchResponse.getSuccessfulShards(), equalTo(20));
+            assertThat(searchResponse.getFailedShards(), equalTo(0));
+            assertThat(searchResponse.getSkippedShards(), equalTo(0));
+            assertThat(searchResponse.getTotalShards(), equalTo(20));
+            assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(4L));
+        });
+    }
+}
diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
index c91314716cf9e..db87fdbcb8f1f 100644
--- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
+++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.security.operator;
 
 import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.common.util.FeatureFlag;
 
 import java.util.Objects;
 import java.util.Set;
@@ -494,7 +495,6 @@ public class Constants {
         "indices:admin/block/add[s]",
         "indices:admin/cache/clear",
         "indices:admin/data_stream/lazy_rollover",
-        "indices:admin/data_stream/reindex",
         "indices:internal/admin/ccr/restore/file_chunk/get",
         "indices:internal/admin/ccr/restore/session/clear",
         "indices:internal/admin/ccr/restore/session/put",
@@ -637,6 +637,8 @@ public class Constants {
         "internal:gateway/local/started_shards",
         "internal:admin/indices/prevalidate_shard_path",
         "internal:index/metadata/migration_version/update",
+        new FeatureFlag("reindex_data_stream").isEnabled() ? "indices:admin/migration/reindex_status" : null,
+        new FeatureFlag("reindex_data_stream").isEnabled() ? "indices:admin/data_stream/reindex" : null,
         "internal:admin/repository/verify",
         "internal:admin/repository/verify/coordinate"
     ).filter(Objects::nonNull).collect(Collectors.toUnmodifiableSet());
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilder.java
index c786b47e2a9a1..68ac10801a40d 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilder.java
+++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilder.java
@@ -176,6 +176,6 @@ public static ShapeQueryBuilder fromXContent(XContentParser parser) throws IOExc
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersions.V_7_4_0;
+        return TransportVersions.ZERO;
     }
 }
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/10_reindex.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/10_reindex.yml
index 01a41b3aa8c94..f50a7a65f53d3 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/10_reindex.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/10_reindex.yml
@@ -6,6 +6,13 @@ setup:
 
 ---
 "Test Reindex With Unsupported Mode":
+  - requires:
+      reason: "migration reindex is behind a feature flag"
+      test_runner_features: [capabilities]
+      capabilities:
+        - method: POST
+          path: /_migration/reindex
+          capabilities: [migration_reindex]
   - do:
       catch: /illegal_argument_exception/
       migrate.reindex:
@@ -19,6 +26,13 @@ setup:
 
 ---
 "Test Reindex With Nonexistent Data Stream":
+  - requires:
+      reason: "migration reindex is behind a feature flag"
+      test_runner_features: [capabilities]
+      capabilities:
+        - method: POST
+          path: /_migration/reindex
+          capabilities: [migration_reindex]
   - do:
       catch: /resource_not_found_exception/
       migrate.reindex:
@@ -44,6 +58,13 @@ setup:
 
 ---
 "Test Reindex With Bad Data Stream Name":
+  - requires:
+      reason: "migration reindex is behind a feature flag"
+      test_runner_features: [capabilities]
+      capabilities:
+        - method: POST
+          path: /_migration/reindex
+          capabilities: [migration_reindex]
   - do:
       catch: /illegal_argument_exception/
       migrate.reindex:
@@ -57,6 +78,13 @@ setup:
 
 ---
 "Test Reindex With Existing Data Stream":
+  - requires:
+      reason: "migration reindex is behind a feature flag"
+      test_runner_features: [capabilities]
+      capabilities:
+        - method: POST
+          path: /_migration/reindex
+          capabilities: [migration_reindex]
   - do:
       indices.put_index_template:
         name: my-template1
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml
new file mode 100644
index 0000000000000..ae343a0b4db95
--- /dev/null
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/migrate/20_reindex_status.yml
@@ -0,0 +1,70 @@
+---
+setup:
+  - do:
+      cluster.health:
+        wait_for_status: yellow
+
+---
+"Test get reindex status with nonexistent task id":
+  - requires:
+      reason: "migration reindex is behind a feature flag"
+      test_runner_features: [capabilities]
+      capabilities:
+        - method: POST
+          path: /_migration/reindex
+          capabilities: [migration_reindex]
+  - do:
+      catch: /resource_not_found_exception/
+      migrate.get_reindex_status:
+        index: "does_not_exist"
+
+---
+"Test Reindex With Existing Data Stream":
+  - requires:
+      reason: "migration reindex is behind a feature flag"
+      test_runner_features: [capabilities]
+      capabilities:
+        - method: POST
+          path: /_migration/reindex
+          capabilities: [migration_reindex]
+  - do:
+      indices.put_index_template:
+        name: my-template1
+        body:
+          index_patterns: [my-data-stream*]
+          template:
+            mappings:
+              properties:
+                '@timestamp':
+                  type: date
+                'foo':
+                  type: keyword
+          data_stream: {}
+
+  - do: # superuser
+      indices.create_data_stream:
+        name: my-data-stream
+  - is_true: acknowledged
+
+# Uncomment once the cancel API is in place
+#  - do:
+#      migrate.reindex:
+#        body: |
+#          {
+#            "mode": "upgrade",
+#            "source": {
+#              "index": "my-data-stream"
+#            }
+#          }
+#  - match: { acknowledged: true }
+#
+#  - do:
+#      migrate.get_reindex_status:
+#        index: "my-data-stream"
+#  - match: { complete: true }
+#  - match: { total_indices: 1 }
+#  - match: { total_indices_requiring_upgrade: 0 }
+#  - match: { successes: 0 }
+#  - match: { in_progress: 0 }
+#  - match: { pending: 0 }
+#  - match: { errors: [] }
diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java
index eeec67a0580fa..8400e7df54f4d 100644
--- a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java
+++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.example.realm.CustomRealm;
 import org.elasticsearch.example.realm.CustomRoleMappingRealm;
 import org.elasticsearch.example.role.CustomInMemoryRolesProvider;
+import org.elasticsearch.jdk.RuntimeVersionFeature;
 import org.elasticsearch.xpack.core.security.SecurityExtension;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler;
 import org.elasticsearch.xpack.core.security.authc.Realm;
@@ -35,11 +36,13 @@ public class ExampleSecurityExtension implements SecurityExtension {
 
     static {
-        // check that the extension's policy works.
-        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
-            System.getSecurityManager().checkPropertyAccess("myproperty");
-            return null;
-        });
+        if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
+            // check that the extension's policy works.
+            AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+                System.getSecurityManager().checkPropertyAccess("myproperty");
+                return null;
+            });
+        }
     }
 
     @Override