diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 48c888acd35e2..7475e77bc0805 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -163,6 +163,10 @@ org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures() @defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature) +@defaultMessage Do not construct these records outside the source files they are declared in +org.elasticsearch.cluster.SnapshotsInProgress$ShardSnapshotStatus#(java.lang.String, org.elasticsearch.cluster.SnapshotsInProgress$ShardState, org.elasticsearch.repositories.ShardGeneration, java.lang.String, org.elasticsearch.repositories.ShardSnapshotResult) +org.elasticsearch.cluster.SnapshotDeletionsInProgress$Entry#(java.lang.String, java.util.List, long, long, org.elasticsearch.cluster.SnapshotDeletionsInProgress$State, java.lang.String) + @defaultMessage Use a Thread constructor with a name, anonymous threads are more difficult to debug java.lang.Thread#(java.lang.Runnable) java.lang.Thread#(java.lang.ThreadGroup, java.lang.Runnable) diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java index 86df3544ddfc6..ca2cbc09f7c2f 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -201,14 +201,20 @@ public void beforeStart() { try { mockServer.start(); node.setting("telemetry.metrics.enabled", "true"); + 
node.setting("tracing.apm.agent.enabled", "true"); + node.setting("tracing.apm.agent.transaction_sample_rate", "0.10"); + node.setting("tracing.apm.agent.metrics_interval", "10s"); node.setting("tracing.apm.agent.server_url", "http://127.0.0.1:" + mockServer.getPort()); } catch (IOException e) { logger.warn("Unable to start APM server", e); } - } else if (node.getSettingKeys().contains("telemetry.metrics.enabled") == false) { - // in serverless metrics are enabled by default - // if metrics were not enabled explicitly for gradlew run we should disable them + } + // in serverless metrics are enabled by default + // if metrics were not enabled explicitly for gradlew run we should disable them + else if (node.getSettingKeys().contains("telemetry.metrics.enabled") == false) { // metrics node.setting("telemetry.metrics.enabled", "false"); + } else if (node.getSettingKeys().contains("tracing.apm.agent.enabled") == false) { // tracing + node.setting("tracing.apm.agent.enabled", "false"); } } diff --git a/docs/changelog/103084.yaml b/docs/changelog/103084.yaml new file mode 100644 index 0000000000000..fb5a718a086de --- /dev/null +++ b/docs/changelog/103084.yaml @@ -0,0 +1,6 @@ +pr: 103084 +summary: Return `matched_queries` in Percolator +area: Percolator +type: enhancement +issues: + - 10163 diff --git a/docs/reference/connector/apis/connector-apis.asciidoc b/docs/reference/connector/apis/connector-apis.asciidoc new file mode 100644 index 0000000000000..a777d5919f71a --- /dev/null +++ b/docs/reference/connector/apis/connector-apis.asciidoc @@ -0,0 +1,37 @@ +[[connector-apis]] +== Connector APIs + +preview::[] + +++++ +Connector APIs +++++ + +--- + +The connector and sync jobs API provides a convenient way to create and manage Elastic connectors and sync jobs in an internal index. + +This API provides an alternative to relying solely on the {kib} UI for connector and sync job management. 
The API comes with a set of +validations and assertions to ensure that the state representation in the internal index remains valid. + +[discrete] +[[elastic-connector-apis]] +=== Connector APIs + +You can use these APIs to create, get, delete and update connectors. + +Use the following APIs to manage connectors: + +* <> + + +[discrete] +[[sync-job-apis]] +=== Sync Job APIs + +You can use these APIs to create, cancel, delete and update sync jobs. + +Use the following APIs to manage sync jobs: + + +include::create-connector-api.asciidoc[] diff --git a/docs/reference/connector/apis/create-connector-api.asciidoc b/docs/reference/connector/apis/create-connector-api.asciidoc new file mode 100644 index 0000000000000..b62ca4ad070a4 --- /dev/null +++ b/docs/reference/connector/apis/create-connector-api.asciidoc @@ -0,0 +1,128 @@ +[[create-connector-api]] +=== Create connector API +++++ +Create connector +++++ + +Creates a connector. + + +[source,console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +//// +[source,console] +---- +DELETE _connector/my-connector +---- +// TEST[continued] +//// + +[[create-connector-api-request]] +==== {api-request-title} +`POST _connector` + +`PUT _connector/` + + +[[create-connector-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `service_type` parameter should reference an existing connector service type. + + +[[create-connector-api-desc]] +==== {api-description-title} + +Creates a connector document in the internal index and initializes its configuration, filtering, and scheduling with default values. These values can be updated later as needed. 
+ +[[create-connector-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) Unique identifier of a connector. + + +[role="child_attributes"] +[[create-connector-api-request-body]] +==== {api-request-body-title} + +`description`:: +(Optional, string) The description of the connector. + +`index_name`:: +(Required, string) The target index for syncing data by the connector. + +`name`:: +(Optional, string) The name of the connector. + +`is_native`:: +(Optional, boolean) Indicates if it's a native connector. Defaults to `false`. + +`language`:: +(Optional, string) Language analyzer for the data. Limited to supported languages. + +`service_type`:: +(Optional, string) Connector service type. Can reference Elastic-supported connector types or a custom connector type. + + +[role="child_attributes"] +[[create-connector-api-response-body]] +==== {api-response-body-title} + +`id`:: + (string) The ID associated with the connector document. Returned when using a POST request. + +`result`:: + (string) The result of the indexing operation, `created` or `updated`. Returned when using a PUT request. + +[[create-connector-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates that an existing connector was updated successfully. + +`201`:: +Indicates that the connector was created successfully. + +`400`:: +Indicates that the request was malformed. 
+ +[[create-connector-api-example]] +==== {api-examples-title} + +[source,console] +---- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "description": "My Connector to sync data to Elastic index from Google Drive", + "service_type": "google_drive", + "language": "english" +} +---- + + +The API returns the following result: + +[source,console-result] +---- +{ + "result": "created" +} +---- +//// +[source,console] +---- +DELETE _connector/my-connector +---- +// TEST[continued] +//// diff --git a/docs/reference/esql/functions/binary.asciidoc b/docs/reference/esql/functions/binary.asciidoc index 32e97b7316d84..2d4daa6ad2eca 100644 --- a/docs/reference/esql/functions/binary.asciidoc +++ b/docs/reference/esql/functions/binary.asciidoc @@ -2,19 +2,91 @@ [[esql-binary-operators]] === Binary operators -These binary comparison operators are supported: +[[esql-binary-operators-equality]] +==== Equality +[.text-center] +image::esql/functions/signature/equals.svg[Embedded,opts=inline] + +Supported types: + +include::types/equals.asciidoc[] + +==== Inequality `!=` +[.text-center] +image::esql/functions/signature/not_equals.svg[Embedded,opts=inline] -* equality: `==` -* inequality: `!=` -* less than: `<` -* less than or equal: `<=` -* larger than: `>` -* larger than or equal: `>=` +Supported types: -And these mathematical operators are supported: +include::types/not_equals.asciidoc[] +==== Less than `<` +[.text-center] +image::esql/functions/signature/less_than.svg[Embedded,opts=inline] + +Supported types: + +include::types/less_than.asciidoc[] + +==== Less than or equal to `<=` +[.text-center] +image::esql/functions/signature/less_than_or_equal.svg[Embedded,opts=inline] + +Supported types: + +include::types/less_than_or_equal.asciidoc[] + +==== Greater than `>` +[.text-center] +image::esql/functions/signature/greater_than.svg[Embedded,opts=inline] + +Supported types: + +include::types/greater_than.asciidoc[] + +==== Greater 
than or equal to `>=` +[.text-center] +image::esql/functions/signature/greater_than_or_equal.svg[Embedded,opts=inline] + +Supported types: + +include::types/greater_than_or_equal.asciidoc[] + +==== Add `+` [.text-center] image::esql/functions/signature/add.svg[Embedded,opts=inline] +Supported types: + +include::types/add.asciidoc[] + +==== Subtract `-` [.text-center] image::esql/functions/signature/sub.svg[Embedded,opts=inline] + +Supported types: + +include::types/sub.asciidoc[] + +==== Multiply `*` +[.text-center] +image::esql/functions/signature/mul.svg[Embedded,opts=inline] + +Supported types: + +include::types/mul.asciidoc[] + +==== Divide `/` +[.text-center] +image::esql/functions/signature/div.svg[Embedded,opts=inline] + +Supported types: + +include::types/div.asciidoc[] + +==== Modulus `%` +[.text-center] +image::esql/functions/signature/mod.svg[Embedded,opts=inline] + +Supported types: + +include::types/mod.asciidoc[] diff --git a/docs/reference/esql/functions/signature/greater_than_or_equal.svg b/docs/reference/esql/functions/signature/greater_than_or_equal.svg new file mode 100644 index 0000000000000..6afb36d4b4eff --- /dev/null +++ b/docs/reference/esql/functions/signature/greater_than_or_equal.svg @@ -0,0 +1 @@ +lhs>=rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/less_than_or_equal.svg b/docs/reference/esql/functions/signature/less_than_or_equal.svg new file mode 100644 index 0000000000000..da93c172b7136 --- /dev/null +++ b/docs/reference/esql/functions/signature/less_than_or_equal.svg @@ -0,0 +1 @@ +lhs<=rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_degrees.svg b/docs/reference/esql/functions/signature/to_degrees.svg deleted file mode 100644 index 01fe0a4770156..0000000000000 --- a/docs/reference/esql/functions/signature/to_degrees.svg +++ /dev/null @@ -1 +0,0 @@ -TO_DEGREES(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/add.asciidoc 
b/docs/reference/esql/functions/types/add.asciidoc new file mode 100644 index 0000000000000..7783d08bc3aaa --- /dev/null +++ b/docs/reference/esql/functions/types/add.asciidoc @@ -0,0 +1,12 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +date_period | date_period | date_period +date_period | datetime | datetime +datetime | date_period | datetime +datetime | time_duration | datetime +double | double | double +integer | integer | integer +long | long | long +time_duration | time_duration | time_duration +|=== diff --git a/docs/reference/esql/functions/types/div.asciidoc b/docs/reference/esql/functions/types/div.asciidoc new file mode 100644 index 0000000000000..eee2d68e4653f --- /dev/null +++ b/docs/reference/esql/functions/types/div.asciidoc @@ -0,0 +1,7 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +double | double | double +integer | integer | integer +long | long | long +|=== diff --git a/docs/reference/esql/functions/types/equals.asciidoc b/docs/reference/esql/functions/types/equals.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/equals.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/greater_than.asciidoc b/docs/reference/esql/functions/types/greater_than.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/greater_than.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ 
b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/less_than.asciidoc b/docs/reference/esql/functions/types/less_than.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/less_than.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/mod.asciidoc b/docs/reference/esql/functions/types/mod.asciidoc new file mode 100644 index 0000000000000..eee2d68e4653f --- /dev/null +++ b/docs/reference/esql/functions/types/mod.asciidoc @@ -0,0 +1,7 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +double | double | double +integer | integer | integer +long | long | long +|=== diff --git a/docs/reference/esql/functions/types/mul.asciidoc b/docs/reference/esql/functions/types/mul.asciidoc new file mode 100644 index 0000000000000..eee2d68e4653f --- /dev/null +++ b/docs/reference/esql/functions/types/mul.asciidoc @@ -0,0 +1,7 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +double | double | double +integer | integer | integer +long | long | long +|=== diff --git a/docs/reference/esql/functions/types/to_degrees.asciidoc b/docs/reference/esql/functions/types/neg.asciidoc similarity index 50% rename from 
docs/reference/esql/functions/types/to_degrees.asciidoc rename to docs/reference/esql/functions/types/neg.asciidoc index 7cb7ca46022c2..1b841483fb22e 100644 --- a/docs/reference/esql/functions/types/to_degrees.asciidoc +++ b/docs/reference/esql/functions/types/neg.asciidoc @@ -1,8 +1,9 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== v | result +date_period | date_period double | double -integer | double -long | double -unsigned_long | double +integer | integer +long | long +time_duration | time_duration |=== diff --git a/docs/reference/esql/functions/types/not_equals.asciidoc b/docs/reference/esql/functions/types/not_equals.asciidoc new file mode 100644 index 0000000000000..27fb19b6d38a2 --- /dev/null +++ b/docs/reference/esql/functions/types/not_equals.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +integer | integer | boolean +|=== diff --git a/docs/reference/esql/functions/types/sub.asciidoc b/docs/reference/esql/functions/types/sub.asciidoc new file mode 100644 index 0000000000000..ed26adf06ecde --- /dev/null +++ b/docs/reference/esql/functions/types/sub.asciidoc @@ -0,0 +1,11 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +lhs | rhs | result +date_period | date_period | date_period +datetime | date_period | datetime +datetime | time_duration | datetime +double | double | double +integer | integer | integer +long | long | long +time_duration | time_duration | time_duration +|=== diff --git a/docs/reference/esql/functions/unary.asciidoc b/docs/reference/esql/functions/unary.asciidoc index 2ee35b6c6256f..69ce754c1b4a0 100644 --- a/docs/reference/esql/functions/unary.asciidoc +++ b/docs/reference/esql/functions/unary.asciidoc @@ -2,7 +2,11 @@ [[esql-unary-operators]] === Unary operators -These unary mathematical operators are supported: +The only unary operator is negation (`-`): [.text-center] image::esql/functions/signature/neg.svg[Embedded,opts=inline] + +Supported types: + 
+include::types/neg.asciidoc[] diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index c5a3ebb782edd..25b995eefc219 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -326,6 +326,7 @@ case the search request would fail with a version conflict error. The search response returned is identical as in the previous example. + ==== Percolate query and highlighting The `percolate` query is handled in a special way when it comes to highlighting. The queries hits are used @@ -549,6 +550,136 @@ The slightly different response: <1> The highlight fields have been prefixed with the document slot they belong to, in order to know which highlight field belongs to what document. +==== Named queries within percolator queries + +If a stored percolator query is a complex query, and you want to track which of +its sub-queries matched a percolated document, then you can use the `\_name` +parameter for its sub-queries. In this case, in a response, each hit together with +a `_percolator_document_slot` field contains +`_percolator_document_slot__matched_queries` fields that show +which sub-queries matched each percolated document.
+ +For example: + +[source,console] +-------------------------------------------------- +PUT /my-index-000001/_doc/5?refresh +{ + "query": { + "bool": { + "should": [ + { + "match": { + "message": { + "query": "Japanese art", + "_name": "query1" + } + } + }, + { + "match": { + "message": { + "query": "Holand culture", + "_name": "query2" + } + } + } + ] + } + } +} +-------------------------------------------------- +// TEST[continued] + +[source,console] +-------------------------------------------------- +GET /my-index-000001/_search +{ + "query": { + "percolate": { + "field": "query", + "documents": [ + { + "message": "Japanse art" + }, + { + "message": "Holand culture" + }, + { + "message": "Japanese art and Holand culture" + }, + { + "message": "no-match" + } + ] + } + } +} +-------------------------------------------------- +// TEST[continued] + +[source,console-result] +-------------------------------------------------- +{ + "took": 55, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped" : 0, + "failed": 0 + }, + "hits": { + "total" : { + "value": 1, + "relation": "eq" + }, + "max_score": 1.1181908, + "hits": [ + { + "_index": "my-index-000001", + "_id": "5", + "_score": 1.1181908, + "_source": { + "query": { + "bool": { + "should": [ + { + "match": { + "message": { + "query": "Japanese art", + "_name": "query1" + } + } + }, + { + "match": { + "message": { + "query": "Holand culture", + "_name": "query2" + } + } + } + ] + } + } + }, + "fields" : { + "_percolator_document_slot" : [0, 1, 2], + "_percolator_document_slot_0_matched_queries" : ["query1"], <1> + "_percolator_document_slot_1_matched_queries" : ["query2"], <2> + "_percolator_document_slot_2_matched_queries" : ["query1", "query2"] <3> + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 55,/"took": "$body.took",/] +<1> The first document matched only the first sub-query. 
+<2> The second document matched only the second sub-query. +<3> The third document matched both sub-queries. + ==== Specifying multiple percolate queries It is possible to specify multiple `percolate` queries in a single search request: diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 15f7961298bf2..59d96d1a26904 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -18,6 +18,7 @@ not be included yet. * <> * <> * <> +* <> * <> * <> * <> @@ -66,6 +67,7 @@ include::{es-repo-dir}/behavioral-analytics/apis/index.asciidoc[] include::{es-repo-dir}/cat.asciidoc[] include::{es-repo-dir}/cluster.asciidoc[] include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] +include::{es-repo-dir}/connector/apis/connector-apis.asciidoc[] include::{es-repo-dir}/data-streams/data-stream-apis.asciidoc[] include::{es-repo-dir}/docs.asciidoc[] include::{es-repo-dir}/ingest/apis/enrich/index.asciidoc[] diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index ef772ed4c0402..b162083ebb926 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -51,7 +51,7 @@ equal to `1`. Defaults to `60`. `window_size`:: (Optional, integer) This value determines the size of the individual result sets per query. A higher value will improve result relevance at the cost of performance. The final -ranked result set is pruned down to the search request's <. +ranked result set is pruned down to the search request's <>. `window_size` must be greater than or equal to `size` and greater than or equal to `1`. Defaults to `100`. 
diff --git a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 36e3a2cb5e2a9..6df51189e918e 100644 --- a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -48,6 +48,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.function.Predicate; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -99,7 +100,7 @@ protected ClientYamlTestClient initClientYamlTestClient( final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion, + final Predicate clusterFeaturesPredicate, final String os ) { return new ClientYamlDocsTestClient( @@ -107,7 +108,7 @@ protected ClientYamlTestClient initClientYamlTestClient( restClient, hosts, esVersion, - masterVersion, + clusterFeaturesPredicate, os, this::getClientBuilderWithSniffedHosts ); diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java index 979815f497583..2c33b4f2dc992 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java @@ -73,7 +73,7 @@ public Collection createComponents(PluginServices services) { final APMMeterService apmMeter = new APMMeterService(settings); apmAgentSettings.addClusterSettingsListeners(services.clusterService(), telemetryProvider.get(), apmMeter); logger.info("Sending apm metrics is {}", APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.get(settings) ? "enabled" : "disabled"); - logger.info("Sending apm traces is {}", APMAgentSettings.APM_ENABLED_SETTING.get(settings) ? 
"enabled" : "disabled"); + logger.info("Sending apm tracing is {}", APMAgentSettings.APM_ENABLED_SETTING.get(settings) ? "enabled" : "disabled"); return List.of(apmTracer, apmMeter); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java index 7342a432dd5df..c4634f8d52729 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java @@ -9,8 +9,6 @@ package org.elasticsearch.index.mapper.extras; import org.apache.lucene.document.FeatureField; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -42,18 +40,6 @@ public class RankFeatureFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "rank_feature"; - public static class Defaults { - public static final FieldType FIELD_TYPE; - - static { - FieldType ft = new FieldType(); - ft.setTokenized(false); - ft.setIndexOptions(IndexOptions.NONE); - ft.setOmitNorms(true); - FIELD_TYPE = freezeAndDeduplicateFieldType(ft); - } - } - private static RankFeatureFieldType ft(FieldMapper in) { return ((RankFeatureFieldMapper) in).fieldType(); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java index 12829ca802425..4d04e83361252 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java @@ -206,14 +206,6 @@ 
public String analyzer() { return analyzer.name(); } - /** - * Indicates if position increments are counted. - * @return true if position increments are counted - */ - public boolean enablePositionIncrements() { - return enablePositionIncrements; - } - @Override protected String contentType() { return CONTENT_TYPE; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java index 11c726481d0b3..408b3f204de1a 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java @@ -143,7 +143,7 @@ boolean matchDocId(int docId) throws IOException { } @Override - public float score() throws IOException { + public float score() { return score; } }; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 886a67443e831..9a2653a61b60d 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -664,6 +664,11 @@ public > IFD getForField( CircuitBreakerService circuitBreaker = new NoneCircuitBreakerService(); return (IFD) builder.build(cache, circuitBreaker); } + + @Override + public void addNamedQuery(String name, Query query) { + delegate.addNamedQuery(name, query); + } }; } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index e212264287937..be8d342254afd 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ 
b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -530,6 +530,8 @@ static SearchExecutionContext configureContext(SearchExecutionContext context, b // as an analyzed string. wrapped.setAllowUnmappedFields(false); wrapped.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString); + // We need to rewrite queries with name to Lucene NamedQuery to find matched sub-queries of percolator query + wrapped.setRewriteToNamedQueries(); return wrapped; } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index 43f365a2a722b..83703dcf10971 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.NamedMatches; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -96,7 +97,30 @@ public void process(HitContext hitContext) throws IOException { IntStream slots = convertTopDocsToSlots(topDocs, pc.rootDocsBySlot); // _percolator_document_slot fields are document fields and should be under "fields" section in a hit - hitContext.hit().setDocumentField(fieldName, new DocumentField(fieldName, slots.boxed().collect(Collectors.toList()))); + List docSlots = slots.boxed().collect(Collectors.toList()); + hitContext.hit().setDocumentField(fieldName, new DocumentField(fieldName, docSlots)); + + // Add info what sub-queries of percolator query matched this each percolated document + if (fetchContext.getSearchExecutionContext().hasNamedQueries()) { + List leafContexts = 
percolatorIndexSearcher.getLeafContexts(); + assert leafContexts.size() == 1 : "Expected single leaf, but got [" + leafContexts.size() + "]"; + LeafReaderContext memoryReaderContext = leafContexts.get(0); + Weight weight = percolatorIndexSearcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1); + for (int i = 0; i < topDocs.scoreDocs.length; i++) { + List namedMatchesList = NamedMatches.findNamedMatches( + weight.matches(memoryReaderContext, topDocs.scoreDocs[i].doc) + ); + if (namedMatchesList.isEmpty()) { + continue; + } + List matchedQueries = new ArrayList<>(namedMatchesList.size()); + for (NamedMatches match : namedMatchesList) { + matchedQueries.add(match.getName()); + } + String matchedFieldName = fieldName + "_" + docSlots.get(i) + "_matched_queries"; + hitContext.hit().setDocumentField(matchedFieldName, new DocumentField(matchedFieldName, matchedQueries)); + } + } } } }; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java index 5f3ff5264497a..a924c0e323f96 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -9,7 +9,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -24,6 +26,7 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.SearchHit; import 
org.elasticsearch.search.lookup.LeafDocLookup; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -36,6 +39,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Function; @@ -335,4 +339,93 @@ public void testRangeQueriesWithNow() throws Exception { } } + public void testPercolateNamedQueries() { + String mapping = """ + { + "dynamic" : "strict", + "properties" : { + "my_query" : { "type" : "percolator" }, + "description" : { "type" : "text"}, + "num_of_bedrooms" : { "type" : "integer"}, + "type" : { "type" : "keyword"}, + "price": { "type": "float"} + } + } + """; + indicesAdmin().prepareCreate("houses").setMapping(mapping).get(); + String source = """ + { + "my_query" : { + "bool": { + "should": [ + { "match": { "description": { "query": "fireplace", "_name": "fireplace_query" } } }, + { "match": { "type": { "query": "detached", "_name": "detached_query" } } } + ], + "filter": { + "match": { + "num_of_bedrooms": {"query": 3, "_name": "3_bedrooms_query"} + } + } + } + } + } + """; + prepareIndex("houses").setId("query_3_bedroom_detached_house_with_fireplace").setSource(source, XContentType.JSON).get(); + indicesAdmin().prepareRefresh().get(); + + source = """ + { + "my_query" : { + "bool": { + "filter": [ + { "match": { "description": { "query": "swimming pool", "_name": "swimming_pool_query" } } }, + { "match": { "num_of_bedrooms": {"query": 3, "_name": "3_bedrooms_query"} } } + ] + } + } + } + """; + prepareIndex("houses").setId("query_3_bedroom_house_with_swimming_pool").setSource(source, XContentType.JSON).get(); + indicesAdmin().prepareRefresh().get(); + + BytesArray house1_doc = new BytesArray(""" + { + "description": "house with a beautiful fireplace and swimming pool", + "num_of_bedrooms": 3, + "type": "detached", + "price": 1000000 + } + """); + + BytesArray house2_doc = new BytesArray(""" + { + 
"description": "house has a wood burning fireplace", + "num_of_bedrooms": 3, + "type": "semi-detached", + "price": 500000 + } + """); + + QueryBuilder query = new PercolateQueryBuilder("my_query", List.of(house1_doc, house2_doc), XContentType.JSON); + SearchResponse response = client().prepareSearch("houses").setQuery(query).get(); + assertEquals(2, response.getHits().getTotalHits().value); + + SearchHit[] hits = response.getHits().getHits(); + assertThat(hits[0].getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0, 1))); + assertThat( + hits[0].getFields().get("_percolator_document_slot_0_matched_queries").getValues(), + equalTo(Arrays.asList("fireplace_query", "detached_query", "3_bedrooms_query")) + ); + assertThat( + hits[0].getFields().get("_percolator_document_slot_1_matched_queries").getValues(), + equalTo(Arrays.asList("fireplace_query", "3_bedrooms_query")) + ); + + assertThat(hits[1].getFields().get("_percolator_document_slot").getValues(), equalTo(Arrays.asList(0))); + assertThat( + hits[1].getFields().get("_percolator_document_slot_0_matched_queries").getValues(), + equalTo(Arrays.asList("swimming_pool_query", "3_bedrooms_query")) + ); + } + } diff --git a/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/20_matched_queries.yml b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/20_matched_queries.yml new file mode 100644 index 0000000000000..1e692bc43faba --- /dev/null +++ b/modules/percolator/src/yamlRestTest/resources/rest-api-spec/test/20_matched_queries.yml @@ -0,0 +1,125 @@ +setup: + - skip: + version: " - 8.12.99" + reason: "Displaying matched named queries within percolator queries was added in 8.13" + - do: + indices.create: + index: houses + body: + mappings: + dynamic: strict + properties: + my_query: + type: percolator + description: + type: text + num_of_bedrooms: + type: integer + type: + type: keyword + price: + type: integer + + - do: + index: + refresh: true + index: houses + 
id: query_3_bedroom_detached_house_with_fireplace + body: + my_query: + { + "bool": { + "should": [ + { "match": { "description": { "query": "fireplace"} } }, + { "match": { "type": { "query": "detached", "_name": "detached_query" } } } + ], + "filter": { + "match": { + "num_of_bedrooms": {"query": 3, "_name": "3_bedrooms_query"} + } + } + } + } + + - do: + index: + refresh: true + index: houses + id: query_3_bedroom_house_with_swimming_pool + body: + my_query: + { + "bool": { + "filter": [ + { "match": { "description": { "query": "swimming pool", "_name": "swimming_pool_query" } } }, + { "match": { "num_of_bedrooms": {"query": 3, "_name": "3_bedrooms_query"} } } + ] + } + } + +--- +"Matched named queries within percolator queries: percolate existing document": + - do: + index: + refresh: true + index: houses + id: house1 + body: + description: "house with a beautiful fireplace and swimming pool" + num_of_bedrooms: 3 + type: detached + price: 1000000 + + - do: + search: + index: houses + body: + query: + percolate: + field: my_query + index: houses + id: house1 + + - match: { hits.total.value: 2 } + + - match: { hits.hits.0._id: query_3_bedroom_detached_house_with_fireplace } + - match: { hits.hits.0.fields._percolator_document_slot: [0] } + - match: { hits.hits.0.fields._percolator_document_slot_0_matched_queries: ["detached_query", "3_bedrooms_query"] } + + - match: { hits.hits.1._id: query_3_bedroom_house_with_swimming_pool } + - match: { hits.hits.1.fields._percolator_document_slot: [0] } + - match: { hits.hits.1.fields._percolator_document_slot_0_matched_queries: ["swimming_pool_query", "3_bedrooms_query"] } + + +--- +"Matched named queries within percolator queries: percolate multiple documents in request": + - do: + search: + index: houses + body: + query: + percolate: + field: my_query + documents: + - { + "description": "house with a beautiful fireplace and swimming pool", + "num_of_bedrooms": 3, + "type": "detached", + "price": 1000000 + } + - { + 
"description": "house has a wood burning fireplace", + "num_of_bedrooms": 3, + "type": "semi-detached", + "price": 500000 + } + + - match: { hits.total.value: 2 } + + - match: { hits.hits.0._id: query_3_bedroom_detached_house_with_fireplace } + - match: { hits.hits.0.fields._percolator_document_slot: [0, 1] } + - match: { hits.hits.0.fields._percolator_document_slot_0_matched_queries: ["detached_query", "3_bedrooms_query"] } + + - match: { hits.hits.1._id: query_3_bedroom_house_with_swimming_pool } + - match: { hits.hits.1.fields._percolator_document_slot: [0] } + - match: { hits.hits.1.fields._percolator_document_slot_0_matched_queries: ["swimming_pool_query", "3_bedrooms_query"] } diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java index 50dea29683540..a4f939fbe3af8 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java @@ -9,12 +9,12 @@ package org.elasticsearch.index.reindex; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.AbstractMultiClustersTestCase; import java.util.Collection; @@ -60,11 +60,9 @@ public void testReindexFromRemoteGivenIndexExists() throws Exception { new ReindexRequestBuilder(client(LOCAL_CLUSTER)).source(sourceIndexInRemote).destination("desc-index-001").get(); assertTrue("Number of documents in source and desc indexes does 
not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("desc-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("desc-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } @@ -77,11 +75,9 @@ public void testReindexFromRemoteGivenSameIndexNames() throws Exception { new ReindexRequestBuilder(client(LOCAL_CLUSTER)).source(sourceIndexInRemote).destination("test-index-001").get(); assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("test-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("test-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } @@ -108,11 +104,9 @@ public void testReindexManyTimesFromRemoteGivenSameIndexNames() throws Exception } assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("test-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("test-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } @@ -142,11 +136,9 @@ public 
void testReindexFromRemoteGivenSimpleDateMathIndexName() throws Interrupt new ReindexRequestBuilder(client(LOCAL_CLUSTER)).source(sourceIndexInRemote).destination("desc-index-001").get(); assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("desc-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("desc-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } @@ -160,11 +152,9 @@ public void testReindexFromRemoteGivenComplexDateMathIndexName() throws Interrup new ReindexRequestBuilder(client(LOCAL_CLUSTER)).source(sourceIndexInRemote).destination("desc-index-001").get(); assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { - SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("desc-index-001") - .setQuery(new MatchAllQueryBuilder()) - .setSize(1000) - .get(); - final TotalHits totalHits = resp.getHits().getTotalHits(); + final TotalHits totalHits = SearchResponseUtils.getTotalHits( + client(LOCAL_CLUSTER).prepareSearch("desc-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000) + ); return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; })); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java index fac18c4f6f544..17dd1503e6c89 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java @@ -24,6 +24,7 @@ import 
org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryRequestBuilder; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.InternalSettingsPlugin; @@ -159,7 +160,7 @@ public void testDeleteByQueryWithRouting() throws Exception { String routing = String.valueOf(randomIntBetween(2, docs)); logger.info("--> counting documents with routing [{}]", routing); - long expected = prepareSearch().setSize(0).setRouting(routing).get().getHits().getTotalHits().value; + long expected = SearchResponseUtils.getTotalHitsValue(prepareSearch().setSize(0).setRouting(routing)); logger.info("--> delete all documents with routing [{}] with a delete-by-query", routing); DeleteByQueryRequestBuilder delete = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()); diff --git a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java index a47b9d8b622b5..e0d8eb86613ba 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java @@ -26,6 +26,7 @@ import java.util.Collections; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -63,7 +64,7 @@ public void testUrlRepository() throws Exception { indexDoc("test-idx", Integer.toString(i), "foo", "bar" + i); } refresh(); - 
assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); + assertHitCount(client.prepareSearch("test-idx").setSize(0), 100); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -110,7 +111,7 @@ public void testUrlRepository() throws Exception { .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); + assertHitCount(client.prepareSearch("test-idx").setSize(0), 100); logger.info("--> list available shapshots"); GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get(); diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 706fb057cc8ee..c1e2888c47c62 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -8,10 +8,8 @@ package org.elasticsearch.index.mapper.murmur3; -import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.StoredField; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.hash.MurmurHash3; @@ -36,15 +34,6 @@ public class Murmur3FieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "murmur3"; - public static class Defaults { - public static final FieldType FIELD_TYPE; - static { - FieldType ft = new FieldType(); - ft.setIndexOptions(IndexOptions.NONE); - FIELD_TYPE = freezeAndDeduplicateFieldType(ft); - } - } - private static 
Murmur3FieldMapper toType(FieldMapper in) { return (Murmur3FieldMapper) in; } diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index 16e8d2610f3fb..313dcdd6623c4 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -276,6 +277,6 @@ public void testReplicationFactorOverReplicationMax() { } private long count(Client client, String index) { - return client.prepareSearch(index).setSize(0).get().getHits().getTotalHits().value; + return SearchResponseUtils.getTotalHitsValue(client.prepareSearch(index).setSize(0)); } } diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index 7c1514d2d1a6a..b818de468ea2c 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import 
org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -50,6 +51,7 @@ import java.util.Map; import java.util.Set; import java.util.function.BiPredicate; +import java.util.function.Predicate; import static java.util.Collections.unmodifiableList; @@ -159,7 +161,6 @@ public void initSearchClient() throws IOException { Tuple versionVersionTuple = readVersionsFromCatNodes(adminSearchClient); final Version esVersion = versionVersionTuple.v1(); - final Version masterVersion = versionVersionTuple.v2(); final String os = readOsFromNodesInfo(adminSearchClient); searchYamlTestClient = new TestCandidateAwareClient( @@ -167,7 +168,7 @@ public void initSearchClient() throws IOException { searchClient, hosts, esVersion, - masterVersion, + ESRestTestCase::clusterHasFeature, os, this::getClientBuilderWithSniffedHosts ); @@ -328,11 +329,11 @@ static class TestCandidateAwareClient extends ClientYamlTestClient { RestClient restClient, List hosts, Version esVersion, - Version masterVersion, + Predicate clusterFeaturesPredicate, String os, CheckedSupplier clientBuilderWithSniffedNodes ) { - super(restSpec, restClient, hosts, esVersion, masterVersion, os, clientBuilderWithSniffedNodes); + super(restSpec, restClient, hosts, esVersion, clusterFeaturesPredicate, os, clientBuilderWithSniffedNodes); } public void setTestCandidate(ClientYamlTestCandidate testCandidate) { diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index 4fc82bb77fbb6..51d499db61932 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -31,6 +31,7 @@ import 
org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT.TestCandidateAwareClient; import org.junit.AfterClass; @@ -222,7 +223,6 @@ public void initSearchClient() throws IOException { Tuple versionVersionTuple = readVersionsFromCatNodes(adminSearchClient); final Version esVersion = versionVersionTuple.v1(); - final Version masterVersion = versionVersionTuple.v2(); final String os = readOsFromNodesInfo(adminSearchClient); searchYamlTestClient = new TestCandidateAwareClient( @@ -230,7 +230,7 @@ public void initSearchClient() throws IOException { searchClient, hosts, esVersion, - masterVersion, + ESRestTestCase::clusterHasFeature, os, this::getClientBuilderWithSniffedHosts ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 05e3b81c3683f..45906abd29ff8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -7,14 +7,18 @@ */ package org.elasticsearch.action.admin; +import org.apache.logging.log4j.Level; import org.apache.lucene.util.Constants; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.common.logging.ChunkedLoggingStreamTests; import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matcher; import java.util.Map; @@ -29,6 +33,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; @@ -176,4 +181,25 @@ public void testTimestampAndParams() throws ExecutionException, InterruptedExcep } } } + + @TestLogging(reason = "testing logging at various levels", value = "org.elasticsearch.action.admin.HotThreadsIT:TRACE") + public void testLogLocalHotThreads() { + final var level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); + assertThat( + ChunkedLoggingStreamTests.getDecodedLoggedBody( + logger, + level, + getTestName(), + ReferenceDocs.LOGGING, + () -> HotThreads.logLocalHotThreads(logger, level, getTestName(), ReferenceDocs.LOGGING) + ).utf8ToString(), + allOf( + containsString("Hot threads at"), + containsString("interval=500ms"), + containsString("busiestThreads=500"), + containsString("ignoreIdleThreads=false"), + containsString("cpu usage by thread") + ) + ); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java index 1a8f928d9c10f..199a397f52ad2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/SearchIdleIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.rest.RestStatus; +import 
org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -48,14 +49,7 @@ public class SearchIdleIT extends ESSingleNodeTestCase { public void testAutomaticRefreshSearch() throws InterruptedException { - runTestAutomaticRefresh(numDocs -> { - var resp = client().prepareSearch("test").get(); - try { - return resp.getHits().getTotalHits().value; - } finally { - resp.decRef(); - } - }); + runTestAutomaticRefresh(numDocs -> SearchResponseUtils.getTotalHitsValue(client().prepareSearch("test"))); } public void testAutomaticRefreshGet() throws InterruptedException { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index b04aa321f70f1..df59ab18bef72 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -510,7 +510,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { logger.info("--> wait for delete to be enqueued in cluster state"); awaitClusterState(state -> { final SnapshotDeletionsInProgress deletionsInProgress = state.custom(SnapshotDeletionsInProgress.TYPE); - return deletionsInProgress.getEntries().size() == 1 && deletionsInProgress.getEntries().get(0).getSnapshots().size() == 3; + return deletionsInProgress.getEntries().size() == 1 && deletionsInProgress.getEntries().get(0).snapshots().size() == 3; }); logger.info("--> waiting for second snapshot to finish and the other two snapshots to become aborted"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java index 4c9de6cb5369f..23f218130a053 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java @@ -86,7 +86,7 @@ public void testNoDoubleFinalization() throws Exception { final SnapshotDeletionsInProgress snapshotDeletionsInProgress = SnapshotDeletionsInProgress.get(state); return snapshotDeletionsInProgress.getEntries() .stream() - .flatMap(entry -> entry.getSnapshots().stream()) + .flatMap(entry -> entry.snapshots().stream()) .anyMatch(snapshotId -> snapshotId.getName().equals("snap-1")); }); @@ -149,7 +149,7 @@ public void testNoDoubleFinalization() throws Exception { .stream() .anyMatch( entry -> entry.state() == SnapshotDeletionsInProgress.State.WAITING - && entry.getSnapshots().stream().anyMatch(snapshotId -> snapshotId.getName().equals("snap-2")) + && entry.snapshots().stream().anyMatch(snapshotId -> snapshotId.getName().equals("snap-2")) ); }); new Thread(() -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 3fabd53299674..ea56c85e36a3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.io.StringWriter; import java.util.List; public class TransportNodesHotThreadsAction extends TransportNodesAction< @@ -79,8 +80,9 @@ protected NodeHotThreads nodeOperation(NodeRequest request, Task task) { 
.interval(request.request.interval) .threadElementsSnapshotCount(request.request.snapshots) .ignoreIdleThreads(request.request.ignoreIdleThreads); - try { - return new NodeHotThreads(clusterService.localNode(), hotThreads.detect()); + try (var writer = new StringWriter()) { + hotThreads.detect(writer); + return new NodeHotThreads(clusterService.localNode(), writer.toString()); } catch (Exception e) { throw new ElasticsearchException("failed to detect hot threads", e); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java b/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java index 98dfc48dd3cd0..c62eeeab3e479 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java @@ -106,9 +106,9 @@ private static ClusterSnapshotStats of( for (SnapshotDeletionsInProgress.Entry entry : snapshotDeletionsInProgress.getEntries()) { if (entry.repository().equals(repositoryName)) { - firstStartTimeMillis = Math.min(firstStartTimeMillis, entry.getStartTime()); + firstStartTimeMillis = Math.min(firstStartTimeMillis, entry.startTime()); deletionsCount += 1; - snapshotDeletionsCount += entry.getSnapshots().size(); + snapshotDeletionsCount += entry.snapshots().size(); if (entry.state() == SnapshotDeletionsInProgress.State.STARTED) { activeDeletionsCount += 1; } diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java index d3a9397d148cf..2dba73a3ec68f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java @@ -38,7 +38,7 @@ public RepositoryCleanupInProgress(List entries) { } RepositoryCleanupInProgress(StreamInput in) throws IOException { - this.entries = in.readCollectionAsList(Entry::new); + 
this.entries = in.readCollectionAsList(Entry::readFrom); } public static NamedDiff readDiffFrom(StreamInput in) throws IOException { @@ -92,20 +92,10 @@ public TransportVersion getMinimalSupportedVersion() { return TransportVersions.V_7_4_0; } - public static final class Entry implements Writeable, RepositoryOperation { + public record Entry(String repository, long repositoryStateId) implements Writeable, RepositoryOperation { - private final String repository; - - private final long repositoryStateId; - - private Entry(StreamInput in) throws IOException { - repository = in.readString(); - repositoryStateId = in.readLong(); - } - - public Entry(String repository, long repositoryStateId) { - this.repository = repository; - this.repositoryStateId = repositoryStateId; + public static Entry readFrom(StreamInput in) throws IOException { + return new Entry(in.readString(), in.readLong()); } @Override @@ -123,10 +113,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(repository); out.writeLong(repositoryStateId); } - - @Override - public String toString() { - return "{" + repository + '}' + '{' + repositoryStateId + '}'; - } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java index bd7a2ed1cffc0..2b618aa53a354 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java @@ -153,12 +153,7 @@ public Entry( /** * Represents status of a restored shard */ - public static class ShardRestoreStatus implements Writeable { - private State state; - private String nodeId; - private String reason; - - private ShardRestoreStatus() {} + public record ShardRestoreStatus(String nodeId, State state, String reason) implements Writeable { /** * Constructs a new shard restore status in initializing state on the given node @@ -179,67 +174,8 @@ public 
ShardRestoreStatus(String nodeId, State state) { this(nodeId, state, null); } - /** - * Constructs a new shard restore status in with specified state on the given node with specified failure reason - * - * @param nodeId node id - * @param state restore state - * @param reason failure reason - */ - public ShardRestoreStatus(String nodeId, State state, String reason) { - this.nodeId = nodeId; - this.state = state; - this.reason = reason; - } - - /** - * Returns current state - * - * @return current state - */ - public State state() { - return state; - } - - /** - * Returns node id of the node where shared is getting restored - * - * @return node id - */ - public String nodeId() { - return nodeId; - } - - /** - * Returns failure reason - * - * @return failure reason - */ - public String reason() { - return reason; - } - - /** - * Reads restore status from stream input - * - * @param in stream input - * @return restore status - */ - public static ShardRestoreStatus readShardRestoreStatus(StreamInput in) throws IOException { - ShardRestoreStatus shardSnapshotStatus = new ShardRestoreStatus(); - shardSnapshotStatus.readFrom(in); - return shardSnapshotStatus; - } - - /** - * Reads restore status from stream input - * - * @param in stream input - */ - public void readFrom(StreamInput in) throws IOException { - nodeId = in.readOptionalString(); - state = State.fromValue(in.readByte()); - reason = in.readOptionalString(); + public static ShardRestoreStatus readFrom(StreamInput in) throws IOException { + return new ShardRestoreStatus(in.readOptionalString(), State.fromValue(in.readByte()), in.readOptionalString()); } /** @@ -253,24 +189,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(state.value); out.writeOptionalString(reason); } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - ShardRestoreStatus status = (ShardRestoreStatus) o; - 
return state == status.state && Objects.equals(nodeId, status.nodeId) && Objects.equals(reason, status.reason); - } - - @Override - public int hashCode() { - return Objects.hash(state, nodeId, reason); - } } /** @@ -375,14 +293,7 @@ public RestoreInProgress(StreamInput in) throws IOException { List indices = in.readCollectionAsImmutableList(StreamInput::readString); entriesBuilder.put( uuid, - new Entry( - uuid, - snapshot, - state, - quiet, - indices, - in.readImmutableMap(ShardId::new, ShardRestoreStatus::readShardRestoreStatus) - ) + new Entry(uuid, snapshot, state, quiet, indices, in.readImmutableMap(ShardId::new, ShardRestoreStatus::readFrom)) ); } this.entries = Collections.unmodifiableMap(entriesBuilder); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index 234c9a924d8a8..eea89c6ff3714 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -11,13 +11,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState.Custom; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.repositories.RepositoryOperation; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.xcontent.ToXContent; @@ -29,7 +29,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; -import java.util.Objects; import java.util.Set; /** @@ -58,7 +57,7 @@ public 
static SnapshotDeletionsInProgress of(List entries) { @@ -195,7 +194,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore public String toString() { StringBuilder builder = new StringBuilder("SnapshotDeletionsInProgress["); for (int i = 0; i < entries.size(); i++) { - builder.append(entries.get(i).getSnapshots()); + builder.append(entries.get(i).snapshots()); if (i + 1 < entries.size()) { builder.append(","); } @@ -206,98 +205,56 @@ public String toString() { /** * A class representing a snapshot deletion request entry in the cluster state. */ - public static final class Entry implements Writeable, RepositoryOperation { - private final List snapshots; - private final String repoName; - private final State state; - private final long startTime; - private final long repositoryStateId; - private final String uuid; - - public Entry(List snapshots, String repoName, long startTime, long repositoryStateId, State state) { - this(snapshots, repoName, startTime, repositoryStateId, state, UUIDs.randomBase64UUID()); + public record Entry(String repoName, List snapshots, long startTime, long repositoryStateId, State state, String uuid) + implements + Writeable, + RepositoryOperation { + + @SuppressForbidden(reason = "using a private constructor within the same file") + public Entry(String repoName, List snapshots, long startTime, long repositoryStateId, State state) { + this(repoName, snapshots, startTime, repositoryStateId, state, UUIDs.randomBase64UUID()); } - private Entry(List snapshots, String repoName, long startTime, long repositoryStateId, State state, String uuid) { - this.snapshots = snapshots; + public Entry { assert snapshots.size() == new HashSet<>(snapshots).size() : "Duplicate snapshot ids in " + snapshots; - this.repoName = repoName; - this.startTime = startTime; - this.repositoryStateId = repositoryStateId; - this.state = state; - this.uuid = uuid; } - public Entry(StreamInput in) throws IOException { - this.repoName = in.readString(); - 
this.snapshots = in.readCollectionAsImmutableList(SnapshotId::new); - this.startTime = in.readVLong(); - this.repositoryStateId = in.readLong(); - this.state = State.readFrom(in); - this.uuid = in.readString(); + @SuppressForbidden(reason = "using a private constructor within the same file") + public static Entry readFrom(StreamInput in) throws IOException { + return new Entry( + in.readString(), + in.readCollectionAsImmutableList(SnapshotId::new), + in.readVLong(), + in.readLong(), + State.readFrom(in), + in.readString() + ); } + @SuppressForbidden(reason = "using a private constructor within the same file") public Entry started() { assert state == State.WAITING; - return new Entry(snapshots, repository(), startTime, repositoryStateId, State.STARTED, uuid); + return new Entry(repository(), snapshots, startTime, repositoryStateId, State.STARTED, uuid); } + @SuppressForbidden(reason = "using a private constructor within the same file") public Entry withAddedSnapshots(Collection newSnapshots) { assert state == State.WAITING; final Collection updatedSnapshots = new HashSet<>(snapshots); if (updatedSnapshots.addAll(newSnapshots) == false) { return this; } - return new Entry(List.copyOf(updatedSnapshots), repository(), startTime, repositoryStateId, State.WAITING, uuid); + return new Entry(repository(), List.copyOf(updatedSnapshots), startTime, repositoryStateId, State.WAITING, uuid); } + @SuppressForbidden(reason = "using a private constructor within the same file") public Entry withSnapshots(Collection snapshots) { - return new Entry(List.copyOf(snapshots), repository(), startTime, repositoryStateId, state, uuid); + return new Entry(repository(), List.copyOf(snapshots), startTime, repositoryStateId, state, uuid); } + @SuppressForbidden(reason = "using a private constructor within the same file") public Entry withRepoGen(long repoGen) { - return new Entry(snapshots, repository(), startTime, repoGen, state, uuid); - } - - public State state() { - return state; - } - - 
public String uuid() { - return uuid; - } - - public List getSnapshots() { - return snapshots; - } - - /** - * The start time in milliseconds for deleting the snapshots. - */ - public long getStartTime() { - return startTime; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Entry that = (Entry) o; - return repoName.equals(that.repoName) - && snapshots.equals(that.snapshots) - && startTime == that.startTime - && repositoryStateId == that.repositoryStateId - && state == that.state - && uuid.equals(that.uuid); - } - - @Override - public int hashCode() { - return Objects.hash(snapshots, repoName, startTime, repositoryStateId, state, uuid); + return new Entry(repository(), snapshots, startTime, repoGen, state, uuid); } @Override @@ -319,18 +276,6 @@ public String repository() { public long repositoryStateId() { return repositoryStateId; } - - @Override - public String toString() { - return Strings.format( - "SnapshotDeletionsInProgress.Entry[[%s@%d][%s][%s]%s]", - repoName, - repositoryStateId, - uuid, - state, - snapshots - ); - } } public enum State implements Writeable { diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 1a079d03405d7..470f175deb247 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; @@ -468,7 +469,14 @@ public static State fromValue(byte value) { } } - public static class 
ShardSnapshotStatus implements Writeable { + public record ShardSnapshotStatus( + @Nullable String nodeId, + ShardState state, + @Nullable ShardGeneration generation, + @Nullable String reason, + @Nullable // only present in state SUCCESS; may be null even in SUCCESS if this state came over the wire from an older node + ShardSnapshotResult shardSnapshotResult + ) implements Writeable { /** * Shard snapshot status for shards that are waiting for another operation to finish before they can be assigned to a node. @@ -486,41 +494,38 @@ public static class ShardSnapshotStatus implements Writeable { public static final ShardSnapshotStatus MISSING = new SnapshotsInProgress.ShardSnapshotStatus( null, ShardState.MISSING, - "missing index", - null + null, + "missing index" ); - private final ShardState state; - - @Nullable - private final String nodeId; - - @Nullable - private final ShardGeneration generation; - - @Nullable - private final String reason; - - @Nullable // only present in state SUCCESS; may be null even in SUCCESS if this state came over the wire from an older node - private final ShardSnapshotResult shardSnapshotResult; - public ShardSnapshotStatus(String nodeId, ShardGeneration generation) { this(nodeId, ShardState.INIT, generation); } public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, @Nullable ShardGeneration generation) { - this(nodeId, assertNotSuccess(state), null, generation); + this(nodeId, assertNotSuccess(state), generation, null); } - public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, String reason, @Nullable ShardGeneration generation) { - this(nodeId, assertNotSuccess(state), reason, generation, null); + @SuppressForbidden(reason = "using a private constructor within the same file") + public ShardSnapshotStatus(@Nullable String nodeId, ShardState state, @Nullable ShardGeneration generation, String reason) { + this(nodeId, assertNotSuccess(state), generation, reason, null); } - private ShardSnapshotStatus( + 
private static ShardState assertNotSuccess(ShardState shardState) { + assert shardState != ShardState.SUCCESS : "use ShardSnapshotStatus#success"; + return shardState; + } + + @SuppressForbidden(reason = "using a private constructor within the same file") + public static ShardSnapshotStatus success(String nodeId, ShardSnapshotResult shardSnapshotResult) { + return new ShardSnapshotStatus(nodeId, ShardState.SUCCESS, shardSnapshotResult.getGeneration(), null, shardSnapshotResult); + } + + public ShardSnapshotStatus( @Nullable String nodeId, ShardState state, - String reason, @Nullable ShardGeneration generation, + String reason, @Nullable ShardSnapshotResult shardSnapshotResult ) { this.nodeId = nodeId; @@ -531,15 +536,6 @@ private ShardSnapshotStatus( assert assertConsistent(); } - private static ShardState assertNotSuccess(ShardState shardState) { - assert shardState != ShardState.SUCCESS : "use ShardSnapshotStatus#success"; - return shardState; - } - - public static ShardSnapshotStatus success(String nodeId, ShardSnapshotResult shardSnapshotResult) { - return new ShardSnapshotStatus(nodeId, ShardState.SUCCESS, null, shardSnapshotResult.getGeneration(), shardSnapshotResult); - } - private boolean assertConsistent() { // If the state is failed we have to have a reason for this failure assert state.failed() == false || reason != null; @@ -552,6 +548,7 @@ private boolean assertConsistent() { return true; } + @SuppressForbidden(reason = "using a private constructor within the same file") public static ShardSnapshotStatus readFrom(StreamInput in) throws IOException { final String nodeId = DiscoveryNode.deduplicateNodeIdentifier(in.readOptionalString()); final ShardState state = ShardState.fromValue(in.readByte()); @@ -561,34 +558,17 @@ public static ShardSnapshotStatus readFrom(StreamInput in) throws IOException { if (state == ShardState.QUEUED) { return UNASSIGNED_QUEUED; } - return new ShardSnapshotStatus(nodeId, state, reason, generation, shardSnapshotResult); - } - 
- public ShardState state() { - return state; - } - - @Nullable - public String nodeId() { - return nodeId; - } - - @Nullable - public ShardGeneration generation() { - return this.generation; - } - - public String reason() { - return reason; + return new ShardSnapshotStatus(nodeId, state, generation, reason, shardSnapshotResult); } + @SuppressForbidden(reason = "using a private constructor within the same file") public ShardSnapshotStatus withUpdatedGeneration(ShardGeneration newGeneration) { assert state == ShardState.SUCCESS : "can't move generation in state " + state; return new ShardSnapshotStatus( nodeId, state, - reason, newGeneration, + reason, shardSnapshotResult == null ? null : new ShardSnapshotResult(newGeneration, shardSnapshotResult.getSize(), shardSnapshotResult.getSegmentCount()) @@ -618,43 +598,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(reason); out.writeOptionalWriteable(shardSnapshotResult); } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ShardSnapshotStatus status = (ShardSnapshotStatus) o; - return Objects.equals(nodeId, status.nodeId) - && Objects.equals(reason, status.reason) - && Objects.equals(generation, status.generation) - && state == status.state - && Objects.equals(shardSnapshotResult, status.shardSnapshotResult); - } - - @Override - public int hashCode() { - int result = state != null ? state.hashCode() : 0; - result = 31 * result + (nodeId != null ? nodeId.hashCode() : 0); - result = 31 * result + (reason != null ? reason.hashCode() : 0); - result = 31 * result + (generation != null ? generation.hashCode() : 0); - result = 31 * result + (shardSnapshotResult != null ? 
shardSnapshotResult.hashCode() : 0); - return result; - } - - @Override - public String toString() { - return "ShardSnapshotStatus[state=" - + state - + ", nodeId=" - + nodeId - + ", reason=" - + reason - + ", generation=" - + generation - + ", shardSnapshotResult=" - + shardSnapshotResult - + "]"; - } } public static class Entry implements Writeable, ToXContentObject, RepositoryOperation, Diffable { @@ -1029,8 +972,8 @@ public Entry abort() { status = new ShardSnapshotStatus( nodeId, nodeId == null ? ShardState.FAILED : ShardState.ABORTED, - "aborted by snapshot deletion", - status.generation() + status.generation(), + "aborted by snapshot deletion" ); } completed &= status.state().completed(); diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 0380bb80e0013..1d8a9ef1ce1c4 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.ChunkedLoggingStream; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -61,9 +60,7 @@ import java.io.Closeable; import java.io.IOException; -import java.io.OutputStreamWriter; import java.io.UncheckedIOException; -import java.nio.charset.StandardCharsets; import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.DirectoryStream; import java.nio.file.FileStore; @@ -956,13 +953,7 @@ private void maybeLogThreadDump(ShardId shardId, String message) { return; } nextShardLockHotThreadsNanos = now + TimeUnit.SECONDS.toNanos(60); - final var hotThreads = new HotThreads().busiestThreads(500).ignoreIdleThreads(false).detect(); - try ( - var 
stream = ChunkedLoggingStream.create(logger, Level.DEBUG, prefix, ReferenceDocs.SHARD_LOCK_TROUBLESHOOTING); - var writer = new OutputStreamWriter(stream, StandardCharsets.UTF_8) - ) { - writer.write(hotThreads); - } + HotThreads.logLocalHotThreads(logger, Level.DEBUG, prefix, ReferenceDocs.SHARD_LOCK_TROUBLESHOOTING); } catch (Exception e) { logger.error(format("could not obtain %s", prefix), e); } finally { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java index 946a0a1aa1718..af341e64661d1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java @@ -139,7 +139,7 @@ public static PostingsEnum leafLookup(LeafReader reader) throws IOException { return reader.postings(TERM); } - private class SyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private static class SyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { private PostingsEnum postings; private boolean hasValue; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 958db80ae64c2..4f3c4814517e5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -179,9 +179,6 @@ public SourceFieldMapper build() { return sourceFieldMapper; } - private IndexMode getIndexMode() { - return indexMode; - } } public static final TypeParser PARSER = new ConfigurableTypeParser( diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index e9d2223029e14..f86142ffbe862 100644 --- 
a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.NamedMatches; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.ParsingException; @@ -121,6 +122,9 @@ public final Query toQuery(SearchExecutionContext context) throws IOException { } } if (queryName != null) { + if (context.rewriteToNamedQuery()) { + query = NamedMatches.wrapQuery(queryName, query); + } context.addNamedQuery(queryName, query); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/FilteredSearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/FilteredSearchExecutionContext.java index a966d94e8b72f..26415a3d0e777 100644 --- a/server/src/main/java/org/elasticsearch/index/query/FilteredSearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/FilteredSearchExecutionContext.java @@ -378,4 +378,14 @@ public boolean indexMatches(String pattern) { public Set getMatchingFieldNames(String pattern) { return in.getMatchingFieldNames(pattern); } + + @Override + public void setRewriteToNamedQueries() { + in.setRewriteToNamedQueries(); + } + + @Override + public boolean rewriteToNamedQuery() { + return in.rewriteToNamedQuery(); + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index 143dfe7fe6e9d..c04182dfacd54 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -97,8 +97,8 @@ public class SearchExecutionContext extends QueryRewriteContext { private final Map namedQueries = new 
HashMap<>(); private NestedScope nestedScope; - private QueryBuilder aliasFilter; + private boolean rewriteToNamedQueries = false; /** * Build a {@linkplain SearchExecutionContext}. @@ -297,6 +297,10 @@ public Map copyNamedQueries() { return Map.copyOf(namedQueries); } + public boolean hasNamedQueries() { + return (namedQueries.isEmpty() == false); + } + /** * Parse a document with current mapping. */ @@ -619,4 +623,19 @@ public MappingLookup.CacheKey mappingCacheKey() { public NestedDocuments getNestedDocuments() { return new NestedDocuments(mappingLookup, bitsetFilterCache::getBitSetProducer, indexVersionCreated()); } + + /** + * Instructs this context to rewrite Elasticsearch queries with _name to Lucene NamedQuery + */ + public void setRewriteToNamedQueries() { + this.rewriteToNamedQueries = true; + } + + /** + * Returns true if Elasticsearch queries with _name must be rewritten to Lucene NamedQuery + * @return {@code true} if queries with {@code _name} must be rewritten to Lucene named queries + */ + public boolean rewriteToNamedQuery() { + return rewriteToNamedQueries; + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index e02a3a40b77ef..ae454b6af1e6c 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -8,25 +8,32 @@ package org.elasticsearch.monitor.jvm; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.ChunkedLoggingStream; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.transport.Transports; +import java.io.OutputStreamWriter; +import java.io.Writer; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; +import java.nio.charset.StandardCharsets; import java.time.Clock; import java.time.LocalDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.ToLongFunction; @@ -68,6 +75,31 @@ public class HotThreads { "DestroyJavaVM" ); + /** + * Capture and log the hot threads on the local node. Useful for capturing stack traces for unexpectedly-slow operations in production. + * The resulting log message may be large, and contains significant whitespace, so it is compressed and base64-encoded using {@link + * ChunkedLoggingStream}. + * + * @param logger The logger to use for the logging + * @param level The log level to use for the logging. + * @param prefix The prefix to emit on each chunk of the logging. + * @param referenceDocs A link to the docs describing how to decode the logging. 
+ */ + public static void logLocalHotThreads(Logger logger, Level level, String prefix, ReferenceDocs referenceDocs) { + if (logger.isEnabled(level) == false) { + return; + } + + try ( + var stream = ChunkedLoggingStream.create(logger, level, prefix, referenceDocs); + var writer = new OutputStreamWriter(stream, StandardCharsets.UTF_8) + ) { + new HotThreads().busiestThreads(500).ignoreIdleThreads(false).detect(writer); + } catch (Exception e) { + logger.error(() -> org.elasticsearch.common.Strings.format("failed to write local hot threads with prefix [%s]", prefix), e); + } + } + public enum ReportType { CPU("cpu"), @@ -154,12 +186,12 @@ public HotThreads sortOrder(SortOrder order) { return this; } - public String detect() throws Exception { + public void detect(Writer writer) throws Exception { synchronized (mutex) { - return innerDetect(ManagementFactory.getThreadMXBean(), SunThreadInfo.INSTANCE, Thread.currentThread().getId(), (interval) -> { + innerDetect(ManagementFactory.getThreadMXBean(), SunThreadInfo.INSTANCE, Thread.currentThread().getId(), (interval) -> { Thread.sleep(interval); return null; - }); + }, writer); } } @@ -230,8 +262,13 @@ private double getTimeSharePercentage(long time) { return (((double) time) / interval.nanos()) * 100; } - String innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long currentThreadId, SleepFunction threadSleep) - throws Exception { + void innerDetect( + ThreadMXBean threadBean, + SunThreadInfo sunThreadInfo, + long currentThreadId, + SleepFunction threadSleep, + Writer writer + ) throws Exception { if (threadBean.isThreadCpuTimeSupported() == false) { throw new ElasticsearchException("thread CPU time is not supported on this JDK"); } @@ -246,14 +283,14 @@ String innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long cu throw new ElasticsearchException("thread wait/blocked time accounting is not supported on this JDK"); } - StringBuilder sb = new StringBuilder().append("Hot threads at ") + 
writer.append("Hot threads at ") .append(DATE_TIME_FORMATTER.format(LocalDateTime.now(Clock.systemUTC()))) .append(", interval=") - .append(interval) + .append(interval.toString()) .append(", busiestThreads=") - .append(busiestThreads) + .append(Integer.toString(busiestThreads)) .append(", ignoreIdleThreads=") - .append(ignoreIdleThreads) + .append(Boolean.toString(ignoreIdleThreads)) .append(":\n"); // Capture before and after thread state with timings @@ -303,9 +340,8 @@ String innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long cu ThreadTimeAccumulator topThread = topThreads.get(t); switch (type) { - case MEM -> sb.append( - String.format( - Locale.ROOT, + case MEM -> writer.append( + Strings.format( "%n%s memory allocated by thread '%s'%n", ByteSizeValue.ofBytes(topThread.getAllocatedBytes()), threadName @@ -318,9 +354,8 @@ String innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long cu : getTimeSharePercentage(topThread.getOtherTime()); double percentTotal = (Transports.isTransportThread(threadName)) ? percentCpu : percentOther + percentCpu; String otherLabel = (Transports.isTransportThread(threadName)) ? 
"idle" : "other"; - sb.append( - String.format( - Locale.ROOT, + writer.append( + Strings.format( "%n%4.1f%% [cpu=%1.1f%%, %s=%1.1f%%] (%s out of %s) %s usage by thread '%s'%n", percentTotal, percentCpu, @@ -336,9 +371,8 @@ String innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long cu default -> { long time = ThreadTimeAccumulator.valueGetterForReportType(type).applyAsLong(topThread); double percent = getTimeSharePercentage(time); - sb.append( - String.format( - Locale.ROOT, + writer.append( + Strings.format( "%n%4.1f%% (%s out of %s) %s usage by thread '%s'%n", percent, TimeValue.timeValueNanos(time), @@ -377,29 +411,21 @@ String innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long cu if (allInfos[i][t] != null) { final StackTraceElement[] show = allInfos[i][t].getStackTrace(); if (count == 1) { - sb.append(String.format(Locale.ROOT, " unique snapshot%n")); + writer.append(Strings.format(" unique snapshot%n")); for (StackTraceElement frame : show) { - sb.append(String.format(Locale.ROOT, " %s%n", frame)); + writer.append(Strings.format(" %s%n", frame)); } } else { - sb.append( - String.format( - Locale.ROOT, - " %d/%d snapshots sharing following %d elements%n", - count, - threadElementsSnapshotCount, - maxSim - ) + writer.append( + Strings.format(" %d/%d snapshots sharing following %d elements%n", count, threadElementsSnapshotCount, maxSim) ); for (int l = show.length - maxSim; l < show.length; l++) { - sb.append(String.format(Locale.ROOT, " %s%n", show[l])); + writer.append(Strings.format(" %s%n", show[l])); } } } } } - - return sb.toString(); } static int similarity(ThreadInfo threadInfo, ThreadInfo threadInfo0) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 61923dcff2d78..74d3c6a084217 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ 
b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -1427,7 +1427,7 @@ private void applyDataStreamRestores(ClusterState currentState, Metadata.Builder private void ensureSnapshotNotDeleted(ClusterState currentState) { SnapshotDeletionsInProgress deletionsInProgress = SnapshotDeletionsInProgress.get(currentState); - if (deletionsInProgress.getEntries().stream().anyMatch(entry -> entry.getSnapshots().contains(snapshot.getSnapshotId()))) { + if (deletionsInProgress.getEntries().stream().anyMatch(entry -> entry.snapshots().contains(snapshot.getSnapshotId()))) { throw new ConcurrentSnapshotExecutionException( snapshot, "cannot restore a snapshot while a snapshot deletion is in-progress [" + deletionsInProgress.getEntries().get(0) + "]" diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index cc390618e5ebb..0f7c4f71a089c 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -575,7 +575,7 @@ private void notifyUnsuccessfulSnapshotShard( sendSnapshotShardUpdate( snapshot, shardId, - new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.FAILED, failure, generation) + new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.FAILED, generation, failure) ); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 3b872f550fc6f..f62061f5d5b4b 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -335,7 +335,7 @@ public ClusterState execute(ClusterState currentState) { .findAny() .orElseThrow(() -> new SnapshotMissingException(repositoryName, request.source())); final 
SnapshotDeletionsInProgress deletionsInProgress = SnapshotDeletionsInProgress.get(currentState); - if (deletionsInProgress.getEntries().stream().anyMatch(entry -> entry.getSnapshots().contains(sourceSnapshotId))) { + if (deletionsInProgress.getEntries().stream().anyMatch(entry -> entry.snapshots().contains(sourceSnapshotId))) { throw new ConcurrentSnapshotExecutionException( repositoryName, sourceSnapshotId.getName(), @@ -626,8 +626,8 @@ private void runReadyClone( new ShardSnapshotStatus( localNodeId, ShardState.FAILED, - "failed to clone shard snapshot", - shardStatusBefore.generation() + shardStatusBefore.generation(), + "failed to clone shard snapshot" ), ActionListener.runBefore( ActionListener.wrap( @@ -1187,8 +1187,8 @@ private static ImmutableOpenMap processWaitingShar final ShardSnapshotStatus failedState = new ShardSnapshotStatus( shardStatus.nodeId(), ShardState.FAILED, - "shard is unassigned", - shardStatus.generation() + shardStatus.generation(), + "shard is unassigned" ); shards.put(shardId, failedState); knownFailures.put(shardEntry.getKey(), failedState); @@ -1202,8 +1202,8 @@ private static ImmutableOpenMap processWaitingShar final ShardSnapshotStatus failedState = new ShardSnapshotStatus( shardStatus.nodeId(), ShardState.FAILED, - "node left the cluster during snapshot", - shardStatus.generation() + shardStatus.generation(), + "node left the cluster during snapshot" ); shards.put(shardId, failedState); knownFailures.put(shardEntry.getKey(), failedState); @@ -1901,7 +1901,7 @@ private static SnapshotDeletionsInProgress deletionsWithoutSnapshots( List updatedEntries = new ArrayList<>(deletions.getEntries().size()); for (SnapshotDeletionsInProgress.Entry entry : deletions.getEntries()) { if (entry.repository().equals(repository)) { - final List updatedSnapshotIds = new ArrayList<>(entry.getSnapshots()); + final List updatedSnapshotIds = new ArrayList<>(entry.snapshots()); if (updatedSnapshotIds.removeAll(snapshotIds)) { changed = true; 
updatedEntries.add(entry.withSnapshots(updatedSnapshotIds)); @@ -2067,7 +2067,7 @@ public ClusterState execute(ClusterState currentState) { .filter( entry -> entry.repository().equals(repositoryName) && entry.state() == SnapshotDeletionsInProgress.State.STARTED - && entry.getSnapshots().containsAll(snapshotIds) + && entry.snapshots().containsAll(snapshotIds) ) .findFirst(); if (foundDuplicate.isPresent()) { @@ -2076,8 +2076,8 @@ public ClusterState execute(ClusterState currentState) { return currentState; } newDelete = new SnapshotDeletionsInProgress.Entry( - List.copyOf(snapshotIdsRequiringCleanup), repositoryName, + List.copyOf(snapshotIdsRequiringCleanup), threadPool.absoluteTimeInMillis(), repositoryData.getGenId(), updatedSnapshots.forRepo(repositoryName).stream().noneMatch(SnapshotsService::isWritingToRepository) @@ -2363,7 +2363,7 @@ private void deleteSnapshotsFromRepository( ) { if (repositoryOperations.startDeletion(deleteEntry.uuid())) { assert currentlyFinalizing.contains(deleteEntry.repository()); - final List snapshotIds = deleteEntry.getSnapshots(); + final List snapshotIds = deleteEntry.snapshots(); assert deleteEntry.state() == SnapshotDeletionsInProgress.State.STARTED : "incorrect state for entry [" + deleteEntry + "]"; if (snapshotIds.isEmpty()) { // this deletion overlapped one or more deletions that were successfully processed and there is no remaining snapshot to @@ -2415,7 +2415,7 @@ public void onFailure(Exception e) { logger.warn(() -> { final StringBuilder sb = new StringBuilder("failed to complete snapshot deletion for ["); Strings.collectionToDelimitedStringWithLimit( - deleteEntry.getSnapshots().stream().map(SnapshotId::getName).toList(), + deleteEntry.snapshots().stream().map(SnapshotId::getName).toList(), ",", "", "", @@ -2461,7 +2461,7 @@ private void removeSnapshotDeletionFromClusterState( protected SnapshotDeletionsInProgress filterDeletions(SnapshotDeletionsInProgress deletions) { final SnapshotDeletionsInProgress updatedDeletions 
= deletionsWithoutSnapshots( deletions, - deleteEntry.getSnapshots(), + deleteEntry.snapshots(), deleteEntry.repository() ); return updatedDeletions == null ? deletions : updatedDeletions; @@ -2469,7 +2469,7 @@ protected SnapshotDeletionsInProgress filterDeletions(SnapshotDeletionsInProgres @Override protected void handleListeners(List> deleteListeners) { - assert repositoryData.getSnapshotIds().stream().noneMatch(deleteEntry.getSnapshots()::contains) + assert repositoryData.getSnapshotIds().stream().noneMatch(deleteEntry.snapshots()::contains) : "Repository data contained snapshot ids " + repositoryData.getSnapshotIds() + " that should should been deleted by [" @@ -2890,15 +2890,15 @@ private static ImmutableOpenMap new SnapshotDeletionsInProgress.Entry( - Collections.emptyList(), randomAlphaOfLength(10), + Collections.emptyList(), randomNonNegativeLong(), randomNonNegativeLong(), randomFrom(SnapshotDeletionsInProgress.State.values()) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java index cc8a65d0d577f..519184800d443 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java @@ -216,8 +216,8 @@ private SnapshotsInProgress makeSnapshotsInProgress(ShardId snapshotShardId, Sna shardSnapshotStatus = new SnapshotsInProgress.ShardSnapshotStatus( nodeId, shardState, - randomAlphaOfLength(10), - ShardGeneration.newGeneration(random()) + ShardGeneration.newGeneration(random()), + randomAlphaOfLength(10) ); } else { shardSnapshotStatus = new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, ShardGeneration.newGeneration(random())); diff --git 
a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 0f375246c3337..5484998fef2e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -131,8 +131,8 @@ public void testSnapshotDeletionsInProgressSerialization() throws Exception { SnapshotDeletionsInProgress.of( List.of( new SnapshotDeletionsInProgress.Entry( - Collections.singletonList(new SnapshotId("snap1", UUIDs.randomBase64UUID())), "repo1", + Collections.singletonList(new SnapshotId("snap1", UUIDs.randomBase64UUID())), randomNonNegativeLong(), randomNonNegativeLong(), SnapshotDeletionsInProgress.State.STARTED diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java deleted file mode 100644 index aca1984c502cd..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.index.mapper; - -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.StringField; - -public class ExternalMetadataMapper extends MetadataFieldMapper { - - static final String CONTENT_TYPE = "_external_root"; - static final String FIELD_NAME = "_is_external"; - static final String FIELD_VALUE = "true"; - - protected ExternalMetadataMapper() { - super(new BooleanFieldMapper.BooleanFieldType(FIELD_NAME)); - } - - @Override - protected String contentType() { - return CONTENT_TYPE; - } - - @Override - public void postParse(DocumentParserContext context) { - context.doc().add(new StringField(FIELD_NAME, FIELD_VALUE, Store.YES)); - } - - @Override - public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - throw new UnsupportedOperationException(); - } - - public static final TypeParser PARSER = new FixedTypeParser(c -> new ExternalMetadataMapper()); -} diff --git a/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java b/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java index 569062317b7b5..16760d103f17f 100644 --- a/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java @@ -15,6 +15,7 @@ import org.mockito.ArgumentMatchers; import org.mockito.InOrder; +import java.io.StringWriter; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; import java.util.ArrayList; @@ -302,7 +303,7 @@ public void testInnerDetectCPUMode() throws Exception { .threadElementsSnapshotCount(11) .ignoreIdleThreads(false); - String innerResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + String innerResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat(innerResult, containsString("Hot threads at ")); assertThat(innerResult, containsString("interval=10ms, 
busiestThreads=4, ignoreIdleThreads=false:")); @@ -321,7 +322,7 @@ public void testInnerDetectCPUMode() throws Exception { assertThat(innerResult, containsString("org.elasticsearch.monitor.testOther.methodFinal(Some_File:1)")); // Let's ask again without progressing the CPU thread counters, e.g. resetting the mocks - innerResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + innerResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat(innerResult, containsString("0.0% [cpu=0.0%, other=0.0%] (0s out of 10ms) cpu usage by thread 'Thread 4'")); assertThat(innerResult, containsString("0.0% [cpu=0.0%, other=0.0%] (0s out of 10ms) cpu usage by thread 'Thread 3'")); @@ -340,7 +341,7 @@ public void testInnerDetectCPUMode() throws Exception { .threadElementsSnapshotCount(11) .ignoreIdleThreads(false); - innerResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + innerResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat(innerResult, containsString("Hot threads at ")); assertThat(innerResult, containsString("interval=10ms, busiestThreads=4, ignoreIdleThreads=false:")); @@ -377,7 +378,7 @@ public void testInnerDetectWaitMode() throws Exception { List waitOrderedInfos = List.of(allInfos.get(3), allInfos.get(1), allInfos.get(0), allInfos.get(2)); when(mockedMXBean.getThreadInfo(ArgumentMatchers.any(), anyInt())).thenReturn(waitOrderedInfos.toArray(new ThreadInfo[0])); - String waitInnerResult = hotWaitingThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + String waitInnerResult = innerDetect(hotWaitingThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat( waitInnerResult, @@ -401,7 +402,7 @@ public void testInnerDetectWaitMode() throws Exception { waitOrderedInfos = List.of(allInfos.get(3), allInfos.get(1), 
allInfos.get(0), allInfos.get(2)); when(mockedMXBean.getThreadInfo(ArgumentMatchers.any(), anyInt())).thenReturn(waitOrderedInfos.toArray(new ThreadInfo[0])); - waitInnerResult = hotWaitingThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + waitInnerResult = innerDetect(hotWaitingThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat( waitInnerResult, @@ -431,7 +432,7 @@ public void testInnerDetectBlockedMode() throws Exception { List blockOrderedInfos = List.of(allInfos.get(2), allInfos.get(0), allInfos.get(1), allInfos.get(3)); when(mockedMXBean.getThreadInfo(ArgumentMatchers.any(), anyInt())).thenReturn(blockOrderedInfos.toArray(new ThreadInfo[0])); - String blockInnerResult = hotBlockedThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + String blockInnerResult = innerDetect(hotBlockedThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat( blockInnerResult, @@ -455,7 +456,7 @@ public void testInnerDetectBlockedMode() throws Exception { blockOrderedInfos = List.of(allInfos.get(2), allInfos.get(0), allInfos.get(1), allInfos.get(3)); when(mockedMXBean.getThreadInfo(ArgumentMatchers.any(), anyInt())).thenReturn(blockOrderedInfos.toArray(new ThreadInfo[0])); - blockInnerResult = hotBlockedThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + blockInnerResult = innerDetect(hotBlockedThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat( blockInnerResult, @@ -490,7 +491,7 @@ public void testInnerDetectMemoryMode() throws Exception { .threadElementsSnapshotCount(1) .ignoreIdleThreads(false); - String memInnerResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + String memInnerResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat(memInnerResult, containsString(" 
unique snapshot")); assertThat( memInnerResult, @@ -519,7 +520,7 @@ public void testInnerDetectMemoryMode() throws Exception { .threadElementsSnapshotCount(1) .ignoreIdleThreads(false); - memInnerResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + memInnerResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat(memInnerResult, containsString(" unique snapshot")); assertThat( memInnerResult, @@ -551,7 +552,7 @@ public void testInnerDetectSingleSnapshot() throws Exception { .threadElementsSnapshotCount(1) .ignoreIdleThreads(false); - String singleResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + String singleResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat(singleResult, containsString(" unique snapshot")); assertEquals(5, singleResult.split(" unique snapshot").length); @@ -581,7 +582,7 @@ public void testEnsureInnerDetectSkipsCurrentThread() throws Exception { .threadElementsSnapshotCount(11) .ignoreIdleThreads(false); - String innerResult = hotThreads.innerDetect(mockedMXBean, mock(SunThreadInfo.class), mockCurrentThreadId, (interval) -> null); + String innerResult = innerDetect(hotThreads, mockedMXBean, mock(SunThreadInfo.class), mockCurrentThreadId); assertEquals(1, innerResult.lines().count()); } @@ -811,7 +812,7 @@ public void testWaitBlockTimeMonitoringEnabled() throws Exception { Exception e = expectThrows( ElasticsearchException.class, - () -> hotThreads.innerDetect(mockedMXBean, mock(SunThreadInfo.class), mockCurrentThreadId, (interval) -> null) + () -> innerDetect(hotThreads, mockedMXBean, mock(SunThreadInfo.class), mockCurrentThreadId) ); assertEquals(e.getMessage(), "thread wait/blocked time accounting is not supported on this JDK"); @@ -846,7 +847,7 @@ public void testGetThreadAllocatedBytesFailures() throws Exception { ElasticsearchException 
exception = expectThrows( ElasticsearchException.class, - () -> hotThreads0.innerDetect(mockedMXBean, mockedSunThreadInfo, 0L, (interval) -> null) + () -> innerDetect(hotThreads0, mockedMXBean, mockedSunThreadInfo, 0L) ); assertThat(exception.getMessage(), equalTo("thread allocated memory is not supported on this JDK")); } @@ -869,7 +870,7 @@ public void testInnerDetectCPUModeTransportThreads() throws Exception { .threadElementsSnapshotCount(11) .ignoreIdleThreads(false); - String innerResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + String innerResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat(innerResult, containsString("Hot threads at ")); assertThat(innerResult, containsString("interval=10ms, busiestThreads=4, ignoreIdleThreads=false:")); @@ -888,7 +889,7 @@ public void testInnerDetectCPUModeTransportThreads() throws Exception { assertThat(innerResult, containsString("org.elasticsearch.monitor.testOther.methodFinal(Some_File:1)")); // Let's ask again without progressing the CPU thread counters, e.g. 
resetting the mocks - innerResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + innerResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat( innerResult, @@ -919,7 +920,7 @@ public void testInnerDetectCPUModeTransportThreads() throws Exception { .threadElementsSnapshotCount(11) .ignoreIdleThreads(false); - innerResult = hotThreads.innerDetect(mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId, (interval) -> null); + innerResult = innerDetect(hotThreads, mockedMXBean, mockedSunThreadInfo, mockCurrentThreadId); assertThat(innerResult, containsString("Hot threads at ")); assertThat(innerResult, containsString("interval=10ms, busiestThreads=4, ignoreIdleThreads=false:")); @@ -937,4 +938,16 @@ public void testInnerDetectCPUModeTransportThreads() throws Exception { assertThat(innerResult, containsString("org.elasticsearch.monitor.test.method_1(Some_File:1)")); assertThat(innerResult, containsString("org.elasticsearch.monitor.testOther.methodFinal(Some_File:1)")); } + + private static String innerDetect( + HotThreads hotThreads, + ThreadMXBean mockedMXBean, + SunThreadInfo sunThreadInfo, + long currentThreadId + ) throws Exception { + try (var writer = new StringWriter()) { + hotThreads.innerDetect(mockedMXBean, sunThreadInfo, currentThreadId, (interval) -> null, writer); + return writer.toString(); + } + } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/ShardSnapshotStatusWireSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/ShardSnapshotStatusWireSerializationTests.java index 7257cc913e338..d1d08e785f603 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/ShardSnapshotStatusWireSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/ShardSnapshotStatusWireSerializationTests.java @@ -33,7 +33,7 @@ protected SnapshotsInProgress.ShardSnapshotStatus createTestInstance() {
return SnapshotsInProgress.ShardSnapshotStatus.success(nodeId, randomShardSnapshotResult()); } else { final String reason = shardState.failed() ? randomAlphaOfLength(10) : null; - return new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, reason, ShardGeneration.newGeneration()); + return new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, ShardGeneration.newGeneration(), reason); } } @@ -68,26 +68,26 @@ protected SnapshotsInProgress.ShardSnapshotStatus mutateInstance(SnapshotsInProg return new SnapshotsInProgress.ShardSnapshotStatus( instance.nodeId(), newState, - randomAlphaOfLength(15 - instance.reason().length()), - instance.generation() + instance.generation(), + randomAlphaOfLength(15 - instance.reason().length()) ); } else { final String reason = newState.failed() ? randomAlphaOfLength(10) : null; if (newState != instance.state() && randomBoolean()) { - return new SnapshotsInProgress.ShardSnapshotStatus(instance.nodeId(), newState, reason, instance.generation()); + return new SnapshotsInProgress.ShardSnapshotStatus(instance.nodeId(), newState, instance.generation(), reason); } else if (randomBoolean()) { return new SnapshotsInProgress.ShardSnapshotStatus( randomAlphaOfLength(11 - instance.nodeId().length()), newState, - reason, - instance.generation() + instance.generation(), + reason ); } else { return new SnapshotsInProgress.ShardSnapshotStatus( instance.nodeId(), newState, - reason, - randomValueOtherThan(instance.generation(), ShardGeneration::newGeneration) + randomValueOtherThan(instance.generation(), ShardGeneration::newGeneration), + reason ); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index 74277954b8002..893242ccaa308 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ 
b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -104,7 +104,7 @@ private SnapshotsInProgress.ShardSnapshotStatus randomShardSnapshotStatus(String return SnapshotsInProgress.ShardSnapshotStatus.success(nodeId, shardSnapshotResult); } else { final String reason = shardState.failed() ? randomAlphaOfLength(10) : null; - return new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, reason, new ShardGeneration(1L)); + return new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, new ShardGeneration(1L), reason); } } @@ -406,8 +406,8 @@ public void testXContent() throws IOException { new SnapshotsInProgress.ShardSnapshotStatus( "nodeId", ShardState.FAILED, - "failure-reason", - new ShardGeneration("fail-gen") + new ShardGeneration("fail-gen"), + "failure-reason" ) ), null, diff --git a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java index 41e6a818a62da..f6c5fdfe4db5c 100644 --- a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java +++ b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/MetricsApmIT.java @@ -56,6 +56,7 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103286") @SuppressWarnings("unchecked") public void testApmIntegration() throws Exception { Map>> sampleAssertions = new HashMap<>( diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 71030358e901f..b1765218ff7f2 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -36,6 +36,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotRestoreException; @@ -333,20 +334,8 @@ protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) t logger.info("--> add random documents to {}", index); addRandomDocuments(index, randomIntBetween(10, 1000)); } else { - var resp = prepareSearch(index).setSize(0).get(); - final int docCount; - try { - docCount = (int) resp.getHits().getTotalHits().value; - } finally { - resp.decRef(); - } - int deleteCount = randomIntBetween(1, docCount); - logger.info("--> delete {} random documents from {}", deleteCount, index); - for (int i = 0; i < deleteCount; i++) { - int doc = randomIntBetween(0, docCount - 1); - client().prepareDelete(index, Integer.toString(doc)).get(); - } - client().admin().indices().prepareRefresh(index).get(); + final int docCount = (int) SearchResponseUtils.getTotalHitsValue(prepareSearch(index).setSize(0)); + deleteRandomDocs(index, docCount); } } @@ -395,13 +384,7 @@ public void testMultipleSnapshotAndRollback() throws Exception { if (randomBoolean() && i > 0) { // don't delete on the first iteration int docCount = docCounts[i - 1]; if (docCount > 0) { - int deleteCount = randomIntBetween(1, docCount); - logger.info("--> delete {} random documents from {}", deleteCount, indexName); - for (int j = 0; j < deleteCount; j++) { - int doc = randomIntBetween(0, docCount - 1); - 
client().prepareDelete(indexName, Integer.toString(doc)).get(); - } - client().admin().indices().prepareRefresh(indexName).get(); + deleteRandomDocs(indexName, docCount); } } else { int docCount = randomIntBetween(10, 1000); @@ -409,12 +392,7 @@ public void testMultipleSnapshotAndRollback() throws Exception { addRandomDocuments(indexName, docCount); } // Check number of documents in this iteration - var resp = prepareSearch(indexName).setSize(0).get(); - try { - docCounts[i] = (int) resp.getHits().getTotalHits().value; - } finally { - resp.decRef(); - } + docCounts[i] = (int) SearchResponseUtils.getTotalHitsValue(prepareSearch(indexName).setSize(0)); logger.info("--> create snapshot {}:{} with {} documents", repoName, snapshotName + "-" + i, docCounts[i]); assertSuccessfulSnapshot( clusterAdmin().prepareCreateSnapshot(repoName, snapshotName + "-" + i).setWaitForCompletion(true).setIndices(indexName) @@ -446,6 +424,16 @@ public void testMultipleSnapshotAndRollback() throws Exception { } } + private void deleteRandomDocs(String indexName, int existingDocCount) { + int deleteCount = randomIntBetween(1, existingDocCount); + logger.info("--> delete {} random documents from {}", deleteCount, indexName); + for (int j = 0; j < deleteCount; j++) { + int doc = randomIntBetween(0, existingDocCount - 1); + client().prepareDelete(indexName, Integer.toString(doc)).get(); + } + client().admin().indices().prepareRefresh(indexName).get(); + } + public void testIndicesDeletedFromRepository() throws Exception { final String repoName = createRepository(randomRepositoryName()); Client client = client(); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 0b5b953df84fc..5ab1641307fc5 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -47,6 +46,7 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.mockstore.MockRepository; @@ -502,14 +502,9 @@ protected void indexRandomDocs(String index, int numdocs) throws InterruptedExce } protected long getCountForIndex(String indexName) { - var resp = client().search( - new SearchRequest(new SearchRequest(indexName).source(new SearchSourceBuilder().size(0).trackTotalHits(true))) - ).actionGet(); - try { - return resp.getHits().getTotalHits().value; - } finally { - resp.decRef(); - } + return SearchResponseUtils.getTotalHitsValue( + client().prepareSearch(indexName).setSource(new SearchSourceBuilder().size(0).trackTotalHits(true)) + ); } protected void assertDocCount(String index, long count) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 779d846f4eac2..23721de4aad9c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -129,6 +129,7 @@ import org.elasticsearch.search.ConcurrentSearchTestPlugin; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchService; import org.elasticsearch.test.client.RandomizingClient; import org.elasticsearch.test.disruption.NetworkDisruption; @@ -1064,17 +1065,14 @@ public void waitForDocs(final long numDocs, final BackgroundIndexer indexer) thr if (lastKnownCount >= numDocs) { try { - var resp = prepareSearch().setTrackTotalHits(true).setSize(0).setQuery(matchAllQuery()).get(); - try { - long count = resp.getHits().getTotalHits().value; - if (count == lastKnownCount) { - // no progress - try to refresh for the next time - indicesAdmin().prepareRefresh().get(); - } - lastKnownCount = count; - } finally { - resp.decRef(); + long count = SearchResponseUtils.getTotalHitsValue( + prepareSearch().setTrackTotalHits(true).setSize(0).setQuery(matchAllQuery()) + ); + if (count == lastKnownCount) { + // no progress - try to refresh for the next time + indicesAdmin().prepareRefresh().get(); } + lastKnownCount = count; } catch (Exception e) { // count now acts like search and barfs if all shards failed... 
logger.debug("failed to executed count", e); throw e; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java index a58810e91e186..fd5bd253fd8e5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java @@ -58,6 +58,9 @@ public class RestTestLegacyFeatures implements FeatureSpecification { @UpdateForV9 public static final NodeFeature ML_ANALYTICS_MAPPINGS = new NodeFeature("ml.analytics_mappings"); + // YAML + public static final NodeFeature REST_ELASTIC_PRODUCT_HEADER_PRESENT = new NodeFeature("action.rest.product_header_present"); + @Override public Map getHistoricalFeatures() { return Map.ofEntries( @@ -75,7 +78,8 @@ public Map getHistoricalFeatures() { entry(SEARCH_AGGREGATIONS_FORCE_INTERVAL_SELECTION_DATE_HISTOGRAM, Version.V_7_2_0), entry(TRANSFORM_NEW_API_ENDPOINT, Version.V_7_5_0), entry(ML_INDICES_HIDDEN, Version.V_7_7_0), - entry(ML_ANALYTICS_MAPPINGS, Version.V_7_3_0) + entry(ML_ANALYTICS_MAPPINGS, Version.V_7_3_0), + entry(REST_ELASTIC_PRODUCT_HEADER_PRESENT, Version.V_8_0_1) ); } } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index 78c796ae8dd9c..e8a72042f7729 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -443,6 +443,21 @@ private void copyExtraConfigFiles() { }); } + public void updateStoredSecureSettings() { + if (usesSecureSecretsFile) { + throw new UnsupportedOperationException("updating stored secure settings is not supported in serverless 
test clusters"); + } + final Path keystoreFile = workingDir.resolve("config").resolve("elasticsearch.keystore"); + try { + Files.deleteIfExists(keystoreFile); + } catch (IOException e) { + throw new RuntimeException(e); + } + createKeystore(); + addKeystoreSettings(); + addKeystoreFiles(); + } + private void createKeystore() { if (spec.getKeystorePassword() == null || spec.getKeystorePassword().isEmpty()) { runToolScript("elasticsearch-keystore", null, "-v", "create"); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterHandle.java index b51d9e3c9c6cb..718c9c1bb0042 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterHandle.java @@ -179,6 +179,11 @@ public InputStream getNodeLog(int index, LogType logType) { return nodes.get(index).getLog(logType); } + @Override + public void updateStoredSecureSettings() { + execute(() -> nodes.parallelStream().forEach(Node::updateStoredSecureSettings)); + } + protected void waitUntilReady() { writeUnicastHostsFile(); try { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalElasticsearchCluster.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalElasticsearchCluster.java index 38d83e7652e98..77b73e7b6ce86 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalElasticsearchCluster.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalElasticsearchCluster.java @@ -156,6 +156,12 @@ public InputStream getNodeLog(int index, LogType logType) { return handle.getNodeLog(index, logType); } + @Override + public void updateStoredSecureSettings() { + checkHandle(); + 
handle.updateStoredSecureSettings(); + } + protected H getHandle() { return handle; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java index a3bba54bb4bf8..7a95d682e9ddc 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterHandle.java @@ -10,6 +10,7 @@ import org.elasticsearch.test.cluster.ClusterHandle; import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.test.cluster.MutableSettingsProvider; import org.elasticsearch.test.cluster.util.Version; import java.io.InputStream; @@ -93,4 +94,13 @@ public interface LocalClusterHandle extends ClusterHandle { * Returns an {@link InputStream} for the given node log. */ InputStream getNodeLog(int index, LogType logType); + + /** + * Writes secure settings to the relevant secure config file on each node. Use this method if you are dynamically updating secure + * settings via a {@link MutableSettingsProvider} and need the update to be written to file, without a cluster restart. + * + * @throws UnsupportedOperationException if secure settings are stored in a secrets file, i.e., in serverless. 
Only keystore-based + * storage is currently supported + */ + void updateStoredSecureSettings(); } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java index 7cabb5543ac16..c95fc5c131df0 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Objects; import java.util.function.BiPredicate; +import java.util.function.Predicate; /** * Used to execute REST requests according to the docs snippets that need to be tests. Wraps a @@ -40,11 +41,11 @@ public ClientYamlDocsTestClient( final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion, + final Predicate clusterFeaturesPredicate, final String os, final CheckedSupplier clientBuilderWithSniffedNodes ) { - super(restSpec, restClient, hosts, esVersion, masterVersion, os, clientBuilderWithSniffedNodes); + super(restSpec, restClient, hosts, esVersion, clusterFeaturesPredicate, os, clientBuilderWithSniffedNodes); } @Override diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 7787807876724..d30f65718943e 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -47,6 +47,7 @@ import java.util.Map.Entry; import java.util.Set; import java.util.function.BiPredicate; +import java.util.function.Predicate; import java.util.stream.Collectors; import static 
com.carrotsearch.randomizedtesting.RandomizedTest.frequently; @@ -64,24 +65,24 @@ public class ClientYamlTestClient implements Closeable { private final ClientYamlSuiteRestSpec restSpec; private final Map restClients = new HashMap<>(); private final Version esVersion; - private final Version masterVersion; private final String os; private final CheckedSupplier clientBuilderWithSniffedNodes; + private final Predicate clusterFeaturesPredicate; ClientYamlTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion, + final Predicate clusterFeaturesPredicate, final String os, final CheckedSupplier clientBuilderWithSniffedNodes ) { + this.clusterFeaturesPredicate = clusterFeaturesPredicate; assert hosts.size() > 0; this.restSpec = restSpec; this.restClients.put(NodeSelector.ANY, restClient); this.esVersion = esVersion; - this.masterVersion = masterVersion; this.os = os; this.clientBuilderWithSniffedNodes = clientBuilderWithSniffedNodes; } @@ -93,8 +94,8 @@ public Version getEsVersion() { return esVersion; } - public Version getMasterVersion() { - return masterVersion; + public boolean clusterHasFeature(String featureId) { + return clusterFeaturesPredicate.test(featureId); } public String getOs() { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 8b77acb3ee133..a584280119ef3 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -230,10 +230,6 @@ public Version esVersion() { return clientYamlTestClient.getEsVersion(); } - public Version masterVersion() { - return clientYamlTestClient.getMasterVersion(); - } - public String os() 
{ return clientYamlTestClient.getOs(); } @@ -241,4 +237,8 @@ public String os() { public ClientYamlTestCandidate getClientYamlTestCandidate() { return clientYamlTestCandidate; } + + public boolean clusterHasFeature(String featureId) { + return clientYamlTestClient.clusterHasFeature(featureId); + } } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 630ee9883ff83..2e1631cc8c337 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -61,6 +61,7 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; +import java.util.function.Predicate; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -150,7 +151,7 @@ public void initAndResetContext() throws Exception { hosts, os ); - clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, esVersion, masterVersion, os); + clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, esVersion, ESRestTestCase::clusterHasFeature, os); restTestExecutionContext = createRestTestExecutionContext(testCandidate, clientYamlTestClient); adminExecutionContext = new ClientYamlTestExecutionContext(testCandidate, clientYamlTestClient, false); final String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null); @@ -188,10 +189,18 @@ protected ClientYamlTestClient initClientYamlTestClient( final RestClient restClient, final List hosts, final Version esVersion, - final Version masterVersion, + final Predicate clusterFeaturesPredicate, final String os ) { - return new ClientYamlTestClient(restSpec, restClient, hosts, esVersion, masterVersion, os, this::getClientBuilderWithSniffedHosts); + return new ClientYamlTestClient( + restSpec, + 
restClient, + hosts, + esVersion, + clusterFeaturesPredicate, + os, + this::getClientBuilderWithSniffedHosts + ); } @AfterClass diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 6e9107152c6f7..bd038cc4dcd58 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -19,7 +19,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponseException; @@ -363,8 +365,16 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx final String testPath = executionContext.getClientYamlTestCandidate() != null ? executionContext.getClientYamlTestCandidate().getTestPath() : null; - if (executionContext.esVersion().major == Version.V_7_17_0.major && executionContext.esVersion().after(Version.V_7_17_1)) { - // #84038 and #84089 mean that this assertion fails when running against a small number of 7.17.x released versions + + // #84038 and #84089 mean that this assertion fails when running against < 7.17.2 and 8.0.0 released versions + // This is really difficult to express just with features, so I will break it down into 2 parts: version check for v7, + // and feature check for v8. 
This way the version check can be removed once we move to v9 + @UpdateForV9 + var fixedInV7 = executionContext.esVersion().major == Version.V_7_17_0.major + && executionContext.esVersion().onOrAfter(Version.V_7_17_2); + var fixedProductionHeader = fixedInV7 + || executionContext.clusterHasFeature(RestTestLegacyFeatures.REST_ELASTIC_PRODUCT_HEADER_PRESENT.id()); + if (fixedProductionHeader) { checkElasticProductHeader(response.getHeaders("X-elastic-product")); } checkWarningHeaders(response.getWarningHeaders(), testPath); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java index 8e597c3992528..ca1f1b6c4f12c 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java @@ -8,7 +8,6 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; @@ -19,6 +18,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.engine.ReadOnlyEngine; import org.elasticsearch.index.shard.CloseFollowerIndexErrorSuppressionHelper; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -125,14 +125,12 @@ public void testCloseAndReopenFollowerIndex() throws Exception { ensureFollowerGreen("index2"); refresh(leaderClient(), "index1"); - SearchRequest leaderSearchRequest = new 
SearchRequest("index1"); - leaderSearchRequest.source().trackTotalHits(true); - long leaderIndexDocs = leaderClient().search(leaderSearchRequest).actionGet().getHits().getTotalHits().value; + long leaderIndexDocs = SearchResponseUtils.getTotalHitsValue(leaderClient().prepareSearch("index1").setTrackTotalHits(true)); assertBusy(() -> { refresh(followerClient(), "index2"); - SearchRequest followerSearchRequest = new SearchRequest("index2"); - followerSearchRequest.source().trackTotalHits(true); - long followerIndexDocs = followerClient().search(followerSearchRequest).actionGet().getHits().getTotalHits().value; + long followerIndexDocs = SearchResponseUtils.getTotalHitsValue( + followerClient().prepareSearch("index2").setTrackTotalHits(true) + ); assertThat(followerIndexDocs, equalTo(leaderIndexDocs)); }, 30L, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 88482eabafed5..2adf046e6ba58 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -119,6 +119,7 @@ import java.util.stream.Stream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; import static org.hamcrest.Matchers.containsString; @@ -369,9 +370,7 @@ public void testSyncMappings() throws Exception { leaderClient().prepareIndex("index1").setId(Long.toString(i)).setSource(source, XContentType.JSON).get(); } - assertBusy( - () -> 
assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(firstBatchNumDocs)) - ); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), firstBatchNumDocs)); MappingMetadata mappingMetadata = followerClient().admin().indices().prepareGetMappings("index2").get().getMappings().get("index2"); assertThat(XContentMapValues.extractValue("properties.f.type", mappingMetadata.sourceAsMap()), equalTo("integer")); assertThat(XContentMapValues.extractValue("properties.k", mappingMetadata.sourceAsMap()), nullValue()); @@ -382,12 +381,7 @@ public void testSyncMappings() throws Exception { leaderClient().prepareIndex("index1").setId(Long.toString(i)).setSource(source, XContentType.JSON).get(); } - assertBusy( - () -> assertThat( - followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, - equalTo(firstBatchNumDocs + secondBatchNumDocs) - ) - ); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), firstBatchNumDocs + secondBatchNumDocs)); mappingMetadata = followerClient().admin().indices().prepareGetMappings("index2").get().getMappings().get("index2"); assertThat(XContentMapValues.extractValue("properties.f.type", mappingMetadata.sourceAsMap()), equalTo("integer")); assertThat(XContentMapValues.extractValue("properties.k.type", mappingMetadata.sourceAsMap()), equalTo("long")); @@ -413,7 +407,7 @@ public void testNoMappingDefined() throws Exception { followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1").setId("1").setSource("{\"f\":1}", XContentType.JSON).get(); - assertBusy(() -> assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(1L))); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 1)); pauseFollow("index2"); MappingMetadata mappingMetadata = followerClient().admin().indices().prepareGetMappings("index2").get().getMappings().get("index2"); @@ 
-711,7 +705,7 @@ public void testCloseLeaderIndex() throws Exception { followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1").setId("1").setSource("{}", XContentType.JSON).get(); - assertBusy(() -> assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(1L))); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 1)); leaderClient().admin().indices().close(new CloseIndexRequest("index1")).actionGet(); assertBusy(() -> { @@ -735,7 +729,7 @@ public void testCloseLeaderIndex() throws Exception { leaderClient().admin().indices().open(new OpenIndexRequest("index1")).actionGet(); leaderClient().prepareIndex("index1").setId("2").setSource("{}", XContentType.JSON).get(); - assertBusy(() -> assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(2L))); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 2)); pauseFollow("index2"); } @@ -757,7 +751,7 @@ public void testCloseFollowIndex() throws Exception { followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1").setId("1").setSource("{}", XContentType.JSON).get(); - assertBusy(() -> assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(1L))); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 1)); followerClient().admin().indices().close(new CloseIndexRequest("index2").masterNodeTimeout(TimeValue.MAX_VALUE)).actionGet(); leaderClient().prepareIndex("index1").setId("2").setSource("{}", XContentType.JSON).get(); @@ -769,7 +763,7 @@ public void testCloseFollowIndex() throws Exception { assertThat(response.getStatsResponses().get(0).status().failedWriteRequests(), greaterThanOrEqualTo(1L)); }); followerClient().admin().indices().open(new OpenIndexRequest("index2").masterNodeTimeout(TimeValue.MAX_VALUE)).actionGet(); - 
assertBusy(() -> assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(2L))); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 2)); pauseFollow("index2"); } @@ -791,7 +785,7 @@ public void testDeleteLeaderIndex() throws Exception { followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1").setId("1").setSource("{}", XContentType.JSON).get(); - assertBusy(() -> assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(1L))); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 1)); leaderClient().admin().indices().delete(new DeleteIndexRequest("index1")).actionGet(); assertBusy(() -> { @@ -872,7 +866,7 @@ public void testDeleteFollowerIndex() throws Exception { followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1").setId("1").setSource("{}", XContentType.JSON).get(); - assertBusy(() -> assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(1L))); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 1)); followerClient().admin().indices().delete(new DeleteIndexRequest("index2").masterNodeTimeout(TimeValue.MAX_VALUE)).actionGet(); leaderClient().prepareIndex("index1").setId("2").setSource("{}", XContentType.JSON).get(); @@ -935,7 +929,7 @@ public void testUnfollowIndex() throws Exception { PutFollowAction.Request followRequest = putFollow("index1", "index2"); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1").setSource("{}", XContentType.JSON).get(); - assertBusy(() -> { assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(1L)); }); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 1)); // Indexing directly into index2 would fail now, 
because index2 is a follow index. // We can't test this here because an assertion trips before an actual error is thrown and then index call hangs. @@ -952,7 +946,7 @@ public void testUnfollowIndex() throws Exception { .setSource("{}", XContentType.JSON) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(2L)); + assertHitCount(followerClient().prepareSearch("index2"), 2); } public void testUnknownClusterAlias() throws Exception { @@ -1024,9 +1018,7 @@ public void testUpdateDynamicLeaderIndexSettings() throws Exception { for (long i = 0; i < firstBatchNumDocs; i++) { leaderClient().prepareIndex("leader").setSource("{}", XContentType.JSON).get(); } - assertBusy( - () -> assertThat(followerClient().prepareSearch("follower").get().getHits().getTotalHits().value, equalTo(firstBatchNumDocs)) - ); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("follower"), firstBatchNumDocs)); // Sanity check that the setting has not been set in follower index: { @@ -1053,10 +1045,7 @@ public void testUpdateDynamicLeaderIndexSettings() throws Exception { assertThat(getFollowTaskSettingsVersion("follower"), equalTo(2L)); try { - assertThat( - followerClient().prepareSearch("follower").get().getHits().getTotalHits().value, - equalTo(firstBatchNumDocs + secondBatchNumDocs) - ); + assertHitCount(followerClient().prepareSearch("follower"), firstBatchNumDocs + secondBatchNumDocs); } catch (Exception e) { throw new AssertionError("error while searching", e); } @@ -1080,9 +1069,7 @@ public void testLeaderIndexSettingNotPercolatedToFollower() throws Exception { for (long i = 0; i < firstBatchNumDocs; i++) { leaderClient().prepareIndex("leader").setSource("{}", XContentType.JSON).get(); } - assertBusy( - () -> assertThat(followerClient().prepareSearch("follower").get().getHits().getTotalHits().value, equalTo(firstBatchNumDocs)) - ); + assertBusy(() -> 
assertHitCount(followerClient().prepareSearch("follower"), firstBatchNumDocs)); // Sanity check that the setting has not been set in follower index: { @@ -1108,10 +1095,7 @@ public void testLeaderIndexSettingNotPercolatedToFollower() throws Exception { assertThat(getFollowTaskSettingsVersion("follower"), equalTo(2L)); try { - assertThat( - followerClient().prepareSearch("follower").get().getHits().getTotalHits().value, - equalTo(firstBatchNumDocs + secondBatchNumDocs) - ); + assertHitCount(followerClient().prepareSearch("follower"), firstBatchNumDocs + secondBatchNumDocs); } catch (Exception e) { throw new AssertionError("error while searching", e); } @@ -1133,9 +1117,7 @@ public void testUpdateAnalysisLeaderIndexSettings() throws Exception { leaderClient().prepareIndex("leader").setSource("{}", XContentType.JSON).get(); } - assertBusy( - () -> assertThat(followerClient().prepareSearch("follower").get().getHits().getTotalHits().value, equalTo(firstBatchNumDocs)) - ); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("follower"), firstBatchNumDocs)); assertThat(getFollowTaskSettingsVersion("follower"), equalTo(1L)); assertThat(getFollowTaskMappingVersion("follower"), equalTo(1L)); @@ -1185,10 +1167,7 @@ public void testUpdateAnalysisLeaderIndexSettings() throws Exception { ); try { - assertThat( - followerClient().prepareSearch("follower").get().getHits().getTotalHits().value, - equalTo(firstBatchNumDocs + secondBatchNumDocs) - ); + assertHitCount(followerClient().prepareSearch("follower"), firstBatchNumDocs + secondBatchNumDocs); } catch (Exception e) { throw new AssertionError("error while searching", e); } @@ -1574,7 +1553,7 @@ public void testCleanUpShardFollowTasksForDeletedIndices() throws Exception { followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); leaderClient().prepareIndex("index1").setId("1").setSource("{}", XContentType.JSON).get(); - assertBusy(() -> 
assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(1L))); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), 1)); assertBusy(() -> { String action = ShardFollowTask.NAME + "[c]"; diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java index d609b606238bc..05fc3b037c795 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java @@ -32,6 +32,7 @@ import java.util.stream.StreamSupport; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -53,21 +54,14 @@ public void testFollowIndex() throws Exception { final PutFollowAction.Request followRequest = getPutFollowRequest("leader", "follower"); client().execute(PutFollowAction.INSTANCE, followRequest).get(); - assertBusy( - () -> { assertThat(client().prepareSearch("follower").get().getHits().getTotalHits().value, equalTo(firstBatchNumDocs)); } - ); + assertBusy(() -> assertHitCount(client().prepareSearch("follower"), firstBatchNumDocs)); final long secondBatchNumDocs = randomIntBetween(2, 64); for (int i = 0; i < secondBatchNumDocs; i++) { prepareIndex("leader").setSource("{}", XContentType.JSON).get(); } - assertBusy(() -> { - assertThat( - client().prepareSearch("follower").get().getHits().getTotalHits().value, - equalTo(firstBatchNumDocs + secondBatchNumDocs) - ); - }); + assertBusy(() -> assertHitCount(client().prepareSearch("follower"), 
firstBatchNumDocs + secondBatchNumDocs)); PauseFollowAction.Request pauseRequest = new PauseFollowAction.Request("follower"); client().execute(PauseFollowAction.INSTANCE, pauseRequest); @@ -78,12 +72,7 @@ public void testFollowIndex() throws Exception { } client().execute(ResumeFollowAction.INSTANCE, getResumeFollowRequest("follower")).get(); - assertBusy(() -> { - assertThat( - client().prepareSearch("follower").get().getHits().getTotalHits().value, - equalTo(firstBatchNumDocs + secondBatchNumDocs + thirdBatchNumDocs) - ); - }); + assertBusy(() -> assertHitCount(client().prepareSearch("follower"), firstBatchNumDocs + secondBatchNumDocs + thirdBatchNumDocs)); ensureEmptyWriteBuffers(); } @@ -136,9 +125,7 @@ public void testIndexingMetricsIncremented() throws Exception { assertEquals(firstBatchNumDocs, indexingPressure.stats().getCurrentPrimaryOps()); }); blocker.countDown(); - assertBusy( - () -> { assertThat(client().prepareSearch("follower").get().getHits().getTotalHits().value, equalTo(firstBatchNumDocs)); } - ); + assertBusy(() -> assertHitCount(client().prepareSearch("follower"), firstBatchNumDocs)); ensureEmptyWriteBuffers(); } finally { if (blocker.getCount() > 0) { @@ -210,7 +197,7 @@ public void testChangeLeaderIndex() throws Exception { prepareIndex("index-1").setSource("{}", XContentType.JSON).get(); } client().execute(PutFollowAction.INSTANCE, getPutFollowRequest("index-1", "index-2")).get(); - assertBusy(() -> assertThat(client().prepareSearch("index-2").get().getHits().getTotalHits().value, equalTo((long) numDocs))); + assertBusy(() -> assertHitCount(client().prepareSearch("index-2"), numDocs)); // Then switch index-1 to be a follower of index-0 assertAcked(client().admin().indices().prepareCreate("index-0").setSource(settings, XContentType.JSON)); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java 
b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java index 2cb58a9991176..5c152be35b509 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java @@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -78,9 +79,7 @@ public void testFollowIndex() throws Exception { leaderClient().prepareIndex("index1").setId(Integer.toString(i)).setSource(source, XContentType.JSON).get(); } - assertBusy( - () -> assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(firstBatchNumDocs)) - ); + assertBusy(() -> assertHitCount(followerClient().prepareSearch("index2"), firstBatchNumDocs)); getFollowerCluster().fullRestart(); ensureFollowerGreen("index2"); @@ -115,7 +114,7 @@ public void testFollowIndex() throws Exception { followerClient().execute(PutFollowAction.INSTANCE, putFollow("index1", "index2", ActiveShardCount.ALL)).actionGet(); } } - assertThat(followerClient().prepareSearch("index2").get().getHits().getTotalHits().value, equalTo(totalDocs)); + assertHitCount(followerClient().prepareSearch("index2"), totalDocs); }, 30L, TimeUnit.SECONDS); cleanRemoteCluster(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 9bf22bd4e0ca3..047a2d6225035 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -61,6 +61,7 @@ import org.elasticsearch.license.LicensesMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; @@ -794,14 +795,9 @@ public void waitForDocs(final long numDocs, final BackgroundIndexer indexer) thr if (lastKnownCount >= numDocs) { try { - long count = indexer.getClient() - .prepareSearch() - .setTrackTotalHits(true) - .setSize(0) - .setQuery(QueryBuilders.matchAllQuery()) - .get() - .getHits() - .getTotalHits().value; + long count = SearchResponseUtils.getTotalHitsValue( + indexer.getClient().prepareSearch().setTrackTotalHits(true).setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); if (count == lastKnownCount) { // no progress - try to refresh for the next time diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java index 90a525c2df45c..d1599c8b6a827 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java @@ -59,6 +59,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @ESIntegTestCase.ClusterScope(numDataNodes = 0) @@ -123,7 +124,7 @@ public void 
testSnapshotAndRestore() throws Exception { assertTrue(e.toString().contains("_source only indices can't be searched or filtered")); // can-match phase pre-filters access to non-existing field - assertEquals(0, prepareSearch(sourceIdx).setQuery(QueryBuilders.termQuery("field1", "bar")).get().getHits().getTotalHits().value); + assertHitCount(prepareSearch(sourceIdx).setQuery(QueryBuilders.termQuery("field1", "bar")), 0); // make sure deletes do not work String idToDelete = "" + randomIntBetween(0, builders.length); expectThrows(ClusterBlockException.class, () -> client().prepareDelete(sourceIdx, idToDelete).setRouting("r" + idToDelete).get()); @@ -147,7 +148,7 @@ public void testSnapshotAndRestoreWithNested() throws Exception { ); assertTrue(e.toString().contains("_source only indices can't be searched or filtered")); // can-match phase pre-filters access to non-existing field - assertEquals(0, prepareSearch(sourceIdx).setQuery(QueryBuilders.termQuery("field1", "bar")).get().getHits().getTotalHits().value); + assertHitCount(prepareSearch(sourceIdx).setQuery(QueryBuilders.termQuery("field1", "bar")), 0); // make sure deletes do not work String idToDelete = "" + randomIntBetween(0, builders.length); expectThrows(ClusterBlockException.class, () -> client().prepareDelete(sourceIdx, idToDelete).setRouting("r" + idToDelete).get()); @@ -253,7 +254,6 @@ private static void assertMappings(String sourceIdx, boolean requireRouting, boo } private void assertHits(String index, int numDocsExpected, boolean sourceHadDeletions) { - SearchResponse searchResponse = prepareSearch(index).addSort(SeqNoFieldMapper.NAME, SortOrder.ASC).setSize(numDocsExpected).get(); BiConsumer assertConsumer = (res, allowHoles) -> { SearchHits hits = res.getHits(); long i = 0; @@ -272,9 +272,11 @@ private void assertHits(String index, int numDocsExpected, boolean sourceHadDele assertEquals("r" + id, hit.field("_routing").getValue()); } }; - assertConsumer.accept(searchResponse, sourceHadDeletions); - 
assertEquals(numDocsExpected, searchResponse.getHits().getTotalHits().value); - searchResponse = prepareSearch(index).addSort(SeqNoFieldMapper.NAME, SortOrder.ASC) + assertResponse(prepareSearch(index).addSort(SeqNoFieldMapper.NAME, SortOrder.ASC).setSize(numDocsExpected), searchResponse -> { + assertConsumer.accept(searchResponse, sourceHadDeletions); + assertEquals(numDocsExpected, searchResponse.getHits().getTotalHits().value); + }); + SearchResponse searchResponse = prepareSearch(index).addSort(SeqNoFieldMapper.NAME, SortOrder.ASC) .setScroll("1m") .slice(new SliceBuilder(SeqNoFieldMapper.NAME, randomIntBetween(0, 1), 2)) .setSize(randomIntBetween(1, 10)) @@ -283,12 +285,14 @@ private void assertHits(String index, int numDocsExpected, boolean sourceHadDele do { // now do a scroll with a slice assertConsumer.accept(searchResponse, true); + searchResponse.decRef(); searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); } while (searchResponse.getHits().getHits().length > 0); } finally { if (searchResponse.getScrollId() != null) { client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); } + searchResponse.decRef(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java index d691cb0eb4c53..5d1b2ef9a08e5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java @@ -152,9 +152,8 @@ public static void createIndexAndAliasIfNecessary( firstConcreteIndex, alias, false, - ActionListener.wrap( - unused -> updateWriteAlias(client, alias, legacyIndexWithoutSuffix, firstConcreteIndex, indexCreatedListener), - loggingListener::onFailure + indexCreatedListener.delegateFailureAndWrap( + (l, unused) -> 
updateWriteAlias(client, alias, legacyIndexWithoutSuffix, firstConcreteIndex, l) ) ); return; @@ -296,7 +295,7 @@ private static void updateWriteAlias( client.threadPool().getThreadContext(), ML_ORIGIN, request, - ActionListener.wrap(resp -> listener.onResponse(resp.isAcknowledged()), listener::onFailure), + listener.delegateFailureAndWrap((l, resp) -> l.onResponse(resp.isAcknowledged())), client.admin().indices()::aliases ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 5b5754af78018..29453205b4d00 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -125,7 +125,10 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener null, 1 ); - nextPhase.onResponse(new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null)); + ActionListener.respondAndRelease( + nextPhase, + new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null) + ); } @Override @@ -267,7 +270,10 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener awaitForLatch(); } - nextPhase.onResponse(new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null)); + ActionListener.respondAndRelease( + nextPhase, + new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null) + ); } @Override diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java index 2988af7a2dab6..c0e1c054f7a13 100644 --- 
a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java @@ -125,28 +125,27 @@ public void check(Components components, ActionListener deprecation getModelSnapshots.setPageParams(new PageParams(0, 50)); getModelSnapshots.setSort(ModelSnapshot.MIN_VERSION.getPreferredName()); - ActionListener getModelSnaphots = ActionListener.wrap( - _unused -> components.client() - .execute(GetModelSnapshotsAction.INSTANCE, getModelSnapshots, ActionListener.wrap(modelSnapshots -> { + ActionListener getModelSnaphots = deprecationIssueListener.delegateFailureAndWrap( + (delegate, _unused) -> components.client() + .execute(GetModelSnapshotsAction.INSTANCE, getModelSnapshots, delegate.delegateFailureAndWrap((l, modelSnapshots) -> { modelSnapshots.getResources() .results() .forEach(modelSnapshot -> checkModelSnapshot(modelSnapshot).ifPresent(issues::add)); - deprecationIssueListener.onResponse(new CheckResult(getName(), issues)); - }, deprecationIssueListener::onFailure)), - deprecationIssueListener::onFailure + l.onResponse(new CheckResult(getName(), issues)); + })) ); components.client() .execute( GetDatafeedsAction.INSTANCE, new GetDatafeedsAction.Request(GetDatafeedsAction.ALL), - ActionListener.wrap(datafeedsResponse -> { + getModelSnaphots.delegateFailureAndWrap((delegate, datafeedsResponse) -> { for (DatafeedConfig df : datafeedsResponse.getResponse().results()) { checkDataFeedAggregations(df, components.xContentRegistry()).ifPresent(issues::add); checkDataFeedQuery(df, components.xContentRegistry()).ifPresent(issues::add); } - getModelSnaphots.onResponse(null); - }, deprecationIssueListener::onFailure) + delegate.onResponse(null); + }) ); } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java 
b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 322267a14d32f..4dc5195f8345a 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -268,7 +268,7 @@ protected void masterOperation( final TaskId parentTask = new TaskId(clusterService.localNode().getId(), task.getId()); final GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices(sourceIndexName); getMappingsRequest.setParentTask(parentTask); - client.admin().indices().getMappings(getMappingsRequest, ActionListener.wrap(getMappingsResponse -> { + client.admin().indices().getMappings(getMappingsRequest, listener.delegateFailureAndWrap((delegate, getMappingsResponse) -> { final Map sourceIndexMappings = getMappingsResponse.mappings() .entrySet() .stream() @@ -307,7 +307,7 @@ protected void masterOperation( } if (validationException.validationErrors().isEmpty() == false) { - listener.onFailure(validationException); + delegate.onFailure(validationException); return; } @@ -315,7 +315,7 @@ protected void masterOperation( try { mapping = createDownsampleIndexMapping(helper, request.getDownsampleConfig(), mapperService, sourceIndexMappings); } catch (IOException e) { - listener.onFailure(e); + delegate.onFailure(e); return; } // 3. 
Create downsample index @@ -329,7 +329,7 @@ protected void masterOperation( if (createIndexResp.isAcknowledged()) { performShardDownsampling( request, - listener, + delegate, sourceIndexMetadata, downsampleIndexName, parentTask, @@ -337,13 +337,13 @@ protected void masterOperation( labelFields ); } else { - listener.onFailure(new ElasticsearchException("Failed to create downsample index [" + downsampleIndexName + "]")); + delegate.onFailure(new ElasticsearchException("Failed to create downsample index [" + downsampleIndexName + "]")); } }, e -> { if (e instanceof ResourceAlreadyExistsException) { performShardDownsampling( request, - listener, + delegate, sourceIndexMetadata, downsampleIndexName, parentTask, @@ -351,11 +351,11 @@ protected void masterOperation( labelFields ); } else { - listener.onFailure(e); + delegate.onFailure(e); } }) ); - }, listener::onFailure)); + })); } // 3. downsample index created or already exist (in case of retry). Run downsample indexer persistent task on each shard. 
diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java index 02d19bd0e0ff1..6b815f3fc7445 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/TransportEnrichStatsAction.java @@ -64,9 +64,9 @@ protected void masterOperation( EnrichStatsAction.Request request, ClusterState state, ActionListener listener - ) throws Exception { + ) { EnrichCoordinatorStatsAction.Request statsRequest = new EnrichCoordinatorStatsAction.Request(); - ActionListener statsListener = ActionListener.wrap(response -> { + ActionListener statsListener = listener.delegateFailureAndWrap((delegate, response) -> { if (response.hasFailures()) { // Report failures even if some node level requests succeed: Exception failure = null; @@ -77,7 +77,7 @@ protected void masterOperation( failure.addSuppressed(nodeFailure); } } - listener.onFailure(failure); + delegate.onFailure(failure); return; } @@ -100,8 +100,8 @@ protected void masterOperation( .filter(Objects::nonNull) .sorted(Comparator.comparing(EnrichStatsAction.Response.CacheStats::getNodeId)) .collect(Collectors.toList()); - listener.onResponse(new EnrichStatsAction.Response(policyExecutionTasks, coordinatorStats, cacheStats)); - }, listener::onFailure); + delegate.onResponse(new EnrichStatsAction.Response(policyExecutionTasks, coordinatorStats, cacheStats)); + }); client.execute(EnrichCoordinatorStatsAction.INSTANCE, statsRequest, statsListener); } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java index 9ab7f7dea9b2d..bc7a733a3daed 100644 --- 
a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; @@ -1653,22 +1652,24 @@ public void testRunnerTwoObjectLevelsSourceMappingDateRangeWithFormat() throws E }""", XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet(); assertEquals(RestStatus.CREATED, indexRequest.status()); - SearchResponse sourceSearchResponse = client().search( - new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery())) - ).actionGet(); - assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); - assertNotNull(sourceDocMap); - Map dataField = ((Map) sourceDocMap.get("data")); - assertNotNull(dataField); - Map fieldsField = ((Map) dataField.get("fields")); - assertNotNull(fieldsField); - Map periodField = ((Map) fieldsField.get("period")); - assertNotNull(periodField); - assertThat(periodField.get("gte"), is(equalTo("2021/08/20 at 12:00"))); - assertThat(periodField.get("lte"), is(equalTo("2021/08/28 at 23:00"))); - assertThat(fieldsField.get("status"), is(equalTo("enrolled"))); - assertThat(fieldsField.get("field3"), is(equalTo("ignored"))); + assertResponse( + client().search(new SearchRequest(sourceIndex).source(SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()))), + sourceSearchResponse -> { + 
assertThat(sourceSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map sourceDocMap = sourceSearchResponse.getHits().getAt(0).getSourceAsMap(); + assertNotNull(sourceDocMap); + Map dataField = ((Map) sourceDocMap.get("data")); + assertNotNull(dataField); + Map fieldsField = ((Map) dataField.get("fields")); + assertNotNull(fieldsField); + Map periodField = ((Map) fieldsField.get("period")); + assertNotNull(periodField); + assertThat(periodField.get("gte"), is(equalTo("2021/08/20 at 12:00"))); + assertThat(periodField.get("lte"), is(equalTo("2021/08/28 at 23:00"))); + assertThat(fieldsField.get("status"), is(equalTo("enrolled"))); + assertThat(fieldsField.get("field3"), is(equalTo("ignored"))); + } + ); String policyName = "test1"; List enrichFields = List.of("data.fields.status", "missingField"); @@ -1716,42 +1717,48 @@ public void testRunnerTwoObjectLevelsSourceMappingDateRangeWithFormat() throws E } """); - SearchResponse enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source( - SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("data.fields.period", "2021-08-19T14:00:00Z")) - ) - ).actionGet(); - - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source( + SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("data.fields.period", "2021-08-19T14:00:00Z")) + ) + ), + enrichSearchResponse -> assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(0L)) + ); - enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source( - SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("data.fields.period", "2021-08-20T14:00:00Z")) - ) - ).actionGet(); + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source( + SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("data.fields.period", "2021-08-20T14:00:00Z")) + ) + ), + 
enrichSearchResponse -> { + assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); + assertNotNull(enrichDocument); + assertThat(enrichDocument.size(), is(equalTo(1))); + Map resultDataField = ((Map) enrichDocument.get("data")); + assertNotNull(resultDataField); + Map resultFieldsField = ((Map) resultDataField.get("fields")); + assertNotNull(resultFieldsField); + assertThat(resultFieldsField.size(), is(equalTo(2))); + Map resultsPeriodField = ((Map) resultFieldsField.get("period")); + assertNotNull(resultsPeriodField); + assertThat(resultsPeriodField.get("gte"), is(equalTo("2021/08/20 at 12:00"))); + assertThat(resultsPeriodField.get("lte"), is(equalTo("2021/08/28 at 23:00"))); + assertThat(resultFieldsField.get("status"), is(equalTo("enrolled"))); + assertNull(resultFieldsField.get("field3")); + } + ); - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); - Map enrichDocument = enrichSearchResponse.getHits().iterator().next().getSourceAsMap(); - assertNotNull(enrichDocument); - assertThat(enrichDocument.size(), is(equalTo(1))); - Map resultDataField = ((Map) enrichDocument.get("data")); - assertNotNull(resultDataField); - Map resultFieldsField = ((Map) resultDataField.get("fields")); - assertNotNull(resultFieldsField); - assertThat(resultFieldsField.size(), is(equalTo(2))); - Map resultsPeriodField = ((Map) resultFieldsField.get("period")); - assertNotNull(periodField); - assertThat(resultsPeriodField.get("gte"), is(equalTo("2021/08/20 at 12:00"))); - assertThat(resultsPeriodField.get("lte"), is(equalTo("2021/08/28 at 23:00"))); - assertThat(resultFieldsField.get("status"), is(equalTo("enrolled"))); - assertNull(resultFieldsField.get("field3")); - - enrichSearchResponse = client().search( - new SearchRequest(".enrich-test1").source( - SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("data.fields.period", 
"2021/08/20 at 14:00")) - ) - ).actionGet(); - assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertResponse( + client().search( + new SearchRequest(".enrich-test1").source( + SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("data.fields.period", "2021/08/20 at 14:00")) + ) + ), + enrichSearchResponse -> assertThat(enrichSearchResponse.getHits().getTotalHits().value, equalTo(1L)) + ); // Validate segments validateSegments(createdEnrichIndex, 1); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java index e986f6e9e0656..079af561e00c9 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java @@ -90,8 +90,10 @@ public void testCoordinateLookups() { // Replying a response and that should trigger another coordination round MultiSearchResponse.Item[] responseItems = new MultiSearchResponse.Item[5]; for (int i = 0; i < 5; i++) { + emptyResponse.incRef(); responseItems[i] = new MultiSearchResponse.Item(emptyResponse, null); } + emptyResponse.decRef(); final MultiSearchResponse res1 = new MultiSearchResponse(responseItems, 1L); try { lookupFunction.capturedConsumers.get(0).accept(res1, null); @@ -102,6 +104,7 @@ public void testCoordinateLookups() { // Replying last response, resulting in an empty queue and no outstanding requests. 
responseItems = new MultiSearchResponse.Item[5]; for (int i = 0; i < 5; i++) { + emptyResponse.incRef(); responseItems[i] = new MultiSearchResponse.Item(emptyResponse, null); } var res2 = new MultiSearchResponse(responseItems, 1L); @@ -318,7 +321,11 @@ public void testReduce() { Map> shardResponses = new HashMap<>(); try { - MultiSearchResponse.Item item1 = new MultiSearchResponse.Item(emptySearchResponse(), null); + var empty = emptySearchResponse(); + // use empty response 3 times below and we start out with ref-count 1 + empty.incRef(); + empty.incRef(); + MultiSearchResponse.Item item1 = new MultiSearchResponse.Item(empty, null); itemsPerIndex.put("index1", List.of(new Tuple<>(0, null), new Tuple<>(1, null), new Tuple<>(2, null))); shardResponses.put( "index1", @@ -329,7 +336,11 @@ public void testReduce() { itemsPerIndex.put("index2", List.of(new Tuple<>(3, null), new Tuple<>(4, null), new Tuple<>(5, null))); shardResponses.put("index2", new Tuple<>(null, failure)); - MultiSearchResponse.Item item2 = new MultiSearchResponse.Item(emptySearchResponse(), null); + // use empty response 3 times below + empty.incRef(); + empty.incRef(); + empty.incRef(); + MultiSearchResponse.Item item2 = new MultiSearchResponse.Item(empty, null); itemsPerIndex.put("index3", List.of(new Tuple<>(6, null), new Tuple<>(7, null), new Tuple<>(8, null))); shardResponses.put( "index3", diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java index c3afa6a8b31f6..8a1b336bfa1e3 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorIndexService.java @@ -100,10 +100,7 @@ public void postConnector(Connector connector, ActionListener 
listener.onResponse(new PostConnectorAction.Response(indexResponse.getId())), - listener::onFailure - ) + listener.delegateFailureAndWrap((l, indexResponse) -> l.onResponse(new PostConnectorAction.Response(indexResponse.getId()))) ); } catch (Exception e) { listener.onFailure(e); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index 19c551d85617f..3fa4a90a6e734 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -88,4 +88,9 @@ public void closeInternal() { public void allowPassingToDifferentDriver() { vector.allowPassingToDifferentDriver(); } + + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index e834a1c171e49..9d3f69bfaa981 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -89,4 +89,9 @@ public void closeInternal() { public void allowPassingToDifferentDriver() { vector.allowPassingToDifferentDriver(); } + + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index 62319e9c100cb..b23a448c58336 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -88,4 +88,9 @@ public void closeInternal() { public void allowPassingToDifferentDriver() { vector.allowPassingToDifferentDriver(); } + + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index ccc242dd1a573..028ef35577753 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -88,4 +88,9 @@ public void closeInternal() { public void allowPassingToDifferentDriver() { vector.allowPassingToDifferentDriver(); } + + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index 94697b3136fce..589a9341188fc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -88,4 +88,9 @@ public void closeInternal() { public void allowPassingToDifferentDriver() { vector.allowPassingToDifferentDriver(); } + + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 91b6bb0ffac87..8772e633ff14b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -96,4 +96,9 @@ $endif$ public void allowPassingToDifferentDriver() { vector.allowPassingToDifferentDriver(); } + + @Override + public BlockFactory blockFactory() { + return vector.blockFactory(); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index a9c6666ce6f94..826c25f3e7828 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -44,7 +44,6 @@ import java.util.Objects; import java.util.TreeMap; import java.util.function.Supplier; -import java.util.stream.Collectors; /** * Operator that extracts doc_values from a Lucene index out of pages that have been produced by {@link LuceneSourceOperator} @@ -77,7 +76,22 @@ public Operator get(DriverContext driverContext) { @Override public String describe() { - return "ValuesSourceReaderOperator[field = " + fields.stream().map(f -> f.name).collect(Collectors.joining(", ")) + "]"; + StringBuilder sb = new StringBuilder(); + sb.append("ValuesSourceReaderOperator[fields = ["); + if (fields.size() < 10) { + boolean first = true; + for (FieldInfo f : fields) { + if (first) { + first = false; + } else { + sb.append(", "); + } + sb.append(f.name); + } + } else { + sb.append(fields.size()).append(" fields"); + } + return sb.append("]]").toString(); } } @@ -377,7 +391,22 @@ private LeafReaderContext ctx(int shard, int segment) { @Override 
public String toString() { - return "ValuesSourceReaderOperator[field = " + fields.stream().map(f -> f.info.name).collect(Collectors.joining(", ")) + "]"; + StringBuilder sb = new StringBuilder(); + sb.append("ValuesSourceReaderOperator[fields = ["); + if (fields.size() < 10) { + boolean first = true; + for (FieldWork f : fields) { + if (first) { + first = false; + } else { + sb.append(", "); + } + sb.append(f.info.name); + } + } else { + sb.append(fields.size()).append(" fields"); + } + return sb.append("]]").toString(); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java index 27b0380ecfea0..96e5de20ba35c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java @@ -648,6 +648,22 @@ public void testAllowPassingBlockToDifferentContext() throws Exception { localBreaker2.close(); } + public void testOwningFactoryOfVectorBlock() { + BlockFactory parentFactory = blockFactory(ByteSizeValue.ofBytes(between(1024, 4096))); + LocalCircuitBreaker localBreaker = new LocalCircuitBreaker(parentFactory.breaker(), between(0, 1024), between(0, 1024)); + BlockFactory localFactory = new BlockFactory(localBreaker, bigArrays, parentFactory); + int numValues = between(2, 10); + try (var builder = localFactory.newIntVectorBuilder(numValues)) { + for (int i = 0; i < numValues; i++) { + builder.appendInt(randomInt()); + } + IntBlock block = builder.build().asBlock(); + assertThat(block.blockFactory(), equalTo(localFactory)); + block.allowPassingToDifferentDriver(); + assertThat(block.blockFactory(), equalTo(parentFactory)); + } + } + static BytesRef randomBytesRef() { return new BytesRef(randomByteArrayOfLength(between(1, 20))); } diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index 03815dcdaea78..f6310d826c989 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -245,7 +245,7 @@ private void initIndex(int size, int commitEvery) throws IOException { @Override protected String expectedDescriptionOfSimple() { - return "ValuesSourceReaderOperator[field = long]"; + return "ValuesSourceReaderOperator[fields = [long]]"; } @Override @@ -1354,4 +1354,18 @@ private void testSequentialStoredFields(boolean sequential, int docCount) { ); assertDriverContext(driverContext); } + + public void testDescriptionOfMany() { + List cases = infoAndChecksForEachType(randomFrom(Block.MvOrdering.values())); + + ValuesSourceReaderOperator.Factory factory = new ValuesSourceReaderOperator.Factory( + cases.stream().map(c -> c.info).toList(), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ); + assertThat(factory.describe(), equalTo("ValuesSourceReaderOperator[fields = [" + cases.size() + " fields]]")); + try (Operator op = factory.get(driverContext())) { + assertThat(op.toString(), equalTo("ValuesSourceReaderOperator[fields = [" + cases.size() + " fields]]")); + } + } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index ef3e43aa6d8ab..95da19e38a05d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -569,7 +569,7 @@ message:keyword | ts:keyword | level:keyword // 
end::dissectRightPaddingModifier-result[] ; -dissectEmptyRightPaddingModifier +dissectEmptyRightPaddingModifier#[skip:-8.11.2, reason:Support for empty right padding modifiers introduced in 8.11.2] // tag::dissectEmptyRightPaddingModifier[] ROW message="[1998-08-10T17:15:42] [WARN]" | DISSECT message "[%{ts}]%{->}[%{level}]" diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index 2d1d01e42b509..e499d3b783bb8 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -91,7 +91,7 @@ public void setupIndex() throws IOException { NUM_DOCS = between(4 * PAGE_SIZE, 5 * PAGE_SIZE); READ_DESCRIPTION = """ \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = PAGE_SIZE, limit = 2147483647] - \\_ValuesSourceReaderOperator[field = pause_me] + \\_ValuesSourceReaderOperator[fields = [pause_me]] \\_AggregationOperator[mode = INITIAL, aggs = sum of longs] \\_ExchangeSinkOperator""".replace("PAGE_SIZE", Integer.toString(PAGE_SIZE)); MERGE_DESCRIPTION = """ @@ -175,7 +175,7 @@ public void testTaskContents() throws Exception { luceneSources++; continue; } - if (o.operator().equals("ValuesSourceReaderOperator[field = pause_me]")) { + if (o.operator().equals("ValuesSourceReaderOperator[fields = [pause_me]]")) { ValuesSourceReaderOperator.Status oStatus = (ValuesSourceReaderOperator.Status) o.status(); assertMap( oStatus.readersBuilt(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index da305da3ea84d..945f543329c15 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -151,18 +151,18 @@ public void lookupAsync( ) { ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); ActionListener listener = ContextPreservingActionListener.wrapPreservingContext(outListener, threadContext); - hasEnrichPrivilege(ActionListener.wrap(ignored -> { + hasEnrichPrivilege(listener.delegateFailureAndWrap((delegate, ignored) -> { ClusterState clusterState = clusterService.state(); GroupShardsIterator shardIterators = clusterService.operationRouting() .searchShards(clusterState, new String[] { index }, Map.of(), "_local"); if (shardIterators.size() != 1) { - listener.onFailure(new EsqlIllegalArgumentException("target index {} has more than one shard", index)); + delegate.onFailure(new EsqlIllegalArgumentException("target index {} has more than one shard", index)); return; } ShardIterator shardIt = shardIterators.get(0); ShardRouting shardRouting = shardIt.nextOrNull(); if (shardRouting == null) { - listener.onFailure(new UnavailableShardsException(shardIt.shardId(), "enrich index is not available")); + delegate.onFailure(new UnavailableShardsException(shardIt.shardId(), "enrich index is not available")); return; } DiscoveryNode targetNode = clusterState.nodes().get(shardRouting.currentNodeId()); @@ -176,13 +176,13 @@ public void lookupAsync( parentTask, TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>( - listener.map(LookupResponse::takePage), + delegate.map(LookupResponse::takePage), in -> new LookupResponse(in, blockFactory), executor ) ); } - }, listener::onFailure)); + })); } private void hasEnrichPrivilege(ActionListener outListener) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index d6f3b61cb19dd..79a7ed2a09e2c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -993,23 +993,33 @@ public static void renderTypesTable() throws IOException { if (System.getProperty("generateDocs") == null) { return; } - String name = functionName(); // TODO types table for operators + String name = functionName(); + if (binaryOperator(name) != null) { + renderTypesTable(List.of("lhs", "rhs")); + return; + } + if (unaryOperator(name) != null) { + renderTypesTable(List.of("v")); + return; + } FunctionDefinition definition = definition(name); - if (definition == null) { - LogManager.getLogger(getTestClass()).info("Skipping rendering types because the function isn't registered"); + if (definition != null) { + renderTypesTable(EsqlFunctionRegistry.description(definition).argNames()); return; } + LogManager.getLogger(getTestClass()).info("Skipping rendering types because the function isn't registered"); + } - List args = EsqlFunctionRegistry.description(definition).argNames(); + private static void renderTypesTable(List argNames) throws IOException { StringBuilder header = new StringBuilder(); - for (String arg : args) { + for (String arg : argNames) { header.append(arg).append(" | "); } header.append("result"); List table = new ArrayList<>(); for (Map.Entry, DataType> sig : signatures.entrySet()) { - if (sig.getKey().size() != args.size()) { + if (sig.getKey().size() != argNames.size()) { continue; } StringBuilder b = new StringBuilder(); @@ -1052,9 +1062,9 @@ private static String binaryOperator(String name) { case "div" -> "/"; case "equals" -> "=="; case "greater_than" -> ">"; - case "greater_than_or_equal_to" -> ">="; + case "greater_than_or_equal" -> ">="; 
case "less_than" -> "<"; - case "less_than_or_equal_to" -> "<="; + case "less_than_or_equal" -> "<="; case "mod" -> "%"; case "mul" -> "*"; case "not_equals" -> "!="; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index 30de8ecae135b..6fce2646012af 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -77,6 +77,7 @@ public void testMatchAll() throws IOException { testCase(new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").asBuilder(), false, false, this::runCase); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102997") public void testMatchSome() throws IOException { int max = between(1, 100); testCase( diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index e5c3faa47d910..0be3ecdd68257 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -68,6 +68,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import 
static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -125,27 +126,39 @@ public void testCloseFreezeAndOpen() throws Exception { .setScroll(TimeValue.timeValueMinutes(1)) .setSize(1) .get(); - do { - assertHitCount(searchResponse, 3); - assertEquals(1, searchResponse.getHits().getHits().length); - SearchService searchService = getInstanceFromNode(SearchService.class); - assertThat(searchService.getActiveContexts(), Matchers.greaterThanOrEqualTo(1)); - for (int i = 0; i < 2; i++) { - shard = indexService.getShard(i); - engine = IndexShardTestCase.getEngine(shard); - // scrolls keep the reader open - assertTrue(((FrozenEngine) engine).isReaderOpen()); - } - searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); - } while (searchResponse.getHits().getHits().length > 0); + try { + do { + assertHitCount(searchResponse, 3); + assertEquals(1, searchResponse.getHits().getHits().length); + SearchService searchService = getInstanceFromNode(SearchService.class); + assertThat(searchService.getActiveContexts(), Matchers.greaterThanOrEqualTo(1)); + for (int i = 0; i < 2; i++) { + shard = indexService.getShard(i); + engine = IndexShardTestCase.getEngine(shard); + // scrolls keep the reader open + assertTrue(((FrozenEngine) engine).isReaderOpen()); + } + searchResponse.decRef(); + searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); + } while (searchResponse.getHits().getHits().length > 0); + } finally { + searchResponse.decRef(); + } client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); String pitId = openReaders(TimeValue.timeValueMinutes(1), indexName); try { for (int from = 0; from < 3; from++) { - searchResponse = client().prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)).setSize(1).setFrom(from).get(); - assertHitCount(searchResponse, 3); - assertEquals(1, searchResponse.getHits().getHits().length); + 
assertResponse( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitId)) + .setSize(1) + .setFrom(from), + response -> { + assertHitCount(response, 3); + assertEquals(1, response.getHits().getHits().length); + } + ); SearchService searchService = getInstanceFromNode(SearchService.class); assertThat(searchService.getActiveContexts(), Matchers.greaterThanOrEqualTo(1)); for (int i = 0; i < 2; i++) { @@ -192,7 +205,8 @@ public void testSearchAndGetAPIsAreThrottled() throws IOException { client().prepareSearch(indexName) .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) .setSearchType(SearchType.QUERY_THEN_FETCH) - .get(); + .get() + .decRef(); // in total 4 refreshes 1x query & 1x fetch per shard (we have 2) numRefreshes += 3; } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportPutSamlServiceProviderAction.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportPutSamlServiceProviderAction.java index 64193d7e1778a..6cc322e84822d 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportPutSamlServiceProviderAction.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportPutSamlServiceProviderAction.java @@ -85,14 +85,14 @@ protected void doExecute( return; } logger.trace("Searching for existing ServiceProvider with id [{}] for [{}]", document.entityId, request); - index.findByEntityId(document.entityId, ActionListener.wrap(matchingDocuments -> { + index.findByEntityId(document.entityId, listener.delegateFailureAndWrap((delegate, matchingDocuments) -> { if (matchingDocuments.isEmpty()) { // derive a document id from the entity id so that don't accidentally create duplicate entities due to a race condition document.docId = deriveDocumentId(document); // force a create in case there are concurrent requests. 
This way, if two nodes/threads are trying to create the SP at // the same time, one will fail. That's not ideal, but it's better than having 1 silently overwrite the other. logger.trace("No existing ServiceProvider for EntityID=[{}], writing new doc [{}]", document.entityId, document.docId); - writeDocument(document, DocWriteRequest.OpType.CREATE, request.getRefreshPolicy(), listener); + writeDocument(document, DocWriteRequest.OpType.CREATE, request.getRefreshPolicy(), delegate); } else if (matchingDocuments.size() == 1) { final SamlServiceProviderDocument existingDoc = Iterables.get(matchingDocuments, 0).getDocument(); assert existingDoc.docId != null : "Loaded document with no doc id"; @@ -100,7 +100,7 @@ protected void doExecute( document.setDocId(existingDoc.docId); document.setCreated(existingDoc.created); logger.trace("Found existing ServiceProvider for EntityID=[{}], writing to doc [{}]", document.entityId, document.docId); - writeDocument(document, DocWriteRequest.OpType.INDEX, request.getRefreshPolicy(), listener); + writeDocument(document, DocWriteRequest.OpType.INDEX, request.getRefreshPolicy(), delegate); } else { logger.warn( "Found multiple existing service providers in [{}] with entity id [{}] - [{}]", @@ -108,11 +108,11 @@ protected void doExecute( document.entityId, matchingDocuments.stream().map(d -> d.getDocument().docId).collect(Collectors.joining(",")) ); - listener.onFailure( + delegate.onFailure( new IllegalStateException("Multiple service providers already exist with entity id [" + document.entityId + "]") ); } - }, listener::onFailure)); + })); } private void writeDocument( @@ -137,8 +137,8 @@ private void writeDocument( document, opType, refreshPolicy, - ActionListener.wrap( - response -> listener.onResponse( + listener.delegateFailureAndWrap( + (l, response) -> l.onResponse( new PutSamlServiceProviderResponse( response.getId(), response.getResult() == DocWriteResponse.Result.CREATED, @@ -147,8 +147,7 @@ private void writeDocument( 
document.entityId, document.enabled ) - ), - listener::onFailure + ) ) ); } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java index f2b9c20c79d61..85afdc96e6344 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportSamlInitiateSingleSignOnAction.java @@ -181,12 +181,12 @@ private void buildUserFromAuthentication( ) { User user = secondaryAuthentication.getUser(); secondaryAuthentication.execute(ignore -> { - ActionListener wrapped = ActionListener.wrap(userPrivileges -> { + ActionListener wrapped = listener.delegateFailureAndWrap((delegate, userPrivileges) -> { if (userPrivileges.hasAccess == false) { - listener.onResponse(null); + delegate.onResponse(null); } else { logger.debug("Resolved [{}] for [{}]", userPrivileges, user); - listener.onResponse( + delegate.onResponse( new UserServiceAuthentication( user.principal(), user.fullName(), @@ -196,7 +196,7 @@ private void buildUserFromAuthentication( ) ); } - }, listener::onFailure); + }); privilegeResolver.resolve(serviceProvider.getPrivileges(), wrapped); return null; }); diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java index 202b52e0974d8..a84e35ce47f32 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java @@ -208,10 +208,10 @@ public void 
installIndexTemplate(ActionListener listener) { } final String template = TemplateUtils.loadTemplate(TEMPLATE_RESOURCE, Version.CURRENT.toString(), TEMPLATE_VERSION_SUBSTITUTE); final PutIndexTemplateRequest request = new PutIndexTemplateRequest(TEMPLATE_NAME).source(template, XContentType.JSON); - client.admin().indices().putTemplate(request, ActionListener.wrap(response -> { + client.admin().indices().putTemplate(request, listener.delegateFailureAndWrap((l, response) -> { logger.info("Installed template [{}]", TEMPLATE_NAME); - listener.onResponse(true); - }, listener::onFailure)); + l.onResponse(true); + })); } private boolean isTemplateUpToDate(ClusterState state) { @@ -223,10 +223,10 @@ public void deleteDocument(DocumentVersion version, WriteRequest.RefreshPolicy r .setIfSeqNo(version.seqNo) .setIfPrimaryTerm(version.primaryTerm) .setRefreshPolicy(refreshPolicy); - client.delete(request, ActionListener.wrap(response -> { + client.delete(request, listener.delegateFailureAndWrap((l, response) -> { logger.debug("Deleted service provider document [{}] ({})", version.id, response.getResult()); - listener.onResponse(response); - }, listener::onFailure)); + l.onResponse(response); + })); } public void writeDocument( @@ -244,9 +244,7 @@ public void writeDocument( if (templateInstalled) { _writeDocument(document, opType, refreshPolicy, listener); } else { - installIndexTemplate( - ActionListener.wrap(installed -> _writeDocument(document, opType, refreshPolicy, listener), listener::onFailure) - ); + installIndexTemplate(listener.delegateFailureAndWrap((l, installed) -> _writeDocument(document, opType, refreshPolicy, l))); } } @@ -268,7 +266,7 @@ private void _writeDocument( .source(xContentBuilder) .id(document.docId) .setRefreshPolicy(refreshPolicy); - client.index(request, ActionListener.wrap(response -> { + client.index(request, listener.delegateFailureAndWrap((l, response) -> { logger.debug( "Wrote service provider [{}][{}] as document [{}] ({})", document.name, @@ 
-276,8 +274,8 @@ private void _writeDocument( response.getId(), response.getResult() ); - listener.onResponse(response); - }, listener::onFailure)); + l.onResponse(response); + })); } catch (IOException e) { listener.onFailure(e); } @@ -285,15 +283,15 @@ private void _writeDocument( public void readDocument(String documentId, ActionListener listener) { final GetRequest request = new GetRequest(ALIAS_NAME, documentId); - client.get(request, ActionListener.wrap(response -> { + client.get(request, listener.delegateFailureAndWrap((l, response) -> { if (response.isExists()) { - listener.onResponse( + l.onResponse( new DocumentSupplier(new DocumentVersion(response), () -> toDocument(documentId, response.getSourceAsBytesRef())) ); } else { - listener.onResponse(null); + l.onResponse(null); } - }, listener::onFailure)); + })); } public void findByEntityId(String entityId, ActionListener> listener) { @@ -309,7 +307,7 @@ public void findAll(ActionListener> listener) { public void refresh(ActionListener listener) { client.admin() .indices() - .refresh(new RefreshRequest(ALIAS_NAME), ActionListener.wrap(response -> listener.onResponse(null), listener::onFailure)); + .refresh(new RefreshRequest(ALIAS_NAME), listener.delegateFailureAndWrap((l, response) -> l.onResponse(null))); } private void findDocuments(QueryBuilder query, ActionListener> listener) { diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderResolver.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderResolver.java index e0c2fdc4100cf..4e3e76e539a95 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderResolver.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderResolver.java @@ -39,13 +39,13 @@ public SamlServiceProviderResolver( * service provider does not exist. 
*/ public void resolve(String entityId, ActionListener listener) { - index.findByEntityId(entityId, ActionListener.wrap(documentSuppliers -> { + index.findByEntityId(entityId, listener.delegateFailureAndWrap((delegate, documentSuppliers) -> { if (documentSuppliers.isEmpty()) { - listener.onResponse(null); + delegate.onResponse(null); return; } if (documentSuppliers.size() > 1) { - listener.onFailure( + delegate.onFailure( new IllegalStateException( "Found multiple service providers with entity ID [" + entityId @@ -61,11 +61,11 @@ public void resolve(String entityId, ActionListener listene final DocumentSupplier doc = Iterables.get(documentSuppliers, 0); final CachedServiceProvider cached = cache.get(entityId); if (cached != null && cached.documentVersion.equals(doc.version)) { - listener.onResponse(cached.serviceProvider); + delegate.onResponse(cached.serviceProvider); } else { - populateCacheAndReturn(entityId, doc, listener); + populateCacheAndReturn(entityId, doc, delegate); } - }, listener::onFailure)); + })); } private void populateCacheAndReturn(String entityId, DocumentSupplier doc, ActionListener listener) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportDeleteInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportDeleteInferenceModelAction.java index 88a364d1de8fe..ceb9fb92e3fab 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportDeleteInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportDeleteInferenceModelAction.java @@ -56,11 +56,8 @@ protected void masterOperation( DeleteInferenceModelAction.Request request, ClusterState state, ActionListener listener - ) throws Exception { - modelRegistry.deleteModel( - request.getModelId(), - ActionListener.wrap(r -> listener.onResponse(AcknowledgedResponse.TRUE), listener::onFailure) - ); 
+ ) { + modelRegistry.deleteModel(request.getModelId(), listener.delegateFailureAndWrap((l, r) -> l.onResponse(AcknowledgedResponse.TRUE))); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java index 52fc115d4a4a6..a7f5fb6c6c9a0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java @@ -76,10 +76,10 @@ protected void doExecute( } private void getSingleModel(String modelId, TaskType requestedTaskType, ActionListener listener) { - modelRegistry.getModel(modelId, ActionListener.wrap(unparsedModel -> { + modelRegistry.getModel(modelId, listener.delegateFailureAndWrap((delegate, unparsedModel) -> { var service = serviceRegistry.getService(unparsedModel.service()); if (service.isEmpty()) { - listener.onFailure( + delegate.onFailure( new ElasticsearchStatusException( "Unknown service [{}] for model [{}]. 
", RestStatus.INTERNAL_SERVER_ERROR, @@ -91,7 +91,7 @@ private void getSingleModel(String modelId, TaskType requestedTaskType, ActionLi } if (requestedTaskType.isAnyOrSame(unparsedModel.taskType()) == false) { - listener.onFailure( + delegate.onFailure( new ElasticsearchStatusException( "Requested task type [{}] does not match the model's task type [{}]", RestStatus.BAD_REQUEST, @@ -103,20 +103,20 @@ private void getSingleModel(String modelId, TaskType requestedTaskType, ActionLi } var model = service.get().parsePersistedConfig(unparsedModel.modelId(), unparsedModel.taskType(), unparsedModel.settings()); - listener.onResponse(new GetInferenceModelAction.Response(List.of(model.getConfigurations()))); - }, listener::onFailure)); + delegate.onResponse(new GetInferenceModelAction.Response(List.of(model.getConfigurations()))); + })); } private void getAllModels(ActionListener listener) { modelRegistry.getAllModels( - ActionListener.wrap(models -> executor.execute(ActionRunnable.supply(listener, () -> parseModels(models))), listener::onFailure) + listener.delegateFailureAndWrap((l, models) -> executor.execute(ActionRunnable.supply(l, () -> parseModels(models)))) ); } private void getModelsByTaskType(TaskType taskType, ActionListener listener) { modelRegistry.getModelsByTaskType( taskType, - ActionListener.wrap(models -> executor.execute(ActionRunnable.supply(listener, () -> parseModels(models))), listener::onFailure) + listener.delegateFailureAndWrap((l, models) -> executor.execute(ActionRunnable.supply(l, () -> parseModels(models)))) ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java index 7fb86763ad534..db98aeccc556b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceAction.java @@ -42,10 +42,10 @@ public TransportInferenceAction( @Override protected void doExecute(Task task, InferenceAction.Request request, ActionListener listener) { - ActionListener getModelListener = ActionListener.wrap(unparsedModel -> { + ActionListener getModelListener = listener.delegateFailureAndWrap((delegate, unparsedModel) -> { var service = serviceRegistry.getService(unparsedModel.service()); if (service.isEmpty()) { - listener.onFailure( + delegate.onFailure( new ElasticsearchStatusException( "Unknown service [{}] for model [{}]. ", RestStatus.INTERNAL_SERVER_ERROR, @@ -58,7 +58,7 @@ protected void doExecute(Task task, InferenceAction.Request request, ActionListe if (request.getTaskType().isAnyOrSame(unparsedModel.taskType()) == false) { // not the wildcard task type and not the model task type - listener.onFailure( + delegate.onFailure( new ElasticsearchStatusException( "Incompatible task_type, the requested type [{}] does not match the model type [{}]", RestStatus.BAD_REQUEST, @@ -76,8 +76,8 @@ protected void doExecute(Task task, InferenceAction.Request request, ActionListe unparsedModel.settings(), unparsedModel.secrets() ); - inferOnService(model, request, service.get(), listener); - }, listener::onFailure); + inferOnService(model, request, service.get(), delegate); + }); modelRegistry.getModelWithSecrets(request.getModelId(), getModelListener); } @@ -88,8 +88,11 @@ private void inferOnService( InferenceService service, ActionListener listener ) { - service.infer(model, request.getInput(), request.getTaskSettings(), ActionListener.wrap(inferenceResults -> { - listener.onResponse(new InferenceAction.Response(inferenceResults)); - }, listener::onFailure)); + service.infer( + model, + request.getInput(), + request.getTaskSettings(), + listener.delegateFailureAndWrap((l, inferenceResults) -> l.onResponse(new 
InferenceAction.Response(inferenceResults))) + ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageAction.java index 54452d8a7ed68..080d9b8c2eeca 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageAction.java @@ -62,9 +62,9 @@ protected void masterOperation( XPackUsageRequest request, ClusterState state, ActionListener listener - ) throws Exception { + ) { GetInferenceModelAction.Request getInferenceModelAction = new GetInferenceModelAction.Request("_all", TaskType.ANY); - client.execute(GetInferenceModelAction.INSTANCE, getInferenceModelAction, ActionListener.wrap(response -> { + client.execute(GetInferenceModelAction.INSTANCE, getInferenceModelAction, listener.delegateFailureAndWrap((delegate, response) -> { Map stats = new TreeMap<>(); for (ModelConfigurations model : response.getModels()) { String statKey = model.getService() + ":" + model.getTaskType().name(); @@ -75,7 +75,7 @@ protected void masterOperation( stat.add(); } InferenceFeatureSetUsage usage = new InferenceFeatureSetUsage(stats.values()); - listener.onResponse(new XPackUsageFeatureResponse(usage)); - }, listener::onFailure)); + delegate.onResponse(new XPackUsageFeatureResponse(usage)); + })); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java index de561846a7a68..d8f5b9424b162 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java @@ -131,7 +131,7 @@ protected void masterOperation( if (service.get().isInClusterService()) { // Find the cluster platform as the service may need that // information when creating the model - MlPlatformArchitecturesUtil.getMlNodesArchitecturesSet(ActionListener.wrap(architectures -> { + MlPlatformArchitecturesUtil.getMlNodesArchitecturesSet(listener.delegateFailureAndWrap((delegate, architectures) -> { if (architectures.isEmpty() && clusterIsInElasticCloud(clusterService.getClusterSettings())) { parseAndStoreModel( service.get(), @@ -140,13 +140,13 @@ protected void masterOperation( requestAsMap, // In Elastic cloud ml nodes run on Linux x86 Set.of("linux-x86_64"), - listener + delegate ); } else { // The architecture field could be an empty set, the individual services will need to handle that - parseAndStoreModel(service.get(), request.getModelId(), request.getTaskType(), requestAsMap, architectures, listener); + parseAndStoreModel(service.get(), request.getModelId(), request.getTaskType(), requestAsMap, architectures, delegate); } - }, listener::onFailure), client, threadPool.executor(InferencePlugin.UTILITY_THREAD_POOL_NAME)); + }), client, threadPool.executor(InferencePlugin.UTILITY_THREAD_POOL_NAME)); } else { // Not an in cluster service, it does not care about the cluster platform parseAndStoreModel(service.get(), request.getModelId(), request.getTaskType(), requestAsMap, Set.of(), listener); @@ -165,15 +165,12 @@ private void parseAndStoreModel( service.checkModelConfig( model, - ActionListener.wrap( + listener.delegateFailureAndWrap( // model is valid good to persist then start - verifiedModel -> { - modelRegistry.storeModel( - verifiedModel, - ActionListener.wrap(r -> { startModel(service, verifiedModel, listener); }, listener::onFailure) - ); - }, - listener::onFailure + (delegate, verifiedModel) -> modelRegistry.storeModel( + 
verifiedModel, + delegate.delegateFailureAndWrap((l, r) -> startModel(service, verifiedModel, l)) + ) ) ); } @@ -181,10 +178,7 @@ private void parseAndStoreModel( private static void startModel(InferenceService service, Model model, ActionListener listener) { service.start( model, - ActionListener.wrap( - ok -> listener.onResponse(new PutInferenceModelAction.Response(model.getConfigurations())), - listener::onFailure - ) + listener.delegateFailureAndWrap((l, ok) -> l.onResponse(new PutInferenceModelAction.Response(model.getConfigurations()))) ); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java index aa2e0a81a59b2..2de1cdc126bd6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java @@ -98,17 +98,16 @@ public ModelRegistry(Client client) { * @param listener Model listener */ public void getModelWithSecrets(String modelId, ActionListener listener) { - ActionListener searchListener = ActionListener.wrap(searchResponse -> { + ActionListener searchListener = listener.delegateFailureAndWrap((delegate, searchResponse) -> { // There should be a hit for the configurations and secrets if (searchResponse.getHits().getHits().length == 0) { - listener.onFailure(new ResourceNotFoundException("Model not found [{}]", modelId)); + delegate.onFailure(new ResourceNotFoundException("Model not found [{}]", modelId)); return; } var hits = searchResponse.getHits().getHits(); - listener.onResponse(UnparsedModel.unparsedModelFromMap(createModelConfigMap(hits, modelId))); - - }, listener::onFailure); + delegate.onResponse(UnparsedModel.unparsedModelFromMap(createModelConfigMap(hits, modelId))); + }); QueryBuilder queryBuilder = documentIdQuery(modelId); 
SearchRequest modelSearch = client.prepareSearch(InferenceIndex.INDEX_PATTERN, InferenceSecretsIndex.INDEX_PATTERN) @@ -126,19 +125,18 @@ public void getModelWithSecrets(String modelId, ActionListener li * @param listener Model listener */ public void getModel(String modelId, ActionListener listener) { - ActionListener searchListener = ActionListener.wrap(searchResponse -> { + ActionListener searchListener = listener.delegateFailureAndWrap((delegate, searchResponse) -> { // There should be a hit for the configurations and secrets if (searchResponse.getHits().getHits().length == 0) { - listener.onFailure(new ResourceNotFoundException("Model not found [{}]", modelId)); + delegate.onFailure(new ResourceNotFoundException("Model not found [{}]", modelId)); return; } var hits = searchResponse.getHits().getHits(); var modelConfigs = parseHitsAsModels(hits).stream().map(UnparsedModel::unparsedModelFromMap).toList(); assert modelConfigs.size() == 1; - listener.onResponse(modelConfigs.get(0)); - - }, listener::onFailure); + delegate.onResponse(modelConfigs.get(0)); + }); QueryBuilder queryBuilder = documentIdQuery(modelId); SearchRequest modelSearch = client.prepareSearch(InferenceIndex.INDEX_PATTERN) @@ -157,18 +155,17 @@ public void getModel(String modelId, ActionListener listener) { * @param listener Models listener */ public void getModelsByTaskType(TaskType taskType, ActionListener> listener) { - ActionListener searchListener = ActionListener.wrap(searchResponse -> { + ActionListener searchListener = listener.delegateFailureAndWrap((delegate, searchResponse) -> { // Not an error if no models of this task_type if (searchResponse.getHits().getHits().length == 0) { - listener.onResponse(List.of()); + delegate.onResponse(List.of()); return; } var hits = searchResponse.getHits().getHits(); var modelConfigs = parseHitsAsModels(hits).stream().map(UnparsedModel::unparsedModelFromMap).toList(); - listener.onResponse(modelConfigs); - - }, listener::onFailure); + 
delegate.onResponse(modelConfigs); + }); QueryBuilder queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(TASK_TYPE_FIELD, taskType.toString())); @@ -188,18 +185,17 @@ public void getModelsByTaskType(TaskType taskType, ActionListener> listener) { - ActionListener searchListener = ActionListener.wrap(searchResponse -> { + ActionListener searchListener = listener.delegateFailureAndWrap((delegate, searchResponse) -> { // Not an error if no models of this task_type if (searchResponse.getHits().getHits().length == 0) { - listener.onResponse(List.of()); + delegate.onResponse(List.of()); return; } var hits = searchResponse.getHits().getHits(); var modelConfigs = parseHitsAsModels(hits).stream().map(UnparsedModel::unparsedModelFromMap).toList(); - listener.onResponse(modelConfigs); - - }, listener::onFailure); + delegate.onResponse(modelConfigs); + }); // In theory the index should only contain model config documents // and a match all query would be sufficient. But just in case the @@ -361,11 +357,7 @@ public void deleteModel(String modelId, ActionListener listener) { request.setQuery(documentIdQuery(modelId)); request.setRefresh(true); - client.execute( - DeleteByQueryAction.INSTANCE, - request, - ActionListener.wrap(r -> listener.onResponse(Boolean.TRUE), listener::onFailure) - ); + client.execute(DeleteByQueryAction.INSTANCE, request, listener.delegateFailureAndWrap((l, r) -> l.onResponse(Boolean.TRUE))); } private static IndexRequest createIndexRequest(String docId, String indexName, ToXContentObject body, boolean allowOverwriting) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java index 6689229b35da2..1686cd32d4a6b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java @@ -218,20 +218,20 @@ public static ElasticsearchStatusException createInvalidModelException(Model mod public static void getEmbeddingSize(Model model, InferenceService service, ActionListener listener) { assert model.getTaskType() == TaskType.TEXT_EMBEDDING; - service.infer(model, List.of(TEST_EMBEDDING_INPUT), Map.of(), ActionListener.wrap(r -> { + service.infer(model, List.of(TEST_EMBEDDING_INPUT), Map.of(), listener.delegateFailureAndWrap((delegate, r) -> { if (r instanceof TextEmbeddingResults embeddingResults) { if (embeddingResults.embeddings().isEmpty()) { - listener.onFailure( + delegate.onFailure( new ElasticsearchStatusException( "Could not determine embedding size, no embeddings were returned in test call", RestStatus.BAD_REQUEST ) ); } else { - listener.onResponse(embeddingResults.embeddings().get(0).values().size()); + delegate.onResponse(embeddingResults.embeddings().get(0).values().size()); } } else { - listener.onFailure( + delegate.onFailure( new ElasticsearchStatusException( "Could not determine embedding size. 
" + "Expected a result of type [" @@ -243,7 +243,7 @@ public static void getEmbeddingSize(Model model, InferenceService service, Actio ) ); } - }, listener::onFailure)); + })); } private static final String TEST_EMBEDDING_INPUT = "how big"; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java index dc189352c8fc4..2f2780dfd77fb 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java @@ -52,10 +52,7 @@ public void checkModelConfig(Model model, ActionListener listener) { ServiceUtils.getEmbeddingSize( model, this, - ActionListener.wrap( - size -> listener.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size)), - listener::onFailure - ) + listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) ); } else { listener.onResponse(model); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 8a2f6295b41c8..85c2a99d530a1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -149,10 +149,7 @@ public void checkModelConfig(Model model, ActionListener listener) { ServiceUtils.getEmbeddingSize( model, this, - ActionListener.wrap( - size -> listener.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size)), - listener::onFailure - ) + 
listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) ); } else { listener.onResponse(model); diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java index 7f1a0f2bcc2cb..d93c24356422f 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java @@ -128,41 +128,45 @@ public void testGetPipelinesByExplicitAndWildcardIds() { SearchResponse.Clusters.EMPTY, null ); + try { + + SearchResponse mockResponse = mock(SearchResponse.class); + when(mockResponse.getHits()).thenReturn(prepareSearchHits()); + + GetPipelineRequest request = new GetPipelineRequest(List.of("1", "2", "3*")); + AtomicReference failure = new AtomicReference<>(); + + // Set up an ActionListener for the actual test conditions + ActionListener testActionListener = new ActionListener<>() { + @Override + public void onResponse(GetPipelineResponse getPipelineResponse) { + assertThat(getPipelineResponse, is(notNullValue())); + assertThat(getPipelineResponse.pipelines().size(), equalTo(3)); + assertTrue(getPipelineResponse.pipelines().containsKey("1")); + assertTrue(getPipelineResponse.pipelines().containsKey("2")); + assertTrue(getPipelineResponse.pipelines().containsKey("3*")); + } - SearchResponse mockResponse = mock(SearchResponse.class); - when(mockResponse.getHits()).thenReturn(prepareSearchHits()); - - GetPipelineRequest request = new GetPipelineRequest(List.of("1", "2", "3*")); - AtomicReference failure = new AtomicReference<>(); - - // Set up an ActionListener for the actual test conditions - ActionListener testActionListener = new ActionListener<>() { - @Override - public 
void onResponse(GetPipelineResponse getPipelineResponse) { - assertThat(getPipelineResponse, is(notNullValue())); - assertThat(getPipelineResponse.pipelines().size(), equalTo(3)); - assertTrue(getPipelineResponse.pipelines().containsKey("1")); - assertTrue(getPipelineResponse.pipelines().containsKey("2")); - assertTrue(getPipelineResponse.pipelines().containsKey("3*")); - } + @Override + public void onFailure(Exception e) { + failure.set(e); + } + }; - @Override - public void onFailure(Exception e) { - failure.set(e); + TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(); + try (var threadPool = createThreadPool()) { + final var client = getMockClient(threadPool, searchResponse); + new TransportGetPipelineAction(transportService, mock(ActionFilters.class), client).doExecute( + null, + request, + testActionListener + ); } - }; - TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(); - try (var threadPool = createThreadPool()) { - final var client = getMockClient(threadPool, searchResponse); - new TransportGetPipelineAction(transportService, mock(ActionFilters.class), client).doExecute( - null, - request, - testActionListener - ); + assertNull(failure.get()); + } finally { + searchResponse.decRef(); } - - assertNull(failure.get()); } public void testMissingIndexHandling() throws Exception { diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java index 24ad009990b0b..2da4e2802bdbe 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java @@ -11,7 +11,7 @@ import 
org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -40,6 +40,7 @@ import java.util.Locale; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -130,12 +131,13 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept assertIdleShard(beforeStatsResponse); // WHEN - final SearchResponse searchResponse = search("test*", "constant_keyword", randomAlphaOfLength(5), 5); - assertEquals(RestStatus.OK, searchResponse.status()); - // NOTE: we need an empty result from at least one shard - assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getFailedShards()); - assertEquals(0, searchResponse.getHits().getHits().length); + assertResponse(search("test*", "constant_keyword", randomAlphaOfLength(5), 5), searchResponse -> { + assertEquals(RestStatus.OK, searchResponse.status()); + // NOTE: we need an empty result from at least one shard + assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards()); + assertEquals(0, searchResponse.getFailedShards()); + assertEquals(0, searchResponse.getHits().getHits().length); + }); // THEN final IndicesStatsResponse afterStatsResponse = indicesAdmin().prepareStats("test*").get(); @@ -202,11 +204,12 @@ public void testSearchIdleConstantKeywordMatchOneIndex() throws InterruptedExcep 
assertIdleShard(activeIndexStatsBefore); // WHEN - final SearchResponse searchResponse = search("test*", "constant_keyword", "constant_value2", 5); - assertEquals(RestStatus.OK, searchResponse.status()); - assertEquals(idleIndexShardsCount, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getFailedShards()); - Arrays.stream(searchResponse.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); + assertResponse(search("test*", "constant_keyword", "constant_value2", 5), searchResponse -> { + assertEquals(RestStatus.OK, searchResponse.status()); + assertEquals(idleIndexShardsCount, searchResponse.getSkippedShards()); + assertEquals(0, searchResponse.getFailedShards()); + Arrays.stream(searchResponse.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); + }); // THEN final IndicesStatsResponse idleIndexStatsAfter = indicesAdmin().prepareStats(idleIndex).get(); @@ -265,18 +268,19 @@ public void testSearchIdleConstantKeywordMatchTwoIndices() throws InterruptedExc assertIdleShard(beforeStatsResponse); // WHEN - final SearchResponse searchResponse = search("test*", "constant_keyword", "constant", 5); - - // THEN - assertEquals(RestStatus.OK, searchResponse.status()); - assertEquals(0, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getFailedShards()); - assertArrayEquals( - new String[] { "test1", "test2" }, - Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getIndex).sorted().toArray() - ); - final IndicesStatsResponse afterStatsResponse = indicesAdmin().prepareStats("test*").get(); - assertIdleShardsRefreshStats(beforeStatsResponse, afterStatsResponse); + assertResponse(search("test*", "constant_keyword", "constant", 5), searchResponse -> { + + // THEN + assertEquals(RestStatus.OK, searchResponse.status()); + assertEquals(0, searchResponse.getSkippedShards()); + assertEquals(0, searchResponse.getFailedShards()); + assertArrayEquals( + new String[] { 
"test1", "test2" }, + Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getIndex).sorted().toArray() + ); + final IndicesStatsResponse afterStatsResponse = indicesAdmin().prepareStats("test*").get(); + assertIdleShardsRefreshStats(beforeStatsResponse, afterStatsResponse); + }); } public void testSearchIdleWildcardQueryMatchOneIndex() throws InterruptedException { @@ -327,16 +331,17 @@ public void testSearchIdleWildcardQueryMatchOneIndex() throws InterruptedExcepti assertIdleShard(activeIndexStatsBefore); // WHEN - final SearchResponse searchResponse = client().prepareSearch("test*") - .setQuery(new WildcardQueryBuilder("constant_keyword", "test2*")) - .setPreFilterShardSize(5) - .get(); - - // THEN - assertEquals(RestStatus.OK, searchResponse.status()); - assertEquals(idleIndexShardsCount, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getFailedShards()); - Arrays.stream(searchResponse.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); + assertResponse( + client().prepareSearch("test*").setQuery(new WildcardQueryBuilder("constant_keyword", "test2*")).setPreFilterShardSize(5), + searchResponse -> { + + // THEN + assertEquals(RestStatus.OK, searchResponse.status()); + assertEquals(idleIndexShardsCount, searchResponse.getSkippedShards()); + assertEquals(0, searchResponse.getFailedShards()); + Arrays.stream(searchResponse.getHits().getHits()).forEach(searchHit -> assertEquals("test2", searchHit.getIndex())); + } + ); final IndicesStatsResponse idleIndexStatsAfter = indicesAdmin().prepareStats(idleIndex).get(); assertIdleShardsRefreshStats(idleIndexStatsBefore, idleIndexStatsAfter); @@ -345,11 +350,8 @@ public void testSearchIdleWildcardQueryMatchOneIndex() throws InterruptedExcepti assertThat(active, empty()); } - private SearchResponse search(final String index, final String field, final String value, int preFilterShardSize) { - return client().prepareSearch(index) - .setQuery(new 
MatchPhraseQueryBuilder(field, value)) - .setPreFilterShardSize(preFilterShardSize) - .get(); + private SearchRequestBuilder search(final String index, final String field, final String value, int preFilterShardSize) { + return client().prepareSearch(index).setQuery(new MatchPhraseQueryBuilder(field, value)).setPreFilterShardSize(preFilterShardSize); } private static void assertIdleShard(final IndicesStatsResponse statsResponse) { diff --git a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongTests.java b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongTests.java index 3407a71f23265..24335a7892c9d 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongTests.java +++ b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -41,10 +40,10 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.range; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static 
org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.SuiteScopeTestCase public class UnsignedLongTests extends ESIntegTestCase { @@ -98,78 +97,74 @@ public void setupSuiteScopeCluster() throws Exception { public void testSort() { for (String index : new String[] { "idx", "idx-sort" }) { // asc sort - { - SearchResponse response = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) - .setSize(numDocs) - .addSort("ul_field", SortOrder.ASC) - .get(); - assertNoFailures(response); - SearchHit[] hits = response.getHits().getHits(); - assertEquals(hits.length, numDocs); - int i = 0; - for (SearchHit hit : hits) { - assertEquals(values[i++], hit.getSortValues()[0]); + assertNoFailuresAndResponse( + prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).setSize(numDocs).addSort("ul_field", SortOrder.ASC), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(hits.length, numDocs); + int i = 0; + for (SearchHit hit : hits) { + assertEquals(values[i++], hit.getSortValues()[0]); + } } - } + ); // desc sort - { - SearchResponse response = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) - .setSize(numDocs) - .addSort("ul_field", SortOrder.DESC) - .get(); - assertNoFailures(response); - SearchHit[] hits = response.getHits().getHits(); - assertEquals(hits.length, numDocs); - int i = numDocs - 1; - for (SearchHit hit : hits) { - assertEquals(values[i--], hit.getSortValues()[0]); + assertNoFailuresAndResponse( + prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).setSize(numDocs).addSort("ul_field", SortOrder.DESC), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(hits.length, numDocs); + int i = numDocs - 1; + for (SearchHit hit : hits) { + assertEquals(values[i--], hit.getSortValues()[0]); + } } - } + ); // asc sort with search_after as Long - { - SearchResponse response = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) + 
assertNoFailuresAndResponse( + prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) .setSize(numDocs) .addSort("ul_field", SortOrder.ASC) - .searchAfter(new Long[] { 100L }) - .get(); - assertNoFailures(response); - SearchHit[] hits = response.getHits().getHits(); - assertEquals(hits.length, 7); - int i = 3; - for (SearchHit hit : hits) { - assertEquals(values[i++], hit.getSortValues()[0]); + .searchAfter(new Long[] { 100L }), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(hits.length, 7); + int i = 3; + for (SearchHit hit : hits) { + assertEquals(values[i++], hit.getSortValues()[0]); + } } - } + ); // asc sort with search_after as BigInteger - { - SearchResponse response = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) + assertNoFailuresAndResponse( + prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) .setSize(numDocs) .addSort("ul_field", SortOrder.ASC) - .searchAfter(new BigInteger[] { new BigInteger("18446744073709551614") }) - .get(); - assertNoFailures(response); - SearchHit[] hits = response.getHits().getHits(); - assertEquals(hits.length, 2); - int i = 8; - for (SearchHit hit : hits) { - assertEquals(values[i++], hit.getSortValues()[0]); + .searchAfter(new BigInteger[] { new BigInteger("18446744073709551614") }), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(hits.length, 2); + int i = 8; + for (SearchHit hit : hits) { + assertEquals(values[i++], hit.getSortValues()[0]); + } } - } + ); // asc sort with search_after as BigInteger in String format - { - SearchResponse response = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) + assertNoFailuresAndResponse( + prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) .setSize(numDocs) .addSort("ul_field", SortOrder.ASC) - .searchAfter(new String[] { "18446744073709551614" }) - .get(); - assertNoFailures(response); - SearchHit[] hits = response.getHits().getHits(); - assertEquals(hits.length, 2); 
- int i = 8; - for (SearchHit hit : hits) { - assertEquals(values[i++], hit.getSortValues()[0]); + .searchAfter(new String[] { "18446744073709551614" }), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(hits.length, 2); + int i = 8; + for (SearchHit hit : hits) { + assertEquals(values[i++], hit.getSortValues()[0]); + } } - } + ); // asc sort with search_after of negative value should fail { SearchRequestBuilder srb = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) @@ -189,28 +184,26 @@ public void testSort() { assertThat(exception.getCause().getMessage(), containsString("Failed to parse search_after value")); } // desc sort with search_after as BigInteger - { - SearchResponse response = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) + assertNoFailuresAndResponse( + prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()) .setSize(numDocs) .addSort("ul_field", SortOrder.DESC) - .searchAfter(new BigInteger[] { new BigInteger("18446744073709551615") }) - .get(); - assertNoFailures(response); - SearchHit[] hits = response.getHits().getHits(); - assertEquals(hits.length, 8); - int i = 7; - for (SearchHit hit : hits) { - assertEquals(values[i--], hit.getSortValues()[0]); + .searchAfter(new BigInteger[] { new BigInteger("18446744073709551615") }), + response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(hits.length, 8); + int i = 7; + for (SearchHit hit : hits) { + assertEquals(values[i--], hit.getSortValues()[0]); + } } - } + ); } } public void testAggs() { // terms agg - { - SearchResponse response = prepareSearch("idx").setSize(0).addAggregation(terms("ul_terms").field("ul_field")).get(); - assertNoFailures(response); + assertNoFailuresAndResponse(prepareSearch("idx").setSize(0).addAggregation(terms("ul_terms").field("ul_field")), response -> { Terms terms = response.getAggregations().get("ul_terms"); long[] expectedBucketDocCounts = { 2, 2, 2, 1, 1, 1, 1 }; @@ -228,68 +221,62 @@ 
public void testAggs() { assertEquals(expectedBucketKeys[i], bucket.getKey()); i++; } - } + }); // histogram agg - { - SearchResponse response = prepareSearch("idx").setSize(0) - .addAggregation(histogram("ul_histo").field("ul_field").interval(9E18).minDocCount(0)) - .get(); - assertNoFailures(response); - Histogram histo = response.getAggregations().get("ul_histo"); + assertNoFailuresAndResponse( + prepareSearch("idx").setSize(0).addAggregation(histogram("ul_histo").field("ul_field").interval(9E18).minDocCount(0)), + response -> { + Histogram histo = response.getAggregations().get("ul_histo"); - long[] expectedBucketDocCounts = { 3, 3, 4 }; - double[] expectedBucketKeys = { 0, 9.0E18, 1.8E19 }; - int i = 0; - for (Histogram.Bucket bucket : histo.getBuckets()) { - assertEquals(expectedBucketDocCounts[i], bucket.getDocCount()); - assertEquals(expectedBucketKeys[i], bucket.getKey()); - i++; + long[] expectedBucketDocCounts = { 3, 3, 4 }; + double[] expectedBucketKeys = { 0, 9.0E18, 1.8E19 }; + int i = 0; + for (Histogram.Bucket bucket : histo.getBuckets()) { + assertEquals(expectedBucketDocCounts[i], bucket.getDocCount()); + assertEquals(expectedBucketKeys[i], bucket.getKey()); + i++; + } } - } + ); // range agg - { - SearchResponse response = prepareSearch("idx").setSize(0) + assertNoFailuresAndResponse( + prepareSearch("idx").setSize(0) .addAggregation( range("ul_range").field("ul_field").addUnboundedTo(9.0E18).addRange(9.0E18, 1.8E19).addUnboundedFrom(1.8E19) - ) - .get(); - assertNoFailures(response); - Range range = response.getAggregations().get("ul_range"); + ), + response -> { + Range range = response.getAggregations().get("ul_range"); - long[] expectedBucketDocCounts = { 3, 3, 4 }; - String[] expectedBucketKeys = { "*-9.0E18", "9.0E18-1.8E19", "1.8E19-*" }; - int i = 0; - for (Range.Bucket bucket : range.getBuckets()) { - assertEquals(expectedBucketDocCounts[i], bucket.getDocCount()); - assertEquals(expectedBucketKeys[i], bucket.getKey()); - i++; + long[] 
expectedBucketDocCounts = { 3, 3, 4 }; + String[] expectedBucketKeys = { "*-9.0E18", "9.0E18-1.8E19", "1.8E19-*" }; + int i = 0; + for (Range.Bucket bucket : range.getBuckets()) { + assertEquals(expectedBucketDocCounts[i], bucket.getDocCount()); + assertEquals(expectedBucketKeys[i], bucket.getKey()); + i++; + } } - } + ); // sum agg - { - SearchResponse response = prepareSearch("idx").setSize(0).addAggregation(sum("ul_sum").field("ul_field")).get(); - assertNoFailures(response); + assertNoFailuresAndResponse(prepareSearch("idx").setSize(0).addAggregation(sum("ul_sum").field("ul_field")), response -> { Sum sum = response.getAggregations().get("ul_sum"); double expectedSum = Arrays.stream(values).mapToDouble(Number::doubleValue).sum(); assertEquals(expectedSum, sum.value(), 0.001); - } + }); // max agg - { - SearchResponse response = prepareSearch("idx").setSize(0).addAggregation(max("ul_max").field("ul_field")).get(); - assertNoFailures(response); + assertNoFailuresAndResponse(prepareSearch("idx").setSize(0).addAggregation(max("ul_max").field("ul_field")), response -> { Max max = response.getAggregations().get("ul_max"); assertEquals(1.8446744073709551615E19, max.value(), 0.001); - } + }); + // min agg - { - SearchResponse response = prepareSearch("idx").setSize(0).addAggregation(min("ul_min").field("ul_field")).get(); - assertNoFailures(response); + assertNoFailuresAndResponse(prepareSearch("idx").setSize(0).addAggregation(min("ul_min").field("ul_field")), response -> { Min min = response.getAggregations().get("ul_min"); assertEquals(0, min.value(), 0.001); - } + }); } public void testSortDifferentFormatsShouldFail() { @@ -304,15 +291,11 @@ public void testSortDifferentFormatsShouldFail() { } public void testRangeQuery() { - SearchResponse response = prepareSearch("idx").setSize(0) - .setQuery(new RangeQueryBuilder("ul_field").to("9.0E18").includeUpper(false)) - .get(); - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - response = 
prepareSearch("idx").setSize(0) - .setQuery(new RangeQueryBuilder("ul_field").from("9.0E18").to("1.8E19").includeUpper(false)) - .get(); - assertThat(response.getHits().getTotalHits().value, equalTo(3L)); - response = prepareSearch("idx").setSize(0).setQuery(new RangeQueryBuilder("ul_field").from("1.8E19")).get(); - assertThat(response.getHits().getTotalHits().value, equalTo(4L)); + assertHitCount(prepareSearch("idx").setSize(0).setQuery(new RangeQueryBuilder("ul_field").to("9.0E18").includeUpper(false)), 3); + assertHitCount( + prepareSearch("idx").setSize(0).setQuery(new RangeQueryBuilder("ul_field").from("9.0E18").to("1.8E19").includeUpper(false)), + 3 + ); + assertHitCount(prepareSearch("idx").setSize(0).setQuery(new RangeQueryBuilder("ul_field").from("1.8E19")), 4); } } diff --git a/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java b/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java index 0dc7ca8006f8a..b5f172d26ffe7 100644 --- a/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java +++ b/x-pack/plugin/mapper-version/src/internalClusterTest/java/org/elasticsearch/xpack/versionfield/VersionFieldIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.versionfield; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -24,6 +23,7 @@ import java.util.Collection; import java.util.List; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.contains; @@ -67,18 +67,20 @@ public void testTermsAggregation() throws Exception { indicesAdmin().prepareRefresh().get(); // terms aggs - 
SearchResponse response = client().prepareSearch(indexName) - .addAggregation(AggregationBuilders.terms("myterms").field("version")) - .get(); - Terms terms = response.getAggregations().get("myterms"); - List buckets = terms.getBuckets(); - - assertEquals(5, buckets.size()); - assertEquals("1.0", buckets.get(0).getKey()); - assertEquals("1.3.0", buckets.get(1).getKey()); - assertEquals("2.1.0-alpha", buckets.get(2).getKey()); - assertEquals("2.1.0", buckets.get(3).getKey()); - assertEquals("3.11.5", buckets.get(4).getKey()); + assertResponse( + client().prepareSearch(indexName).addAggregation(AggregationBuilders.terms("myterms").field("version")), + response -> { + Terms terms = response.getAggregations().get("myterms"); + List buckets = terms.getBuckets(); + + assertEquals(5, buckets.size()); + assertEquals("1.0", buckets.get(0).getKey()); + assertEquals("1.3.0", buckets.get(1).getKey()); + assertEquals("2.1.0-alpha", buckets.get(2).getKey()); + assertEquals("2.1.0", buckets.get(3).getKey()); + assertEquals("3.11.5", buckets.get(4).getKey()); + } + ); } public void testTermsEnum() throws Exception { diff --git a/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java b/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java index 038f2b54965ce..f41cc145831cf 100644 --- a/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java +++ b/x-pack/plugin/mapper-version/src/test/java/org/elasticsearch/xpack/versionfield/VersionStringFieldTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.versionfield; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -27,6 +26,8 @@ import java.util.Collection; import java.util.List; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; public class VersionStringFieldTests extends ESSingleNodeTestCase { @@ -55,108 +56,71 @@ public void testExactQueries() throws Exception { setUpIndex(indexName); // match - SearchResponse response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", ("1.0.0"))).get(); - assertEquals(1, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "1.4.0")).get(); - assertEquals(0, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "1.3.0")).get(); - assertEquals(0, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "1.3.0+build.1234567")).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", ("1.0.0"))), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "1.4.0")), 0); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "1.3.0")), 0); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "1.3.0+build.1234567")), 1); // term - response = client().prepareSearch(indexName).setQuery(QueryBuilders.termQuery("version", "1.0.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.termQuery("version", "1.4.0")).get(); - assertEquals(0, response.getHits().getTotalHits().value); - response = 
client().prepareSearch(indexName).setQuery(QueryBuilders.termQuery("version", "1.3.0")).get(); - assertEquals(0, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.termQuery("version", "1.3.0+build.1234567")).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.termQuery("version", "1.0.0")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.termQuery("version", "1.4.0")), 0); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.termQuery("version", "1.3.0")), 0); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.termQuery("version", "1.3.0+build.1234567")), 1); // terms - response = client().prepareSearch(indexName).setQuery(QueryBuilders.termsQuery("version", "1.0.0", "1.3.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.termsQuery("version", "1.4.0", "1.3.0+build.1234567")).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.termsQuery("version", "1.0.0", "1.3.0")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.termsQuery("version", "1.4.0", "1.3.0+build.1234567")), 1); // phrase query (just for keyword compatibility) - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchPhraseQuery("version", "2.1.0-alpha.beta")).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.matchPhraseQuery("version", "2.1.0-alpha.beta")), 1); } public void testRangeQueries() throws Exception { String indexName = setUpIndex("test"); - SearchResponse response = client().prepareSearch(indexName) - 
.setQuery(QueryBuilders.rangeQuery("version").from("1.0.0").to("3.0.0")) - .get(); - assertEquals(4, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.1.0").to("3.0.0")).get(); - assertEquals(3, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName) - .setQuery(QueryBuilders.rangeQuery("version").from("0.1.0").to("2.1.0-alpha.beta")) - .get(); - assertEquals(3, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("2.1.0").to("3.0.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("3.0.0").to("4.0.0")).get(); - assertEquals(0, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName) - .setQuery(QueryBuilders.rangeQuery("version").from("1.3.0+build.1234569").to("3.0.0")) - .get(); - assertEquals(2, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.0.0").to("3.0.0")), 4); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.1.0").to("3.0.0")), 3); + assertHitCount( + client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("0.1.0").to("2.1.0-alpha.beta")), + 3 + ); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("2.1.0").to("3.0.0")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("3.0.0").to("4.0.0")), 0); + assertHitCount( + client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.3.0+build.1234569").to("3.0.0")), + 2 + ); // ranges excluding edges - response = 
client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.0.0", false).to("3.0.0")).get(); - assertEquals(3, response.getHits().getTotalHits().value); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.0.0").to("2.1.0", false)).get(); - assertEquals(3, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.0.0", false).to("3.0.0")), 3); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.0.0").to("2.1.0", false)), 3); // open ranges - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.4.0")).get(); - assertEquals(4, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").to("1.4.0")).get(); - assertEquals(2, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.4.0")), 4); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").to("1.4.0")), 2); } public void testPrefixQuery() throws IOException { String indexName = setUpIndex("test"); // prefix - SearchResponse response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "1")).get(); - assertEquals(3, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2.1")).get(); - assertEquals(2, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2.1.0-")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "1.3.0+b")).get(); - assertEquals(1, 
response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2")).get(); - assertEquals(3, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "21")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "21.")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "21.1")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "21.11")).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "1")), 3); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2.1")), 2); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2.1.0-")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "1.3.0+b")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2")), 3); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "21")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "21.")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "21.1")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "21.11")), 1); // test case sensitivity / insensitivity - response = 
client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2.1.0-A")).get(); - assertEquals(0, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2.1.0-A").caseInsensitive(true)).get(); - assertEquals(1, response.getHits().getTotalHits().value); - assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[0].getSourceAsMap().get("version")); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2.1.0-A")), 0); + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.prefixQuery("version", "2.1.0-A").caseInsensitive(true)), + response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[0].getSourceAsMap().get("version")); + } + ); } public void testSort() throws IOException { @@ -167,32 +131,37 @@ public void testSort() throws IOException { client().admin().indices().prepareRefresh(indexName).get(); // sort based on version field - SearchResponse response = client().prepareSearch(indexName) - .setQuery(QueryBuilders.matchAllQuery()) - .addSort("version", SortOrder.DESC) - .get(); - assertEquals(8, response.getHits().getTotalHits().value); - SearchHit[] hits = response.getHits().getHits(); - assertEquals("1.3.567#12", hits[0].getSortValues()[0]); - assertEquals("1.2.3alpha", hits[1].getSortValues()[0]); - assertEquals("21.11.0", hits[2].getSortValues()[0]); - assertEquals("11.1.0", hits[3].getSortValues()[0]); - assertEquals("2.1.0", hits[4].getSortValues()[0]); - assertEquals("2.1.0-alpha.beta", hits[5].getSortValues()[0]); - assertEquals("1.3.0+build.1234567", hits[6].getSortValues()[0]); - assertEquals("1.0.0", hits[7].getSortValues()[0]); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery()).addSort("version", SortOrder.ASC).get(); - assertEquals(8, 
response.getHits().getTotalHits().value); - hits = response.getHits().getHits(); - assertEquals("1.0.0", hits[0].getSortValues()[0]); - assertEquals("1.3.0+build.1234567", hits[1].getSortValues()[0]); - assertEquals("2.1.0-alpha.beta", hits[2].getSortValues()[0]); - assertEquals("2.1.0", hits[3].getSortValues()[0]); - assertEquals("11.1.0", hits[4].getSortValues()[0]); - assertEquals("21.11.0", hits[5].getSortValues()[0]); - assertEquals("1.2.3alpha", hits[6].getSortValues()[0]); - assertEquals("1.3.567#12", hits[7].getSortValues()[0]); + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery()).addSort("version", SortOrder.DESC), + response -> { + assertEquals(8, response.getHits().getTotalHits().value); + SearchHit[] hits = response.getHits().getHits(); + assertEquals("1.3.567#12", hits[0].getSortValues()[0]); + assertEquals("1.2.3alpha", hits[1].getSortValues()[0]); + assertEquals("21.11.0", hits[2].getSortValues()[0]); + assertEquals("11.1.0", hits[3].getSortValues()[0]); + assertEquals("2.1.0", hits[4].getSortValues()[0]); + assertEquals("2.1.0-alpha.beta", hits[5].getSortValues()[0]); + assertEquals("1.3.0+build.1234567", hits[6].getSortValues()[0]); + assertEquals("1.0.0", hits[7].getSortValues()[0]); + } + ); + + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery()).addSort("version", SortOrder.ASC), + response -> { + assertEquals(8, response.getHits().getTotalHits().value); + var hits = response.getHits().getHits(); + assertEquals("1.0.0", hits[0].getSortValues()[0]); + assertEquals("1.3.0+build.1234567", hits[1].getSortValues()[0]); + assertEquals("2.1.0-alpha.beta", hits[2].getSortValues()[0]); + assertEquals("2.1.0", hits[3].getSortValues()[0]); + assertEquals("11.1.0", hits[4].getSortValues()[0]); + assertEquals("21.11.0", hits[5].getSortValues()[0]); + assertEquals("1.2.3alpha", hits[6].getSortValues()[0]); + assertEquals("1.3.567#12", hits[7].getSortValues()[0]); + } + ); } 
public void testRegexQuery() throws Exception { @@ -209,36 +178,44 @@ public void testRegexQuery() throws Exception { prepareIndex(indexName).setId("5").setSource(jsonBuilder().startObject().field("version", "2.33.0").endObject()).get(); client().admin().indices().prepareRefresh(indexName).get(); - SearchResponse response = client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", "2.*0")).get(); - assertEquals(2, response.getHits().getTotalHits().value); - assertEquals("2.1.0", response.getHits().getHits()[0].getSourceAsMap().get("version")); - assertEquals("2.33.0", response.getHits().getHits()[1].getSourceAsMap().get("version")); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", "<0-10>.<0-10>.*al.*")).get(); - assertEquals(2, response.getHits().getTotalHits().value); - assertEquals("1.0.0alpha2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); - assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[1].getSourceAsMap().get("version")); + assertResponse(client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", "2.*0")), response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertEquals("2.1.0", response.getHits().getHits()[0].getSourceAsMap().get("version")); + assertEquals("2.33.0", response.getHits().getHits()[1].getSourceAsMap().get("version")); + }); + + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", "<0-10>.<0-10>.*al.*")), + response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertEquals("1.0.0alpha2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); + assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[1].getSourceAsMap().get("version")); + } + ); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", "1.[0-9].[0-9].*")).get(); - assertEquals(2, 
response.getHits().getTotalHits().value); - assertEquals("1.0.0alpha2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); - assertEquals("1.3.0+build.1234567", response.getHits().getHits()[1].getSourceAsMap().get("version")); + assertResponse(client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", "1.[0-9].[0-9].*")), response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertEquals("1.0.0alpha2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); + assertEquals("1.3.0+build.1234567", response.getHits().getHits()[1].getSourceAsMap().get("version")); + }); // test case sensitivity / insensitivity - response = client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", ".*alpha.*")).get(); - assertEquals(2, response.getHits().getTotalHits().value); - assertEquals("1.0.0alpha2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); - assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[1].getSourceAsMap().get("version")); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", ".*Alpha.*")).get(); - assertEquals(0, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName) - .setQuery(QueryBuilders.regexpQuery("version", ".*Alpha.*").caseInsensitive(true)) - .get(); - assertEquals(2, response.getHits().getTotalHits().value); - assertEquals("1.0.0alpha2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); - assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[1].getSourceAsMap().get("version")); + assertResponse(client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", ".*alpha.*")), response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertEquals("1.0.0alpha2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); + assertEquals("2.1.0-alpha.beta", 
response.getHits().getHits()[1].getSourceAsMap().get("version")); + }); + + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", ".*Alpha.*")), 0); + + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.regexpQuery("version", ".*Alpha.*").caseInsensitive(true)), + response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertEquals("1.0.0alpha2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); + assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[1].getSourceAsMap().get("version")); + } + ); } public void testFuzzyQuery() throws Exception { @@ -256,11 +233,12 @@ public void testFuzzyQuery() throws Exception { prepareIndex(indexName).setId("6").setSource(jsonBuilder().startObject().field("version", "2.a3.0").endObject()).get(); client().admin().indices().prepareRefresh(indexName).get(); - SearchResponse response = client().prepareSearch(indexName).setQuery(QueryBuilders.fuzzyQuery("version", "2.3.0")).get(); - assertEquals(3, response.getHits().getTotalHits().value); - assertEquals("2.1.0", response.getHits().getHits()[0].getSourceAsMap().get("version")); - assertEquals("2.33.0", response.getHits().getHits()[1].getSourceAsMap().get("version")); - assertEquals("2.a3.0", response.getHits().getHits()[2].getSourceAsMap().get("version")); + assertResponse(client().prepareSearch(indexName).setQuery(QueryBuilders.fuzzyQuery("version", "2.3.0")), response -> { + assertEquals(3, response.getHits().getTotalHits().value); + assertEquals("2.1.0", response.getHits().getHits()[0].getSourceAsMap().get("version")); + assertEquals("2.33.0", response.getHits().getHits()[1].getSourceAsMap().get("version")); + assertEquals("2.a3.0", response.getHits().getHits()[2].getSourceAsMap().get("version")); + }); } public void testWildcardQuery() throws Exception { @@ -305,25 +283,27 @@ public void testWildcardQuery() throws Exception { checkWildcardQuery(indexName, 
"3.1.1??", new String[] { "3.1.1-a", "3.1.1+b", "3.1.123" }); // test case sensitivity / insensitivity - SearchResponse response = client().prepareSearch(indexName).setQuery(QueryBuilders.wildcardQuery("version", "*Alpha*")).get(); - assertEquals(0, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName) - .setQuery(QueryBuilders.wildcardQuery("version", "*Alpha*").caseInsensitive(true)) - .get(); - assertEquals(2, response.getHits().getTotalHits().value); - assertEquals("1.0.0-alpha.2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); - assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[1].getSourceAsMap().get("version")); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.wildcardQuery("version", "*Alpha*")), 0); + + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.wildcardQuery("version", "*Alpha*").caseInsensitive(true)), + response -> { + assertEquals(2, response.getHits().getTotalHits().value); + assertEquals("1.0.0-alpha.2.1.0-rc.1", response.getHits().getHits()[0].getSourceAsMap().get("version")); + assertEquals("2.1.0-alpha.beta", response.getHits().getHits()[1].getSourceAsMap().get("version")); + } + ); } private void checkWildcardQuery(String indexName, String query, String... 
expectedResults) { - SearchResponse response = client().prepareSearch(indexName).setQuery(QueryBuilders.wildcardQuery("version", query)).get(); - assertEquals(expectedResults.length, response.getHits().getTotalHits().value); - for (int i = 0; i < expectedResults.length; i++) { - String expected = expectedResults[i]; - Object actual = response.getHits().getHits()[i].getSourceAsMap().get("version"); - assertEquals("expected " + expected + " in position " + i + " but found " + actual, expected, actual); - } + assertResponse(client().prepareSearch(indexName).setQuery(QueryBuilders.wildcardQuery("version", query)), response -> { + assertEquals(expectedResults.length, response.getHits().getTotalHits().value); + for (int i = 0; i < expectedResults.length; i++) { + String expected = expectedResults[i]; + Object actual = response.getHits().getHits()[i].getSourceAsMap().get("version"); + assertEquals("expected " + expected + " in position " + i + " but found " + actual, expected, actual); + } + }); } /** @@ -340,59 +320,60 @@ public void testStoreMalformed() throws Exception { prepareIndex(indexName).setId("4").setSource(jsonBuilder().startObject().field("version", "").endObject()).get(); client().admin().indices().prepareRefresh(indexName).get(); - SearchResponse response = client().prepareSearch(indexName).addDocValueField("version").get(); - assertEquals(4, response.getHits().getTotalHits().value); - assertEquals("1", response.getHits().getAt(0).getId()); - assertEquals("1.invalid.0", response.getHits().getAt(0).field("version").getValue()); + assertResponse(client().prepareSearch(indexName).addDocValueField("version"), response -> { + assertEquals(4, response.getHits().getTotalHits().value); + assertEquals("1", response.getHits().getAt(0).getId()); + assertEquals("1.invalid.0", response.getHits().getAt(0).field("version").getValue()); - assertEquals("2", response.getHits().getAt(1).getId()); - assertEquals("2.2.0", 
response.getHits().getAt(1).field("version").getValue()); + assertEquals("2", response.getHits().getAt(1).getId()); + assertEquals("2.2.0", response.getHits().getAt(1).field("version").getValue()); - assertEquals("3", response.getHits().getAt(2).getId()); - assertEquals("2.2.0-badchar!", response.getHits().getAt(2).field("version").getValue()); + assertEquals("3", response.getHits().getAt(2).getId()); + assertEquals("2.2.0-badchar!", response.getHits().getAt(2).field("version").getValue()); - assertEquals("4", response.getHits().getAt(3).getId()); - assertEquals("", response.getHits().getAt(3).field("version").getValue()); + assertEquals("4", response.getHits().getAt(3).getId()); + assertEquals("", response.getHits().getAt(3).field("version").getValue()); + }); // exact match for malformed term - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "1.invalid.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "2.2.0-badchar!")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "")).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "1.invalid.0")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "2.2.0-badchar!")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "")), 1); // also should appear in terms aggs - response = client().prepareSearch(indexName).addAggregation(AggregationBuilders.terms("myterms").field("version")).get(); - Terms terms = response.getAggregations().get("myterms"); - List buckets = terms.getBuckets(); - - assertEquals(4, buckets.size()); - assertEquals("2.2.0", 
buckets.get(0).getKey()); - assertEquals("", buckets.get(1).getKey()); - assertEquals("1.invalid.0", buckets.get(2).getKey()); - assertEquals("2.2.0-badchar!", buckets.get(3).getKey()); + assertResponse( + client().prepareSearch(indexName).addAggregation(AggregationBuilders.terms("myterms").field("version")), + response -> { + Terms terms = response.getAggregations().get("myterms"); + List buckets = terms.getBuckets(); + + assertEquals(4, buckets.size()); + assertEquals("2.2.0", buckets.get(0).getKey()); + assertEquals("", buckets.get(1).getKey()); + assertEquals("1.invalid.0", buckets.get(2).getKey()); + assertEquals("2.2.0-badchar!", buckets.get(3).getKey()); + } + ); // invalid values should sort after all valid ones - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery()).addSort("version", SortOrder.ASC).get(); - assertEquals(4, response.getHits().getTotalHits().value); - SearchHit[] hits = response.getHits().getHits(); - assertEquals("2.2.0", hits[0].getSortValues()[0]); - assertEquals("", hits[1].getSortValues()[0]); - assertEquals("1.invalid.0", hits[2].getSortValues()[0]); - assertEquals("2.2.0-badchar!", hits[3].getSortValues()[0]); + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery()).addSort("version", SortOrder.ASC), + response -> { + assertEquals(4, response.getHits().getTotalHits().value); + SearchHit[] hits = response.getHits().getHits(); + assertEquals("2.2.0", hits[0].getSortValues()[0]); + assertEquals("", hits[1].getSortValues()[0]); + assertEquals("1.invalid.0", hits[2].getSortValues()[0]); + assertEquals("2.2.0-badchar!", hits[3].getSortValues()[0]); + } + ); // ranges can include them, but they are sorted last - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").to("3.0.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - response = 
client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("3.0.0")).get(); - assertEquals(3, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").to("3.0.0")), 1); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("3.0.0")), 3); // using the empty string as lower bound should return all "invalid" versions - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("")).get(); - assertEquals(3, response.getHits().getTotalHits().value); + assertHitCount(client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("")), 3); } public void testAggs() throws Exception { @@ -408,31 +389,39 @@ public void testAggs() throws Exception { client().admin().indices().prepareRefresh(indexName).get(); // terms aggs - SearchResponse response = client().prepareSearch(indexName) - .addAggregation(AggregationBuilders.terms("myterms").field("version")) - .get(); - Terms terms = response.getAggregations().get("myterms"); - List buckets = terms.getBuckets(); - - assertEquals(5, buckets.size()); - assertEquals("1.0", buckets.get(0).getKey()); - assertEquals("1.3.0", buckets.get(1).getKey()); - assertEquals("2.1.0-alpha", buckets.get(2).getKey()); - assertEquals("2.1.0", buckets.get(3).getKey()); - assertEquals("3.11.5", buckets.get(4).getKey()); + assertResponse( + client().prepareSearch(indexName).addAggregation(AggregationBuilders.terms("myterms").field("version")), + response -> { + Terms terms = response.getAggregations().get("myterms"); + List buckets = terms.getBuckets(); + + assertEquals(5, buckets.size()); + assertEquals("1.0", buckets.get(0).getKey()); + assertEquals("1.3.0", buckets.get(1).getKey()); + assertEquals("2.1.0-alpha", buckets.get(2).getKey()); + assertEquals("2.1.0", buckets.get(3).getKey()); + assertEquals("3.11.5", buckets.get(4).getKey()); + } + 
); // cardinality - response = client().prepareSearch(indexName).addAggregation(AggregationBuilders.cardinality("myterms").field("version")).get(); - Cardinality card = response.getAggregations().get("myterms"); - assertEquals(5, card.getValue()); + assertResponse( + client().prepareSearch(indexName).addAggregation(AggregationBuilders.cardinality("myterms").field("version")), + response -> { + Cardinality card = response.getAggregations().get("myterms"); + assertEquals(5, card.getValue()); + } + ); // string stats - response = client().prepareSearch(indexName) - .addAggregation(AnalyticsAggregationBuilders.stringStats("stats").field("version")) - .get(); - InternalStringStats stats = response.getAggregations().get("stats"); - assertEquals(3, stats.getMinLength()); - assertEquals(11, stats.getMaxLength()); + assertResponse( + client().prepareSearch(indexName).addAggregation(AnalyticsAggregationBuilders.stringStats("stats").field("version")), + response -> { + InternalStringStats stats = response.getAggregations().get("stats"); + assertEquals(3, stats.getMinLength()); + assertEquals(11, stats.getMaxLength()); + } + ); } public void testMultiValues() throws Exception { @@ -447,28 +436,37 @@ public void testMultiValues() throws Exception { .get(); client().admin().indices().prepareRefresh(indexName).get(); - SearchResponse response = client().prepareSearch(indexName).addSort("version", SortOrder.ASC).get(); - assertEquals(3, response.getHits().getTotalHits().value); - assertEquals("1", response.getHits().getAt(0).getId()); - assertEquals("2", response.getHits().getAt(1).getId()); - assertEquals("3", response.getHits().getAt(2).getId()); + assertResponse(client().prepareSearch(indexName).addSort("version", SortOrder.ASC), response -> { + assertEquals(3, response.getHits().getTotalHits().value); + assertEquals("1", response.getHits().getAt(0).getId()); + assertEquals("2", response.getHits().getAt(1).getId()); + assertEquals("3", response.getHits().getAt(2).getId()); + 
}); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "3.0.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - assertEquals("1", response.getHits().getAt(0).getId()); + assertResponse(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "3.0.0")), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertEquals("1", response.getHits().getAt(0).getId()); + }); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "4.alpha.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); - assertEquals("2", response.getHits().getAt(0).getId()); + assertResponse(client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("version", "4.alpha.0")), response -> { + assertEquals(1, response.getHits().getTotalHits().value); + assertEquals("2", response.getHits().getAt(0).getId()); + }); // range - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").to("1.5.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").to("1.5.0")), + response -> assertEquals(1, response.getHits().getTotalHits().value) + ); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.5.0")).get(); - assertEquals(3, response.getHits().getTotalHits().value); + assertResponse( + client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("1.5.0")), + response -> assertEquals(3, response.getHits().getTotalHits().value) + ); - response = client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("5.0.0").to("6.0.0")).get(); - assertEquals(1, response.getHits().getTotalHits().value); + assertResponse( + 
client().prepareSearch(indexName).setQuery(QueryBuilders.rangeQuery("version").from("5.0.0").to("6.0.0")), + response -> assertEquals(1, response.getHits().getTotalHits().value) + ); } } diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearningToRankRescorerIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearningToRankRescorerIT.java index 0e060b3c94644..ecfa868046275 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearningToRankRescorerIT.java +++ b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/LearningToRankRescorerIT.java @@ -196,6 +196,7 @@ public void setupModelAndData() throws IOException { adminClient().performRequest(new Request("POST", INDEX_NAME + "/_refresh")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103072") public void testLearningToRankRescore() throws Exception { Request request = new Request("GET", "store/_search?size=3&error_trace"); request.setJsonEntity(""" @@ -231,6 +232,7 @@ public void testLearningToRankRescore() throws Exception { assertHitScores(client().performRequest(request), List.of(9.0, 9.0, 6.0)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103072") public void testLearningToRankRescoreSmallWindow() throws Exception { Request request = new Request("GET", "store/_search?size=5"); request.setJsonEntity(""" @@ -243,6 +245,7 @@ public void testLearningToRankRescoreSmallWindow() throws Exception { assertHitScores(client().performRequest(request), List.of(20.0, 20.0, 1.0, 1.0, 1.0)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103072") public void testLearningToRankRescorerWithChainedRescorers() throws IOException { Request request = new Request("GET", "store/_search?size=5"); request.setJsonEntity(""" diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportAction.java index a16abe6408844..91e117fae05b2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportAction.java @@ -425,17 +425,25 @@ private void addInferenceUsage(ActionListener> listener) { Collections.emptySet() ); getModelsRequest.setPageParams(new PageParams(0, 10_000)); - client.execute(GetTrainedModelsAction.INSTANCE, getModelsRequest, ActionListener.wrap(getModelsResponse -> { - GetTrainedModelsStatsAction.Request getStatsRequest = new GetTrainedModelsStatsAction.Request("*"); - getStatsRequest.setPageParams(new PageParams(0, 10_000)); - client.execute(GetTrainedModelsStatsAction.INSTANCE, getStatsRequest, ActionListener.wrap(getStatsResponse -> { - Map inferenceUsage = new LinkedHashMap<>(); - addInferenceIngestUsage(getStatsResponse, inferenceUsage); - addTrainedModelStats(getModelsResponse, getStatsResponse, inferenceUsage); - addDeploymentStats(getModelsResponse, getStatsResponse, inferenceUsage); - listener.onResponse(inferenceUsage); - }, listener::onFailure)); - }, listener::onFailure)); + client.execute( + GetTrainedModelsAction.INSTANCE, + getModelsRequest, + listener.delegateFailureAndWrap((delegate, getModelsResponse) -> { + GetTrainedModelsStatsAction.Request getStatsRequest = new GetTrainedModelsStatsAction.Request("*"); + getStatsRequest.setPageParams(new PageParams(0, 10_000)); + client.execute( + GetTrainedModelsStatsAction.INSTANCE, + getStatsRequest, + delegate.delegateFailureAndWrap((l, getStatsResponse) -> { + Map inferenceUsage = new LinkedHashMap<>(); + addInferenceIngestUsage(getStatsResponse, inferenceUsage); + addTrainedModelStats(getModelsResponse, getStatsResponse, inferenceUsage); + 
addDeploymentStats(getModelsResponse, getStatsResponse, inferenceUsage); + l.onResponse(inferenceUsage); + }) + ); + }) + ); } else { listener.onResponse(Map.of()); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java index 1ccf4906cf333..8c46f7229c655 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java @@ -234,16 +234,15 @@ private void triggerNlpMaintenance() { } private void triggerDeleteExpiredDataTask(ActionListener finalListener) { - ActionListener deleteExpiredDataActionListener = ActionListener.wrap( - deleteExpiredDataResponse -> { + ActionListener deleteExpiredDataActionListener = finalListener.delegateFailureAndWrap( + (l, deleteExpiredDataResponse) -> { if (deleteExpiredDataResponse.isDeleted()) { logger.info("Successfully completed [ML] maintenance task: triggerDeleteExpiredDataTask"); } else { logger.info("Halting [ML] maintenance tasks before completion as elapsed time is too great"); } - finalListener.onResponse(AcknowledgedResponse.TRUE); - }, - finalListener::onFailure + l.onResponse(AcknowledgedResponse.TRUE); + } ); executeAsyncWithOrigin( @@ -259,8 +258,8 @@ private void triggerDeleteExpiredDataTask(ActionListener f public void triggerDeleteJobsInStateDeletingWithoutDeletionTask(ActionListener finalListener) { SetOnce> jobsInStateDeletingHolder = new SetOnce<>(); - ActionListener>> deleteJobsActionListener = ActionListener.wrap( - deleteJobsResponses -> { + ActionListener>> deleteJobsActionListener = finalListener + .delegateFailureAndWrap((delegate, deleteJobsResponses) -> { List jobIds = deleteJobsResponses.stream() .filter(t -> t.v2().isAcknowledged() == false) .map(Tuple::v1) @@ -271,10 +270,8 @@ public void 
triggerDeleteJobsInStateDeletingWithoutDeletionTask(ActionListener listTasksActionListener = ActionListener.wrap(listTasksResponse -> { Set jobsInStateDeleting = jobsInStateDeletingHolder.get(); @@ -302,7 +299,7 @@ public void triggerDeleteJobsInStateDeletingWithoutDeletionTask(ActionListener listener.onResponse(Tuple.tuple(request, response)), listener::onFailure) + listener.delegateFailureAndWrap((l, response) -> l.onResponse(Tuple.tuple(request, response))) ) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java index 2dcb9c5dfe705..1d6692f533b9c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java @@ -67,7 +67,7 @@ public void doExecute(Task task, Request request, ActionListener liste logger.debug("[{}] cancel model snapshot [{}] upgrades", request.getJobId(), request.getSnapshotId()); // 2. 
Now that we have the job IDs, find the relevant model snapshot upgrade tasks - ActionListener> expandIdsListener = ActionListener.wrap(jobs -> { + ActionListener> expandIdsListener = listener.delegateFailureAndWrap((delegate, jobs) -> { SimpleIdsMatcher matcher = new SimpleIdsMatcher(request.getSnapshotId()); Set jobIds = jobs.stream().map(Job.Builder::getId).collect(Collectors.toSet()); PersistentTasksCustomMetadata tasksInProgress = clusterService.state().metadata().custom(PersistentTasksCustomMetadata.TYPE); @@ -81,8 +81,8 @@ public void doExecute(Task task, Request request, ActionListener liste .filter(t -> jobIds.contains(((SnapshotUpgradeTaskParams) t.getParams()).getJobId())) .filter(t -> matcher.idMatches(((SnapshotUpgradeTaskParams) t.getParams()).getSnapshotId())) .collect(Collectors.toList()); - removePersistentTasks(request, upgradeTasksToCancel, listener); - }, listener::onFailure); + removePersistentTasks(request, upgradeTasksToCancel, delegate); + }); // 1. Expand jobs - this will throw if a required job ID match isn't made. Jobs being deleted are included here. 
jobConfigProvider.expandJobs(request.getJobId(), request.allowNoMatch(), false, null, expandIdsListener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java index 5ec1963ecec52..44235882a6582 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java @@ -96,6 +96,6 @@ protected void taskOperation( TrainedModelDeploymentTask task, ActionListener listener ) { - task.clearCache(ActionListener.wrap(r -> listener.onResponse(new Response(true)), listener::onFailure)); + task.clearCache(listener.delegateFailureAndWrap((l, r) -> l.onResponse(new Response(true)))); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 7b561ccaede2d..7512aa2b42acf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -160,68 +160,78 @@ protected void doExecute(Task task, CloseJobAction.Request request, ActionListen tasksMetadata, isForce, null, - ActionListener.wrap( - expandedJobIds -> validate( + listener.delegateFailureAndWrap( + (delegate, expandedJobIds) -> validate( expandedJobIds, isForce, tasksMetadata, - ActionListener.wrap( - response -> stopDatafeedsIfNecessary(response, isForce, timeout, tasksMetadata, ActionListener.wrap(bool -> { - request.setOpenJobIds(response.openJobIds.toArray(new String[0])); - if (response.openJobIds.isEmpty() && response.closingJobIds.isEmpty()) { - listener.onResponse(new CloseJobAction.Response(true)); 
- return; - } - - if (isForce) { - List jobIdsToForceClose = new ArrayList<>(response.openJobIds); - jobIdsToForceClose.addAll(response.closingJobIds); - forceCloseJob(state, request, jobIdsToForceClose, listener); - } else { - Set executorNodes = new HashSet<>(); - PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); - for (String resolvedJobId : request.getOpenJobIds()) { - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(resolvedJobId, tasks); - if (jobTask == null) { - // This should not happen, because openJobIds was - // derived from the same tasks metadata as jobTask - String msg = "Requested job [" - + resolvedJobId - + "] be stopped, but job's task could not be found."; - assert jobTask != null : msg; - logger.error(msg); - } else if (jobTask.isAssigned()) { - executorNodes.add(jobTask.getExecutorNode()); - } else { - // This is the easy case - the job is not currently assigned to a node, so can - // be gracefully stopped simply by removing its persistent task. (Usually a - // graceful stop cannot be achieved by simply removing the persistent task, but - // if the job has no running code then graceful/forceful are basically the same.) - // The listener here can be a no-op, as waitForJobClosed() already waits for - // these persistent tasks to disappear. 
- persistentTasksService.sendRemoveRequest( - jobTask.getId(), - ActionListener.wrap( - r -> logger.trace( - () -> format("[%s] removed task to close unassigned job", resolvedJobId) - ), - e -> logger.error( - () -> format("[%s] failed to remove task to close unassigned job", resolvedJobId), - e - ) - ) + delegate.delegateFailureAndWrap( + (delegate2, response) -> stopDatafeedsIfNecessary( + response, + isForce, + timeout, + tasksMetadata, + delegate2.delegateFailureAndWrap((delegate3, bool) -> { + request.setOpenJobIds(response.openJobIds.toArray(new String[0])); + if (response.openJobIds.isEmpty() && response.closingJobIds.isEmpty()) { + delegate3.onResponse(new CloseJobAction.Response(true)); + return; + } + + if (isForce) { + List jobIdsToForceClose = new ArrayList<>(response.openJobIds); + jobIdsToForceClose.addAll(response.closingJobIds); + forceCloseJob(state, request, jobIdsToForceClose, delegate3); + } else { + Set executorNodes = new HashSet<>(); + PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + for (String resolvedJobId : request.getOpenJobIds()) { + PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask( + resolvedJobId, + tasks ); + if (jobTask == null) { + // This should not happen, because openJobIds was + // derived from the same tasks metadata as jobTask + String msg = "Requested job [" + + resolvedJobId + + "] be stopped, but job's task could not be found."; + assert jobTask != null : msg; + logger.error(msg); + } else if (jobTask.isAssigned()) { + executorNodes.add(jobTask.getExecutorNode()); + } else { + // This is the easy case - the job is not currently assigned to a node, so can + // be gracefully stopped simply by removing its persistent task. (Usually a + // graceful stop cannot be achieved by simply removing the persistent task, but + // if the job has no running code then graceful/forceful are basically the same.) 
+ // The listener here can be a no-op, as waitForJobClosed() already waits for + // these persistent tasks to disappear. + persistentTasksService.sendRemoveRequest( + jobTask.getId(), + ActionListener.wrap( + r -> logger.trace( + () -> format("[%s] removed task to close unassigned job", resolvedJobId) + ), + e -> logger.error( + () -> format( + "[%s] failed to remove task to close unassigned job", + resolvedJobId + ), + e + ) + ) + ); + } } - } - request.setNodes(executorNodes.toArray(new String[0])); + request.setNodes(executorNodes.toArray(new String[0])); - normalCloseJob(state, task, request, response.openJobIds, response.closingJobIds, listener); - } - }, listener::onFailure)), - listener::onFailure + normalCloseJob(state, task, request, response.openJobIds, response.closingJobIds, delegate3); + } + }) + ) ) - ), - listener::onFailure + ) ) ); } @@ -287,12 +297,12 @@ void stopDatafeedsIfNecessary( PersistentTasksCustomMetadata tasksMetadata, ActionListener listener ) { - datafeedConfigProvider.findDatafeedIdsForJobIds(jobIds.openJobIds, ActionListener.wrap(datafeedIds -> { + datafeedConfigProvider.findDatafeedIdsForJobIds(jobIds.openJobIds, listener.delegateFailureAndWrap((delegate, datafeedIds) -> { List runningDatafeedIds = datafeedIds.stream() .filter(datafeedId -> MlTasks.getDatafeedState(datafeedId, tasksMetadata) != DatafeedState.STOPPED) .collect(Collectors.toList()); if (runningDatafeedIds.isEmpty()) { - listener.onResponse(false); + delegate.onResponse(false); } else { if (isForce) { // A datafeed with an end time will gracefully close its job when it stops even if it was force stopped. 
@@ -307,17 +317,13 @@ void stopDatafeedsIfNecessary( isolateDatafeeds( jobIds.openJobIds, runningDatafeedIds, - ActionListener.wrap( - r -> stopDatafeeds(runningDatafeedIds, true, timeout, listener), - // As things stand this will never be called - see the comment in isolateDatafeeds() for why - listener::onFailure - ) + delegate.delegateFailureAndWrap((l, r) -> stopDatafeeds(runningDatafeedIds, true, timeout, l)) ); } else { - stopDatafeeds(runningDatafeedIds, false, timeout, listener); + stopDatafeeds(runningDatafeedIds, false, timeout, delegate); } } - }, listener::onFailure)); + })); } private void stopDatafeeds(List runningDatafeedIds, boolean isForce, TimeValue timeout, ActionListener listener) { @@ -579,7 +585,7 @@ private void normalCloseJob( final Set movedJobs = ConcurrentCollections.newConcurrentSet(); - ActionListener intermediateListener = ActionListener.wrap(response -> { + ActionListener intermediateListener = listener.delegateFailureAndWrap((delegate, response) -> { for (String jobId : movedJobs) { PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); persistentTasksService.sendRemoveRequest( @@ -593,8 +599,8 @@ private void normalCloseJob( }) ); } - listener.onResponse(response); - }, listener::onFailure); + delegate.onResponse(response); + }); boolean noOpenJobsToClose = openJobIds.isEmpty(); if (noOpenJobsToClose) { @@ -603,9 +609,8 @@ private void normalCloseJob( return; } - ActionListener finalListener = ActionListener.wrap( - r -> waitForJobClosed(request, waitForCloseRequest, r, intermediateListener, movedJobs), - listener::onFailure + ActionListener finalListener = intermediateListener.delegateFailureAndWrap( + (l, r) -> waitForJobClosed(request, waitForCloseRequest, r, l, movedJobs) ); super.doExecute(task, request, finalListener); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java index 13e04772683eb..7442f1db0a662 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java @@ -108,7 +108,7 @@ private void doInferenceServiceModel(CoordinatedInferenceAction.Request request, INFERENCE_ORIGIN, InferenceAction.INSTANCE, new InferenceAction.Request(TaskType.ANY, request.getModelId(), request.getInputs(), request.getTaskSettings()), - ActionListener.wrap(r -> listener.onResponse(translateInferenceServiceResponse(r.getResults())), listener::onFailure) + listener.delegateFailureAndWrap((l, r) -> l.onResponse(translateInferenceServiceResponse(r.getResults()))) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCreateTrainedModelAssignmentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCreateTrainedModelAssignmentAction.java index 25f19f9300a19..348cb396f9c9f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCreateTrainedModelAssignmentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCreateTrainedModelAssignmentAction.java @@ -76,7 +76,7 @@ public TransportCreateTrainedModelAssignmentAction( protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { trainedModelAssignmentClusterService.createNewModelAssignment( request.getTaskParams(), - ActionListener.wrap(trainedModelAssignment -> listener.onResponse(new Response(trainedModelAssignment)), listener::onFailure) + listener.delegateFailureAndWrap((l, trainedModelAssignment) -> l.onResponse(new Response(trainedModelAssignment))) ); } diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java index e7afc79bd3644..729bed1709162 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java @@ -61,21 +61,27 @@ protected void doExecute(Task task, DeleteCalendarAction.Request request, Action final String calendarId = request.getCalendarId(); - ActionListener calendarListener = ActionListener.wrap(calendar -> { + ActionListener calendarListener = listener.delegateFailureAndWrap((delegate, calendar) -> { // Delete calendar and events DeleteByQueryRequest dbqRequest = buildDeleteByQuery(calendarId); - executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, dbqRequest, ActionListener.wrap(response -> { - if (response.getDeleted() == 0) { - listener.onFailure(new ResourceNotFoundException("No calendar with id [" + calendarId + "]")); - return; - } + executeAsyncWithOrigin( + client, + ML_ORIGIN, + DeleteByQueryAction.INSTANCE, + dbqRequest, + delegate.delegateFailureAndWrap((l, response) -> { + if (response.getDeleted() == 0) { + l.onFailure(new ResourceNotFoundException("No calendar with id [" + calendarId + "]")); + return; + } - jobManager.updateProcessOnCalendarChanged( - calendar.getJobIds(), - ActionListener.wrap(r -> listener.onResponse(AcknowledgedResponse.TRUE), listener::onFailure) - ); - }, listener::onFailure)); - }, listener::onFailure); + jobManager.updateProcessOnCalendarChanged( + calendar.getJobIds(), + l.delegateFailureAndWrap((ll, r) -> ll.onResponse(AcknowledgedResponse.TRUE)) + ); + }) + ); + }); jobResultsProvider.calendar(calendarId, calendarListener); } diff --git 
a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index 506cd016cf8de..a0990330aecb7 100644 --- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -382,7 +383,7 @@ public void enableMonitoring() throws Exception { assertThat( "No monitoring documents yet", - client().prepareSearch(".monitoring-es-" + TEMPLATE_VERSION + "-*").setSize(0).get().getHits().getTotalHits().value, + SearchResponseUtils.getTotalHitsValue(client().prepareSearch(".monitoring-es-" + TEMPLATE_VERSION + "-*").setSize(0)), greaterThan(0L) ); }, 30L, TimeUnit.SECONDS); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java index 71a2abd28259a..d3794201caec8 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java @@ -112,10 +112,9 @@ private ActionListener completeOnManagementThread ActionListener delegate ) { // Send failures to the final listener 
directly, and on success, fork to management thread and execute best effort alert removal - return ActionListener.wrap( - (response) -> threadPool.executor(ThreadPool.Names.MANAGEMENT) - .execute(ActionRunnable.wrap(delegate, (listener) -> afterSettingUpdate(listener, response))), - delegate::onFailure + return delegate.delegateFailure( + (l, response) -> threadPool.executor(ThreadPool.Names.MANAGEMENT) + .execute(ActionRunnable.wrap(l, (listener) -> afterSettingUpdate(listener, response))) ); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java index 5103f3a25cc33..84d64bd6dec82 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java @@ -929,16 +929,15 @@ public void openBulk(final ActionListener listener) { } if (migrationCoordinator.canInstall()) { - resource.checkAndPublishIfDirty(client, ActionListener.wrap((success) -> { + resource.checkAndPublishIfDirty(client, listener.delegateFailureAndWrap((delegate, success) -> { if (success) { final String name = "xpack.monitoring.exporters." 
+ config.name(); - - listener.onResponse(new HttpExportBulk(name, client, defaultParams, dateTimeFormatter, threadContext)); + delegate.onResponse(new HttpExportBulk(name, client, defaultParams, dateTimeFormatter, threadContext)); } else { // we're not ready yet, so keep waiting - listener.onResponse(null); + delegate.onResponse(null); } - }, listener::onFailure)); + })); } else { // we're migrating right now, so keep waiting listener.onResponse(null); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java index e2d4d173af013..9bfea90a28489 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java @@ -137,14 +137,14 @@ public Map getDefaultParameters() { */ @Override protected final void doCheckAndPublish(final RestClient client, final ActionListener listener) { - doCheck(client, ActionListener.wrap(exists -> { + doCheck(client, listener.delegateFailureAndWrap((l, exists) -> { if (exists) { // it already exists, so we can skip publishing it - listener.onResponse(ResourcePublishResult.ready()); + l.onResponse(ResourcePublishResult.ready()); } else { - doPublish(client, listener); + doPublish(client, l); } - }, listener::onFailure)); + })); } /** diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index a732e80e18f37..ef4f22f852b37 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.test.ESIntegTestCase; @@ -126,47 +127,37 @@ public void testExport() throws Exception { ensureYellowAndNoInitializingShards(".monitoring-*"); assertThat( - prepareSearch(".monitoring-es-*").setSize(0) - .setQuery(QueryBuilders.termQuery("type", "cluster_stats")) - .get() - .getHits() - .getTotalHits().value, + SearchResponseUtils.getTotalHitsValue( + prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "cluster_stats")) + ), greaterThan(0L) ); assertThat( - prepareSearch(".monitoring-es-*").setSize(0) - .setQuery(QueryBuilders.termQuery("type", "index_recovery")) - .get() - .getHits() - .getTotalHits().value, + SearchResponseUtils.getTotalHitsValue( + prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "index_recovery")) + ), greaterThan(0L) ); assertThat( - prepareSearch(".monitoring-es-*").setSize(0) - .setQuery(QueryBuilders.termQuery("type", "index_stats")) - .get() - .getHits() - .getTotalHits().value, + SearchResponseUtils.getTotalHitsValue( + prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "index_stats")) + ), greaterThan(0L) ); assertThat( - prepareSearch(".monitoring-es-*").setSize(0) - .setQuery(QueryBuilders.termQuery("type", "indices_stats")) - .get() - .getHits() - .getTotalHits().value, + SearchResponseUtils.getTotalHitsValue( + prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "indices_stats")) + ), greaterThan(0L) ); assertThat( - 
prepareSearch(".monitoring-es-*").setSize(0) - .setQuery(QueryBuilders.termQuery("type", "shards")) - .get() - .getHits() - .getTotalHits().value, + SearchResponseUtils.getTotalHitsValue( + prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "shards")) + ), greaterThan(0L) ); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java index 901150d0bca7c..d6e15ea25c8e1 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java @@ -265,19 +265,21 @@ private void assertWatchesExist() { SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource() .query(QueryBuilders.matchQuery("metadata.xpack.cluster_uuid", clusterUUID)); Set watchIds = new HashSet<>(Arrays.asList(ClusterAlertsUtil.WATCH_IDS)); - for (SearchHit hit : prepareSearch(".watches").setSource(searchSource).get().getHits().getHits()) { - String watchId = ObjectPath.eval("metadata.xpack.watch", hit.getSourceAsMap()); - assertNotNull("Missing watch ID", watchId); - assertTrue("found unexpected watch id", watchIds.contains(watchId)); - - String version = ObjectPath.eval("metadata.xpack.version_created", hit.getSourceAsMap()); - assertNotNull("Missing version from returned watch [" + watchId + "]", version); - assertTrue(Version.fromId(Integer.parseInt(version)).onOrAfter(Version.fromId(ClusterAlertsUtil.LAST_UPDATED_VERSION))); - - String uuid = ObjectPath.eval("metadata.xpack.cluster_uuid", hit.getSourceAsMap()); - assertNotNull("Missing cluster uuid", uuid); - assertEquals(clusterUUID, uuid); - } + 
assertResponse(prepareSearch(".watches").setSource(searchSource), response -> { + for (SearchHit hit : response.getHits().getHits()) { + String watchId = ObjectPath.eval("metadata.xpack.watch", hit.getSourceAsMap()); + assertNotNull("Missing watch ID", watchId); + assertTrue("found unexpected watch id", watchIds.contains(watchId)); + + String version = ObjectPath.eval("metadata.xpack.version_created", hit.getSourceAsMap()); + assertNotNull("Missing version from returned watch [" + watchId + "]", version); + assertTrue(Version.fromId(Integer.parseInt(version)).onOrAfter(Version.fromId(ClusterAlertsUtil.LAST_UPDATED_VERSION))); + + String uuid = ObjectPath.eval("metadata.xpack.cluster_uuid", hit.getSourceAsMap()); + assertNotNull("Missing cluster uuid", uuid); + assertEquals(clusterUUID, uuid); + } + }); } private void assertNoWatchesExist() { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java index 8264471222b57..1cbc724132d10 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java @@ -17,8 +17,11 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Consumer; final class StackTrace implements ToXContentObject { + static final int NATIVE_FRAME_TYPE = 3; + static final int KERNEL_FRAME_TYPE = 4; List addressOrLines; List fileIds; List frameIds; @@ -215,6 +218,15 @@ public static StackTrace fromSource(Map source) { return new StackTrace(addressOrLines, fileIDs, frameIDs, typeIDs, 0, 0, 0); } + public void forNativeAndKernelFrames(Consumer consumer) { + for (int i = 0; i < this.fileIds.size(); i++) { + Integer frameType = this.typeIds.get(i); + if (frameType != null && (frameType == NATIVE_FRAME_TYPE || frameType == KERNEL_FRAME_TYPE)) { + 
consumer.accept(this.fileIds.get(i)); + } + } + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 3fa47beebd70a..735a971c53536 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -526,7 +526,7 @@ public void onStackTraceResponse(MultiGetResponse multiGetItemResponses) { if (stackTracePerId.putIfAbsent(id, stacktrace) == null) { totalFrames.addAndGet(stacktrace.frameIds.size()); stackFrameIds.addAll(stacktrace.frameIds); - executableIds.addAll(stacktrace.fileIds); + stacktrace.forNativeAndKernelFrames(e -> executableIds.add(e)); } } } diff --git a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java index 8f134a9d37502..f12ae0bc86571 100644 --- a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java +++ b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java @@ -23,6 +23,7 @@ import org.apache.http.HttpHost; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -31,11 +32,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import 
org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.TestSecurityClient; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.MutableSettingsProvider; import org.elasticsearch.test.cluster.local.LocalClusterSpec; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; @@ -81,6 +84,11 @@ public class JwtRestIT extends ESRestTestCase { ]}""".replaceAll("\\s", ""); public static final String HMAC_PASSPHRASE = "test-HMAC/secret passphrase-value"; private static final String VALID_SHARED_SECRET = "test-secret"; + private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider() { + { + put("xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", VALID_SHARED_SECRET); + } + }; @ClassRule public static ElasticsearchCluster cluster = ElasticsearchCluster.local() @@ -105,10 +113,10 @@ public class JwtRestIT extends ESRestTestCase { .setting("xpack.security.http.ssl.certificate_authorities", "ca.crt") .setting("xpack.security.http.ssl.client_authentication", "optional") .settings(JwtRestIT::realmSettings) - .keystore("xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", VALID_SHARED_SECRET) .keystore("xpack.security.authc.realms.jwt.jwt2.hmac_key", HMAC_PASSPHRASE) .keystore("xpack.security.authc.realms.jwt.jwt3.hmac_jwkset", HMAC_JWKSET) .keystore("xpack.security.authc.realms.jwt.jwt3.client_authentication.shared_secret", VALID_SHARED_SECRET) + .keystore(keystoreSettings) .user("admin_user", "admin-password") .user("test_file_user", "test-password", "viewer", false) .build(); @@ -170,6 +178,7 @@ private static Map realmSettings(LocalClusterSpec.LocalNodeSpec 
settings.put("xpack.security.authc.realms.jwt.jwt2.required_claims.token_use", "access"); settings.put("xpack.security.authc.realms.jwt.jwt2.authorization_realms", "lookup_native"); settings.put("xpack.security.authc.realms.jwt.jwt2.client_authentication.type", "shared_secret"); + settings.put("xpack.security.authc.realms.jwt.jwt2.client_authentication.rotation_grace_period", "0s"); // Place PKI realm after JWT realm to verify realm chain fall-through settings.put("xpack.security.authc.realms.pki.pki_realm.order", "4"); @@ -499,6 +508,62 @@ public void testAuthenticationFailureIfDelegatedAuthorizationFails() throws Exce } } + public void testReloadClientSecret() throws Exception { + final String principal = SERVICE_SUBJECT.get(); + final String username = getUsernameFromPrincipal(principal); + final List roles = randomRoles(); + createUser(username, roles, Map.of()); + + try { + getSecurityClient(buildAndSignJwtForRealm2(principal), Optional.of(VALID_SHARED_SECRET)).authenticate(); + + // secret not updated yet, so authentication fails + final String newValidSharedSecret = "new-valid-secret"; + assertThat( + expectThrows( + ResponseException.class, + () -> getSecurityClient(buildAndSignJwtForRealm2(principal), Optional.of(newValidSharedSecret)).authenticate() + ).getResponse(), + hasStatusCode(RestStatus.UNAUTHORIZED) + ); + + writeSettingToKeystoreThenReload( + "xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", + newValidSharedSecret + ); + + // secret updated, so authentication succeeds + getSecurityClient(buildAndSignJwtForRealm2(principal), Optional.of(newValidSharedSecret)).authenticate(); + + // removing setting also works and leads to authentication failure + writeSettingToKeystoreThenReload("xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", null); + assertThat( + expectThrows( + ResponseException.class, + () -> getSecurityClient(buildAndSignJwtForRealm2(principal), 
Optional.of(newValidSharedSecret)).authenticate() + ).getResponse(), + hasStatusCode(RestStatus.UNAUTHORIZED) + ); + } finally { + // Restore setting for other tests + writeSettingToKeystoreThenReload( + "xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", + VALID_SHARED_SECRET + ); + deleteUser(username); + } + } + + private void writeSettingToKeystoreThenReload(String setting, @Nullable String value) throws IOException { + if (value == null) { + keystoreSettings.remove(setting); + } else { + keystoreSettings.put(setting, value); + } + cluster.updateStoredSecureSettings(); + assertOK(adminClient().performRequest(new Request("POST", "/_nodes/reload_secure_settings"))); + } + public void testFailureOnInvalidClientAuthentication() throws Exception { final String principal = SERVICE_SUBJECT.get(); final String username = getUsernameFromPrincipal(principal); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java index 7aeaccf63bab4..b1a76a4559812 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.Client; @@ -25,6 +24,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static 
org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; @@ -77,8 +77,7 @@ public void testDateMathExpressionsCanBeAuthorized() throws Exception { if (refeshOnOperation == false) { client.admin().indices().prepareRefresh(expression).get(); } - SearchResponse searchResponse = client.prepareSearch(expression).setQuery(QueryBuilders.matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, is(1L)); + assertHitCount(client.prepareSearch(expression).setQuery(QueryBuilders.matchAllQuery()), 1); assertResponse( client.prepareMultiSearch().add(client.prepareSearch(expression).setQuery(QueryBuilders.matchAllQuery()).request()), diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java index 0e8cb486ffb2d..1e1d8a7f0654c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesArray; import 
org.elasticsearch.common.settings.SecureString; @@ -191,31 +191,31 @@ public void testRequestCacheForDLS() { final Client limitedClient = limitedClient(); // Search first with power client, it should see all docs - assertSearchResponse(powerClient.prepareSearch(DLS_INDEX).setRequestCache(true).get(), Set.of("101", "102")); + assertSearchResponse(powerClient.prepareSearch(DLS_INDEX).setRequestCache(true), Set.of("101", "102")); assertCacheState(DLS_INDEX, 0, 1); // Search with the limited client and it should see only one doc (i.e. it won't use cache entry for power client) - assertSearchResponse(limitedClient.prepareSearch(DLS_INDEX).setRequestCache(true).get(), Set.of("101")); + assertSearchResponse(limitedClient.prepareSearch(DLS_INDEX).setRequestCache(true), Set.of("101")); assertCacheState(DLS_INDEX, 0, 2); // Execute the above search again and it should use the cache entry for limited client - assertSearchResponse(limitedClient.prepareSearch(DLS_INDEX).setRequestCache(true).get(), Set.of("101")); + assertSearchResponse(limitedClient.prepareSearch(DLS_INDEX).setRequestCache(true), Set.of("101")); assertCacheState(DLS_INDEX, 1, 2); // Execute the search with power client again and it should still see all docs - assertSearchResponse(powerClient.prepareSearch(DLS_INDEX).setRequestCache(true).get(), Set.of("101", "102")); + assertSearchResponse(powerClient.prepareSearch(DLS_INDEX).setRequestCache(true), Set.of("101", "102")); assertCacheState(DLS_INDEX, 2, 2); // The limited client has a different DLS query for dls-alias compared to the underlying dls-index - assertSearchResponse(limitedClient.prepareSearch(DLS_ALIAS).setRequestCache(true).get(), Set.of("102")); + assertSearchResponse(limitedClient.prepareSearch(DLS_ALIAS).setRequestCache(true), Set.of("102")); assertCacheState(DLS_INDEX, 2, 3); - assertSearchResponse(limitedClient.prepareSearch(DLS_ALIAS).setRequestCache(true).get(), Set.of("102")); + 
assertSearchResponse(limitedClient.prepareSearch(DLS_ALIAS).setRequestCache(true), Set.of("102")); assertCacheState(DLS_INDEX, 3, 3); // Search with limited client for dls-alias and dls-index returns all docs. The cache entry is however different // from the power client, i.e. still no sharing even if the end results are the same. This is because the // search with limited client still have DLS queries attached to it. - assertSearchResponse(limitedClient.prepareSearch(DLS_ALIAS, DLS_INDEX).setRequestCache(true).get(), Set.of("101", "102")); + assertSearchResponse(limitedClient.prepareSearch(DLS_ALIAS, DLS_INDEX).setRequestCache(true), Set.of("101", "102")); assertCacheState(DLS_INDEX, 3, 4); } @@ -224,37 +224,29 @@ public void testRequestCacheForFLS() { final Client limitedClient = limitedClient(); // Search first with power client, it should see all fields - assertSearchResponse( - powerClient.prepareSearch(FLS_INDEX).setRequestCache(true).get(), - Set.of("201", "202"), - Set.of("public", "private") - ); + assertSearchResponse(powerClient.prepareSearch(FLS_INDEX).setRequestCache(true), Set.of("201", "202"), Set.of("public", "private")); assertCacheState(FLS_INDEX, 0, 1); // Search with limited client and it should see only public field - assertSearchResponse(limitedClient.prepareSearch(FLS_INDEX).setRequestCache(true).get(), Set.of("201", "202"), Set.of("public")); + assertSearchResponse(limitedClient.prepareSearch(FLS_INDEX).setRequestCache(true), Set.of("201", "202"), Set.of("public")); assertCacheState(FLS_INDEX, 0, 2); // Search with limited client again and it should use the cache - assertSearchResponse(limitedClient.prepareSearch(FLS_INDEX).setRequestCache(true).get(), Set.of("201", "202"), Set.of("public")); + assertSearchResponse(limitedClient.prepareSearch(FLS_INDEX).setRequestCache(true), Set.of("201", "202"), Set.of("public")); assertCacheState(FLS_INDEX, 1, 2); // Search again with power client, it should use its own cache entry - assertSearchResponse( 
- powerClient.prepareSearch(FLS_INDEX).setRequestCache(true).get(), - Set.of("201", "202"), - Set.of("public", "private") - ); + assertSearchResponse(powerClient.prepareSearch(FLS_INDEX).setRequestCache(true), Set.of("201", "202"), Set.of("public", "private")); assertCacheState(FLS_INDEX, 2, 2); // The fls-alias has a different FLS definition compared to its underlying fls-index. - assertSearchResponse(limitedClient.prepareSearch(FLS_ALIAS).setRequestCache(true).get(), Set.of("201", "202"), Set.of("private")); + assertSearchResponse(limitedClient.prepareSearch(FLS_ALIAS).setRequestCache(true), Set.of("201", "202"), Set.of("private")); assertCacheState(FLS_INDEX, 2, 3); // Search with the limited client for both fls-alias and fls-index and all docs and fields are also returned. // But request cache is not shared with the power client because it still has a different indexAccessControl assertSearchResponse( - limitedClient.prepareSearch(FLS_ALIAS, FLS_INDEX).setRequestCache(true).get(), + limitedClient.prepareSearch(FLS_ALIAS, FLS_INDEX).setRequestCache(true), Set.of("201", "202"), Set.of("public", "private") ); @@ -267,7 +259,7 @@ public void testRequestCacheForBothDLSandFLS() throws ExecutionException, Interr // Search first with power client, it should see all fields assertSearchResponse( - powerClient.prepareSearch(INDEX).setRequestCache(true).get(), + powerClient.prepareSearch(INDEX).setRequestCache(true), Set.of("1", "2"), Set.of("number", "letter", "public", "private") ); @@ -278,25 +270,17 @@ public void testRequestCacheForBothDLSandFLS() throws ExecutionException, Interr expectThrows(ElasticsearchSecurityException.class, () -> limitedClient.prepareSearch(INDEX).setRequestCache(true).get()); // Search for alias1 that points to index and has DLS/FLS - assertSearchResponse( - limitedClient.prepareSearch(ALIAS1).setRequestCache(true).get(), - Set.of("1"), - Set.of("number", "letter", "public") - ); + 
assertSearchResponse(limitedClient.prepareSearch(ALIAS1).setRequestCache(true), Set.of("1"), Set.of("number", "letter", "public")); assertCacheState(INDEX, 0, 2); // Search for alias2 that also points to index but has a different set of DLS/FLS - assertSearchResponse( - limitedClient.prepareSearch(ALIAS2).setRequestCache(true).get(), - Set.of("2"), - Set.of("number", "letter", "private") - ); + assertSearchResponse(limitedClient.prepareSearch(ALIAS2).setRequestCache(true), Set.of("2"), Set.of("number", "letter", "private")); assertCacheState(INDEX, 0, 3); // Search for all-alias that has full read access to the underlying index // This makes it share the cache entry of the power client assertSearchResponse( - limitedClient.prepareSearch(ALL_ALIAS).setRequestCache(true).get(), + limitedClient.prepareSearch(ALL_ALIAS).setRequestCache(true), Set.of("1", "2"), Set.of("number", "letter", "public", "private") ); @@ -305,7 +289,7 @@ public void testRequestCacheForBothDLSandFLS() throws ExecutionException, Interr // Similarly, search for alias1 and all-alias results in full read access to the index // and again reuse the cache entry of the power client assertSearchResponse( - limitedClient.prepareSearch(ALIAS1, ALL_ALIAS).setRequestCache(true).get(), + limitedClient.prepareSearch(ALIAS1, ALL_ALIAS).setRequestCache(true), Set.of("1", "2"), Set.of("number", "letter", "public", "private") ); @@ -314,7 +298,7 @@ public void testRequestCacheForBothDLSandFLS() throws ExecutionException, Interr // Though search for both alias1 and alias2 is effectively full read access to index, // it does not share the cache entry of the power client because role queries still exist. 
assertSearchResponse( - limitedClient.prepareSearch(ALIAS1, ALIAS2).setRequestCache(true).get(), + limitedClient.prepareSearch(ALIAS1, ALIAS2).setRequestCache(true), Set.of("1", "2"), Set.of("number", "letter", "public", "private") ); @@ -325,7 +309,7 @@ public void testRequestCacheForBothDLSandFLS() throws ExecutionException, Interr // It should not reuse any entries from the cache assertSearchResponse( - limitedClientApiKey.prepareSearch(ALL_ALIAS).setRequestCache(true).get(), + limitedClientApiKey.prepareSearch(ALL_ALIAS).setRequestCache(true), Set.of("1"), Set.of("letter", "public", "private") ); @@ -341,43 +325,23 @@ public void testRequestCacheWithTemplateRoleQuery() { ); // Search first with user1 and only one document will be return with the corresponding username - assertSearchResponse( - client1.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_INDEX).setRequestCache(true).get(), - Set.of("1"), - Set.of("username") - ); + assertSearchResponse(client1.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_INDEX).setRequestCache(true), Set.of("1"), Set.of("username")); assertCacheState(DLS_TEMPLATE_ROLE_QUERY_INDEX, 0, 1); // Search with user2 will not use user1's cache because template query is resolved differently for them - assertSearchResponse( - client2.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_INDEX).setRequestCache(true).get(), - Set.of("2"), - Set.of("username") - ); + assertSearchResponse(client2.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_INDEX).setRequestCache(true), Set.of("2"), Set.of("username")); assertCacheState(DLS_TEMPLATE_ROLE_QUERY_INDEX, 0, 2); // Search with user1 again will use user1's cache - assertSearchResponse( - client1.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_INDEX).setRequestCache(true).get(), - Set.of("1"), - Set.of("username") - ); + assertSearchResponse(client1.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_INDEX).setRequestCache(true), Set.of("1"), Set.of("username")); assertCacheState(DLS_TEMPLATE_ROLE_QUERY_INDEX, 1, 2); // Search with user2 again will use user2's cache - 
assertSearchResponse( - client2.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_INDEX).setRequestCache(true).get(), - Set.of("2"), - Set.of("username") - ); + assertSearchResponse(client2.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_INDEX).setRequestCache(true), Set.of("2"), Set.of("username")); assertCacheState(DLS_TEMPLATE_ROLE_QUERY_INDEX, 2, 2); // Since the DLS for the alias uses a stored script, this should cause the request cached to be disabled - assertSearchResponse( - client1.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_ALIAS).setRequestCache(true).get(), - Set.of("1"), - Set.of("username") - ); + assertSearchResponse(client1.prepareSearch(DLS_TEMPLATE_ROLE_QUERY_ALIAS).setRequestCache(true), Set.of("1"), Set.of("username")); // No cache should be used assertCacheState(DLS_TEMPLATE_ROLE_QUERY_INDEX, 2, 2); } @@ -455,19 +419,24 @@ private Client limitedClientApiKey() throws ExecutionException, InterruptedExcep return client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKey)); } - private void assertSearchResponse(SearchResponse searchResponse, Set docIds) { - assertSearchResponse(searchResponse, docIds, null); + private void assertSearchResponse(SearchRequestBuilder requestBuilder, Set docIds) { + assertSearchResponse(requestBuilder, docIds, null); } - private void assertSearchResponse(SearchResponse searchResponse, Set docIds, Set fieldNames) { - assertThat(searchResponse.getFailedShards(), equalTo(0)); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) docIds.size())); - final SearchHit[] hits = searchResponse.getHits().getHits(); - assertThat(Arrays.stream(hits).map(SearchHit::getId).collect(Collectors.toUnmodifiableSet()), equalTo(docIds)); - if (fieldNames != null) { - for (SearchHit hit : hits) { - assertThat(hit.getSourceAsMap().keySet(), equalTo(fieldNames)); + private void assertSearchResponse(SearchRequestBuilder requestBuilder, Set docIds, Set fieldNames) { + var searchResponse = requestBuilder.get(); + try { + 
assertThat(searchResponse.getFailedShards(), equalTo(0)); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) docIds.size())); + final SearchHit[] hits = searchResponse.getHits().getHits(); + assertThat(Arrays.stream(hits).map(SearchHit::getId).collect(Collectors.toUnmodifiableSet()), equalTo(docIds)); + if (fieldNames != null) { + for (SearchHit hit : hits) { + assertThat(hit.getSourceAsMap().keySet(), equalTo(fieldNames)); + } } + } finally { + searchResponse.decRef(); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java index 164d28216ea93..57d18abaf1a92 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.settings.SecureString; @@ -35,6 +34,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static 
org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -113,32 +113,41 @@ public void testSimpleQuery() { prepareIndex("test").setId("1").setSource("id", "1", "field1", "value1").setRefreshPolicy(IMMEDIATE).get(); prepareIndex("test").setId("2").setSource("id", "2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").get(); - assertHitCount(response, 1); - assertSearchHits(response, "1"); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("id").toString(), equalTo("1")); - - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertHitCount(response, 1); - assertSearchHits(response, "2"); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("id").toString(), equalTo("2")); - - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) - .prepareSearch("test") - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 2); - assertSearchHits(response, "1", "2"); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(1).getSourceAsMap().get("field2").toString(), equalTo("value2")); + 
assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertHitCount(response, 1); + assertSearchHits(response, "1"); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("id").toString(), equalTo("1")); + } + ); + + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertHitCount(response, 1); + assertSearchHits(response, "2"); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("id").toString(), equalTo("2")); + } + ); + + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertSearchHits(response, "1", "2"); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(1).getSourceAsMap().get("field2").toString(), equalTo("value2")); + } + ); } public void testUpdatesAreRejected() { @@ -181,13 +190,17 @@ public void testDLSIsAppliedBeforeFLS() { prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value1").setRefreshPolicy(IMMEDIATE).get(); prepareIndex("test").setId("2").setSource("field1", "value2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse response = client().filterWithHeader( - 
Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD)) - ).prepareSearch("test").setQuery(QueryBuilders.termQuery("field1", "value2")).get(); - assertHitCount(response, 1); - assertSearchHits(response, "2"); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(QueryBuilders.termQuery("field1", "value2")), + response -> { + assertHitCount(response, 1); + assertSearchHits(response, "2"); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value2")); + } + ); assertHitCount( client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) @@ -209,48 +222,60 @@ public void testQueryCache() { // Both users have the same role query, but user3 has access to field2 and not field1, which should result in zero hits: int max = scaledRandomIntBetween(4, 32); for (int i = 0; i < max; i++) { - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("id"), equalTo("1")); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .get(); - 
assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2"), equalTo("value2")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("id"), equalTo("2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("id"), equalTo("1")); + } + ); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2"), equalTo("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("id"), equalTo("2")); + } + ); // this is a bit weird the document level permission (all docs with field2:value2) don't match with the field level // permissions (field1), // this results in document 2 being returned but no fields are visible: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getId(), equalTo("2")); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - 
assertThat(response.getHits().getAt(0).getSourceAsMap().get("id"), equalTo("2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("id"), equalTo("2")); + } + ); // user4 has all roles - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) - .prepareSearch("test") - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(response, 2); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("id"), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(1).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(1).getSourceAsMap().get("field2"), equalTo("value2")); - assertThat(response.getHits().getAt(1).getSourceAsMap().get("id"), equalTo("2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .addSort("id", SortOrder.ASC), + response -> { + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("id"), equalTo("1")); + 
assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(1).getSourceAsMap().get("field2"), equalTo("value2")); + assertThat(response.getHits().getAt(1).getSourceAsMap().get("id"), equalTo("2")); + } + ); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityFeatureUsageTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityFeatureUsageTests.java index 116e94cafcadf..258c4acd6c7f2 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityFeatureUsageTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityFeatureUsageTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.integration; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -20,6 +19,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -89,13 +89,16 @@ public void testDlsFeatureUsageTracking() throws Exception { prepareIndex("test").setId("2").setSource("field2", 
"value2").setRefreshPolicy(IMMEDIATE).get(); prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse response = internalCluster().coordOnlyNodeClient() - .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(randomBoolean() ? QueryBuilders.termQuery("field1", "value1") : QueryBuilders.matchAllQuery()) - .get(); - assertHitCount(response, 1); - assertSearchHits(response, "1"); + assertResponse( + internalCluster().coordOnlyNodeClient() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(randomBoolean() ? QueryBuilders.termQuery("field1", "value1") : QueryBuilders.matchAllQuery()), + response -> { + assertHitCount(response, 1); + assertSearchHits(response, "1"); + } + ); // coordinating only node should not tack DLS/FLS feature usage assertDlsFlsNotTrackedOnCoordOnlyNode(); @@ -109,13 +112,15 @@ public void testDlsFlsFeatureUsageNotTracked() { prepareIndex("test").setId("2").setSource("id", "2", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); // Running a search with user2 (which has role3 without DLS/FLS) should not trigger feature tracking. 
- SearchResponse response = internalCluster().coordOnlyNodeClient() - .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertHitCount(response, 2); - assertSearchHits(response, "1", "2"); - + assertResponse( + internalCluster().coordOnlyNodeClient() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertHitCount(response, 2); + assertSearchHits(response, "1", "2"); + } + ); assertDlsFlsNotTrackedAcrossAllNodes(); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java index 61126810e3df1..73897fc38633a 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java @@ -8,7 +8,6 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; @@ -21,6 +20,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static 
org.hamcrest.Matchers.equalTo; @@ -105,14 +105,17 @@ public void testDuelWithAliasFilters() throws Exception { builder.get(); for (int roleI = 1; roleI <= numberOfRoles; roleI++) { - SearchResponse searchResponse1 = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user" + roleI, USERS_PASSWD)) - ).prepareSearch("test").get(); - SearchResponse searchResponse2 = prepareSearch("alias" + roleI).get(); - assertThat(searchResponse1.getHits().getTotalHits().value, equalTo(searchResponse2.getHits().getTotalHits().value)); - for (int hitI = 0; hitI < searchResponse1.getHits().getHits().length; hitI++) { - assertThat(searchResponse1.getHits().getAt(hitI).getId(), equalTo(searchResponse2.getHits().getAt(hitI).getId())); - } + final int role = roleI; + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user" + roleI, USERS_PASSWD))) + .prepareSearch("test"), + searchResponse1 -> assertResponse(prepareSearch("alias" + role), searchResponse2 -> { + assertThat(searchResponse1.getHits().getTotalHits().value, equalTo(searchResponse2.getHits().getTotalHits().value)); + for (int hitI = 0; hitI < searchResponse1.getHits().getHits().length; hitI++) { + assertThat(searchResponse1.getHits().getAt(hitI).getId(), equalTo(searchResponse2.getHits().getAt(hitI).getId())); + } + }) + ); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index e42fab4708b8a..c10dc7f1da25c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -92,7 +92,9 @@ import static 
org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; @@ -542,18 +544,19 @@ public void testPercolateQueryWithIndexedDocWithDLS() { .setRefreshPolicy(IMMEDIATE) .get(); // user1 can preform the percolate search for doc#1 in the doc_index because user1 has access to the doc - SearchResponse result = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("query_index").setQuery(new PercolateQueryBuilder("query", "doc_index", "1", null, null, null)).get(); - assertNoFailures(result); - assertHitCount(result, 1); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("query_index") + .setQuery(new PercolateQueryBuilder("query", "doc_index", "1", null, null, null)), + 1 + ); // user2 can access the query_index itself (without performing percolate search) - result = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("query_index") - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - assertNoFailures(result); - assertHitCount(result, 1); + assertHitCountAndNoFailures( + 
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("query_index") + .setQuery(QueryBuilders.matchAllQuery()), + 1 + ); // user2 cannot access doc#1 of the doc_index so the percolate search fails because doc#1 cannot be found ResourceNotFoundException e = expectThrows( ResourceNotFoundException.class, @@ -587,7 +590,6 @@ public void testGeoQueryWithIndexedShapeWithDLS() { ShapeQueryBuilder shapeQuery = new ShapeQueryBuilder("search_field", "1").relation(ShapeRelation.WITHIN) .indexedShapeIndex("shape_index") .indexedShapePath("shape_field"); - SearchResponse result; // user1 has access to doc#1 of the shape_index so everything works SearchRequestBuilder requestBuilder = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) @@ -597,16 +599,14 @@ public void testGeoQueryWithIndexedShapeWithDLS() { } else { requestBuilder.setQuery(shapeQuery); } - result = requestBuilder.get(); - assertNoFailures(result); - assertHitCount(result, 1); + assertHitCountAndNoFailures(requestBuilder, 1); // user2 does not have access to doc#1 of the shape_index - result = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("search_index") - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - assertNoFailures(result); - assertHitCount(result, 1); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("search_index") + .setQuery(QueryBuilders.matchAllQuery()), + 1 + ); IllegalArgumentException e; if (randomBoolean()) { e = expectThrows( @@ -696,8 +696,7 @@ public void testTermsLookupOnIndexWithDLS() { assertHitCount( client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) .prepareSearch("search_index") 
- .setQuery(lookup) - .get(), + .setQuery(lookup), 0 ); assertSearchHitsWithoutFailures( @@ -877,36 +876,48 @@ public void testKnnSearch() throws Exception { } // user1 should only be able to see docs with field1: value1 - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setQuery(query).addFetchField("field1").setSize(10).get(); - assertEquals(5, response.getHits().getTotalHits().value); - assertEquals(5, response.getHits().getHits().length); - for (SearchHit hit : response.getHits().getHits()) { - assertNotNull(hit.field("field1")); - } + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(query) + .addFetchField("field1") + .setSize(10), + response -> { + assertEquals(5, response.getHits().getTotalHits().value); + assertEquals(5, response.getHits().getHits().length); + for (SearchHit hit : response.getHits().getHits()) { + assertNotNull(hit.field("field1")); + } + } + ); // user2 should only be able to see docs with field2: value2 - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(query) - .addFetchField("field2") - .setSize(10) - .get(); - assertEquals(5, response.getHits().getTotalHits().value); - assertEquals(5, response.getHits().getHits().length); - for (SearchHit hit : response.getHits().getHits()) { - assertNotNull(hit.field("field2")); - } + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(query) + .addFetchField("field2") + .setSize(10), + response -> { + assertEquals(5, response.getHits().getTotalHits().value); + assertEquals(5, response.getHits().getHits().length); + for (SearchHit 
hit : response.getHits().getHits()) { + assertNotNull(hit.field("field2")); + } + } + ); // user3 can see all indexed docs - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(query) - .setSize(10) - .get(); - assertEquals(10, response.getHits().getTotalHits().value); - assertEquals(10, response.getHits().getHits().length); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(query) + .setSize(10), + response -> { + assertEquals(10, response.getHits().getTotalHits().value); + assertEquals(10, response.getHits().getHits().length); + } + ); } public void testGlobalAggregation() throws Exception { @@ -918,53 +929,63 @@ public void testGlobalAggregation() throws Exception { prepareIndex("test").setId("2").setSource("field2", "value2").setRefreshPolicy(IMMEDIATE).get(); prepareIndex("test").setId("3").setSource("field3", "value3").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse response = prepareSearch("test").addAggregation( - AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2")) - ).get(); - assertHitCount(response, 3); - assertSearchHits(response, "1", "2", "3"); - - Global globalAgg = response.getAggregations().get("global"); - assertThat(globalAgg.getDocCount(), equalTo(3L)); - Terms termsAgg = globalAgg.getAggregations().get("field2"); - assertThat(termsAgg.getBuckets().get(0).getKeyAsString(), equalTo("value2")); - assertThat(termsAgg.getBuckets().get(0).getDocCount(), equalTo(1L)); - - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))) - .get(); - 
assertHitCount(response, 1); - assertSearchHits(response, "1"); - - globalAgg = response.getAggregations().get("global"); - assertThat(globalAgg.getDocCount(), equalTo(1L)); - termsAgg = globalAgg.getAggregations().get("field2"); - assertThat(termsAgg.getBuckets().size(), equalTo(0)); - - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))) - .get(); - assertHitCount(response, 1); - assertSearchHits(response, "2"); - - globalAgg = response.getAggregations().get("global"); - assertThat(globalAgg.getDocCount(), equalTo(1L)); - termsAgg = globalAgg.getAggregations().get("field2"); - assertThat(termsAgg.getBuckets().size(), equalTo(1)); - - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))) - .get(); - assertHitCount(response, 2); - assertSearchHits(response, "1", "2"); - - globalAgg = response.getAggregations().get("global"); - assertThat(globalAgg.getDocCount(), equalTo(2L)); - termsAgg = globalAgg.getAggregations().get("field2"); - assertThat(termsAgg.getBuckets().size(), equalTo(1)); + assertResponse( + prepareSearch("test").addAggregation( + AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2")) + ), + response -> { + assertHitCount(response, 3); + assertSearchHits(response, "1", "2", "3"); + + Global globalAgg = response.getAggregations().get("global"); + assertThat(globalAgg.getDocCount(), equalTo(3L)); + Terms termsAgg = globalAgg.getAggregations().get("field2"); + assertThat(termsAgg.getBuckets().get(0).getKeyAsString(), equalTo("value2")); + 
assertThat(termsAgg.getBuckets().get(0).getDocCount(), equalTo(1L)); + } + ); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))), + response -> { + assertHitCount(response, 1); + assertSearchHits(response, "1"); + + Global globalAgg = response.getAggregations().get("global"); + assertThat(globalAgg.getDocCount(), equalTo(1L)); + Terms termsAgg = globalAgg.getAggregations().get("field2"); + assertThat(termsAgg.getBuckets().size(), equalTo(0)); + } + ); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))), + response -> { + assertHitCount(response, 1); + assertSearchHits(response, "2"); + + Global globalAgg = response.getAggregations().get("global"); + assertThat(globalAgg.getDocCount(), equalTo(1L)); + Terms termsAgg = globalAgg.getAggregations().get("field2"); + assertThat(termsAgg.getBuckets().size(), equalTo(1)); + } + ); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))), + response -> { + assertHitCount(response, 2); + assertSearchHits(response, "1", "2"); + + Global globalAgg = response.getAggregations().get("global"); + assertThat(globalAgg.getDocCount(), equalTo(2L)); + Terms termsAgg = globalAgg.getAggregations().get("field2"); + assertThat(termsAgg.getBuckets().size(), equalTo(1)); + } + ); } public void testParentChild() throws Exception { @@ -1016,17 +1037,20 @@ 
public void testParentChild() throws Exception { } private void verifyParentChild() { - SearchResponse searchResponse = prepareSearch("test").setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); - - searchResponse = prepareSearch("test").setQuery(hasParentQuery("parent", matchAllQuery(), false)) - .addSort("id", SortOrder.ASC) - .get(); - assertHitCount(searchResponse, 3L); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("c2")); - assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("c3")); + assertResponse(prepareSearch("test").setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)), searchResponse -> { + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + }); + + assertResponse( + prepareSearch("test").setQuery(hasParentQuery("parent", matchAllQuery(), false)).addSort("id", SortOrder.ASC), + searchResponse -> { + assertHitCount(searchResponse, 3L); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("c2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("c3")); + } + ); // Both user1 and user2 can't see field1 and field2, no parent/child query should yield results: assertHitCount( @@ -1058,20 +1082,26 @@ private void verifyParentChild() { ); // user 3 can see them but not c3 - searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)) - .get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + assertResponse( + 
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)), + searchResponse -> { + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + } + ); - searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(hasParentQuery("parent", matchAllQuery(), false)) - .get(); - assertHitCount(searchResponse, 2L); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); - assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("c2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasParentQuery("parent", matchAllQuery(), false)), + searchResponse -> { + assertHitCount(searchResponse, 2L); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("c2")); + } + ); } public void testScroll() throws Exception { @@ -1111,6 +1141,7 @@ public void testScroll() throws Exception { break; } + response.decRef(); response = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) ).prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueMinutes(1L)).get(); @@ -1118,6 +1149,7 @@ public void testScroll() throws Exception { } finally { if (response != null) { String scrollId = response.getScrollId(); + response.decRef(); if (scrollId != null) { client().prepareClearScroll().addScrollId(scrollId).get(); } @@ -1148,6 +1180,9 @@ public void testReaderId() throws Exception { SearchResponse response = null; try { for (int from = 0; from < numVisible; from++) { + if (response != null) { + 
response.decRef(); + } response = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) ) @@ -1164,6 +1199,7 @@ public void testReaderId() throws Exception { } } finally { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(response.pointInTimeId())).actionGet(); + response.decRef(); } } @@ -1181,27 +1217,30 @@ public void testRequestCache() throws Exception { int max = scaledRandomIntBetween(4, 32); for (int i = 0; i < max; i++) { Boolean requestCache = randomFrom(true, null); - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setSize(0).setQuery(termQuery("field1", "value1")).setRequestCache(requestCache).get(); - assertNoFailures(response); - assertHitCount(response, 1); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .setSize(0) - .setQuery(termQuery("field1", "value1")) - .setRequestCache(requestCache) - .get(); - assertNoFailures(response); - assertHitCount(response, 0); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .setSize(0) - .setQuery(termQuery("field1", "value1")) - .setRequestCache(requestCache) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache), + 1 + ); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + 
.setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache), + 0 + ); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache), + 1 + ); } } @@ -1278,27 +1317,34 @@ public void testNestedInnerHits() throws Exception { .get(); refresh("test"); - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD)) - ) - .prepareSearch("test") - .setQuery( - QueryBuilders.nestedQuery("nested_field", QueryBuilders.termQuery("nested_field.field2", "value2"), ScoreMode.None) - .innerHit(new InnerHitBuilder()) - ) - .get(); - assertHitCount(response, 1); - assertSearchHits(response, "1"); - assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getSourceAsString(), - equalTo("{\"field2\":\"value2\"}") - ); - assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(1).getNestedIdentity().getOffset(), equalTo(1)); - assertThat( - response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(1).getSourceAsString(), - equalTo("{\"field2\":[\"value2\",\"value3\"]}") + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .setQuery( + QueryBuilders.nestedQuery("nested_field", QueryBuilders.termQuery("nested_field.field2", "value2"), ScoreMode.None) + .innerHit(new InnerHitBuilder()) + ), + response -> { + assertHitCount(response, 1); + assertSearchHits(response, "1"); + 
assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getId(), equalTo("1")); + assertThat( + response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getNestedIdentity().getOffset(), + equalTo(0) + ); + assertThat( + response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getSourceAsString(), + equalTo("{\"field2\":\"value2\"}") + ); + assertThat( + response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(1).getNestedIdentity().getOffset(), + equalTo(1) + ); + assertThat( + response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(1).getSourceAsString(), + equalTo("{\"field2\":[\"value2\",\"value3\"]}") + ); + } ); } @@ -1342,16 +1388,19 @@ public void testSuggesters() throws Exception { ); // Term suggester: - SearchResponse response = prepareSearch("test").suggest( - new SuggestBuilder().setGlobalText("valeu").addSuggestion("_name1", new TermSuggestionBuilder("suggest_field1")) - ).get(); - assertNoFailures(response); - - TermSuggestion termSuggestion = response.getSuggest().getSuggestion("_name1"); - assertThat(termSuggestion, notNullValue()); - assertThat(termSuggestion.getEntries().size(), equalTo(1)); - assertThat(termSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); - assertThat(termSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); + assertNoFailuresAndResponse( + prepareSearch("test").suggest( + new SuggestBuilder().setGlobalText("valeu").addSuggestion("_name1", new TermSuggestionBuilder("suggest_field1")) + ), + response -> { + + TermSuggestion termSuggestion = response.getSuggest().getSuggestion("_name1"); + assertThat(termSuggestion, notNullValue()); + assertThat(termSuggestion.getEntries().size(), equalTo(1)); + assertThat(termSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); + assertThat(termSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); + } + ); final 
String[] indices = randomFrom( List.of(new String[] { "test" }, new String[] { "fls-index", "test" }, new String[] { "test", "fls-index" }) @@ -1367,17 +1416,19 @@ public void testSuggesters() throws Exception { assertThat(e.getMessage(), equalTo("Suggest isn't supported if document level security is enabled")); // Phrase suggester: - response = prepareSearch("test").suggest( - new SuggestBuilder().setGlobalText("valeu").addSuggestion("_name1", new PhraseSuggestionBuilder("suggest_field1")) - ).get(); - assertNoFailures(response); - - PhraseSuggestion phraseSuggestion = response.getSuggest().getSuggestion("_name1"); - assertThat(phraseSuggestion, notNullValue()); - assertThat(phraseSuggestion.getEntries().size(), equalTo(1)); - assertThat(phraseSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); - assertThat(phraseSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); - + assertNoFailuresAndResponse( + prepareSearch("test").suggest( + new SuggestBuilder().setGlobalText("valeu").addSuggestion("_name1", new PhraseSuggestionBuilder("suggest_field1")) + ), + response -> { + + PhraseSuggestion phraseSuggestion = response.getSuggest().getSuggestion("_name1"); + assertThat(phraseSuggestion, notNullValue()); + assertThat(phraseSuggestion.getEntries().size(), equalTo(1)); + assertThat(phraseSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); + assertThat(phraseSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); + } + ); e = expectThrows( ElasticsearchSecurityException.class, () -> client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) @@ -1388,16 +1439,18 @@ public void testSuggesters() throws Exception { assertThat(e.getMessage(), equalTo("Suggest isn't supported if document level security is enabled")); // Completion suggester: - response = prepareSearch("test").suggest( - new 
SuggestBuilder().setGlobalText("valu").addSuggestion("_name1", new CompletionSuggestionBuilder("suggest_field2")) - ).get(); - assertNoFailures(response); - - CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("_name1"); - assertThat(completionSuggestion, notNullValue()); - assertThat(completionSuggestion.getEntries().size(), equalTo(1)); - assertThat(completionSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); - assertThat(completionSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); + assertNoFailuresAndResponse( + prepareSearch("test").suggest( + new SuggestBuilder().setGlobalText("valu").addSuggestion("_name1", new CompletionSuggestionBuilder("suggest_field2")) + ), + response -> { + CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("_name1"); + assertThat(completionSuggestion, notNullValue()); + assertThat(completionSuggestion.getEntries().size(), equalTo(1)); + assertThat(completionSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); + assertThat(completionSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); + } + ); e = expectThrows( ElasticsearchSecurityException.class, @@ -1433,18 +1486,20 @@ public void testProfile() throws Exception { .setMapping("field1", "type=text", "other_field", "type=text", "yet_another", "type=text") ); - SearchResponse response = prepareSearch("test").setProfile(true).setQuery(new FuzzyQueryBuilder("other_field", "valeu")).get(); - assertNoFailures(response); - - assertThat(response.getProfileResults().size(), equalTo(1)); - SearchProfileShardResult shardResult = response.getProfileResults().get(response.getProfileResults().keySet().toArray()[0]); - assertThat(shardResult.getQueryProfileResults().size(), equalTo(1)); - QueryProfileShardResult queryProfileShardResult = shardResult.getQueryProfileResults().get(0); - 
assertThat(queryProfileShardResult.getQueryResults().size(), equalTo(1)); - logger.info("queryProfileShardResult=" + Strings.toString(queryProfileShardResult)); - assertThat( - queryProfileShardResult.getQueryResults().stream().map(ProfileResult::getLuceneDescription).sorted().collect(toList()), - equalTo(List.of("(other_field:value)^0.8")) + assertNoFailuresAndResponse( + prepareSearch("test").setProfile(true).setQuery(new FuzzyQueryBuilder("other_field", "valeu")), + response -> { + assertThat(response.getProfileResults().size(), equalTo(1)); + SearchProfileShardResult shardResult = response.getProfileResults().get(response.getProfileResults().keySet().toArray()[0]); + assertThat(shardResult.getQueryProfileResults().size(), equalTo(1)); + QueryProfileShardResult queryProfileShardResult = shardResult.getQueryProfileResults().get(0); + assertThat(queryProfileShardResult.getQueryResults().size(), equalTo(1)); + logger.info("queryProfileShardResult=" + Strings.toString(queryProfileShardResult)); + assertThat( + queryProfileShardResult.getQueryResults().stream().map(ProfileResult::getLuceneDescription).sorted().collect(toList()), + equalTo(List.of("(other_field:value)^0.8")) + ); + } ); final String[] indices = randomFrom( diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java index 40672bf597b8c..34eecd57b53d5 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.integration; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import 
org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -29,6 +28,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.equalTo; @@ -194,64 +194,74 @@ public void testDuel() throws Exception { } indexRandom(true, requests); - SearchResponse actual = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)) - ) - .prepareSearch("test") - .addSort("id", SortOrder.ASC) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("field1", "value")) - .should(QueryBuilders.termQuery("field2", "value")) - .should(QueryBuilders.termQuery("field3", "value")) + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addSort("id", SortOrder.ASC) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field1", "value")) + .should(QueryBuilders.termQuery("field2", "value")) + .should(QueryBuilders.termQuery("field3", "value")) + ), + actual -> assertResponse( + prepareSearch("test").addSort("id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery().should(QueryBuilders.termQuery("field1", "value"))), + expected -> { + assertThat(actual.getHits().getTotalHits().value, equalTo(expected.getHits().getTotalHits().value)); + assertThat(actual.getHits().getHits().length, 
equalTo(expected.getHits().getHits().length)); + for (int i = 0; i < actual.getHits().getHits().length; i++) { + assertThat(actual.getHits().getAt(i).getId(), equalTo(expected.getHits().getAt(i).getId())); + } + } ) - .get(); - SearchResponse expected = prepareSearch("test").addSort("id", SortOrder.ASC) - .setQuery(QueryBuilders.boolQuery().should(QueryBuilders.termQuery("field1", "value"))) - .get(); - assertThat(actual.getHits().getTotalHits().value, equalTo(expected.getHits().getTotalHits().value)); - assertThat(actual.getHits().getHits().length, equalTo(expected.getHits().getHits().length)); - for (int i = 0; i < actual.getHits().getHits().length; i++) { - assertThat(actual.getHits().getAt(i).getId(), equalTo(expected.getHits().getAt(i).getId())); - } + ); - actual = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .addSort("id", SortOrder.ASC) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("field1", "value")) - .should(QueryBuilders.termQuery("field2", "value")) - .should(QueryBuilders.termQuery("field3", "value")) + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .addSort("id", SortOrder.ASC) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field1", "value")) + .should(QueryBuilders.termQuery("field2", "value")) + .should(QueryBuilders.termQuery("field3", "value")) + ), + actual -> assertResponse( + prepareSearch("test").addSort("id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery().should(QueryBuilders.termQuery("field2", "value"))), + expected -> { + assertThat(actual.getHits().getTotalHits().value, equalTo(expected.getHits().getTotalHits().value)); + assertThat(actual.getHits().getHits().length, equalTo(expected.getHits().getHits().length)); + for (int i = 0; i < 
actual.getHits().getHits().length; i++) { + assertThat(actual.getHits().getAt(i).getId(), equalTo(expected.getHits().getAt(i).getId())); + } + } ) - .get(); - expected = prepareSearch("test").addSort("id", SortOrder.ASC) - .setQuery(QueryBuilders.boolQuery().should(QueryBuilders.termQuery("field2", "value"))) - .get(); - assertThat(actual.getHits().getTotalHits().value, equalTo(expected.getHits().getTotalHits().value)); - assertThat(actual.getHits().getHits().length, equalTo(expected.getHits().getHits().length)); - for (int i = 0; i < actual.getHits().getHits().length; i++) { - assertThat(actual.getHits().getAt(i).getId(), equalTo(expected.getHits().getAt(i).getId())); - } + ); - actual = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) - .prepareSearch("test") - .addSort("id", SortOrder.ASC) - .setQuery( - QueryBuilders.boolQuery() - .should(QueryBuilders.termQuery("field1", "value")) - .should(QueryBuilders.termQuery("field2", "value")) - .should(QueryBuilders.termQuery("field3", "value")) + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .addSort("id", SortOrder.ASC) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field1", "value")) + .should(QueryBuilders.termQuery("field2", "value")) + .should(QueryBuilders.termQuery("field3", "value")) + ), + actual -> assertResponse( + prepareSearch("test").addSort("id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery().should(QueryBuilders.termQuery("field3", "value"))), + expected -> { + assertThat(actual.getHits().getTotalHits().value, equalTo(expected.getHits().getTotalHits().value)); + assertThat(actual.getHits().getHits().length, equalTo(expected.getHits().getHits().length)); + for (int i = 0; i < actual.getHits().getHits().length; i++) { + assertThat(actual.getHits().getAt(i).getId(), 
equalTo(expected.getHits().getAt(i).getId())); + } + } ) - .get(); - expected = prepareSearch("test").addSort("id", SortOrder.ASC) - .setQuery(QueryBuilders.boolQuery().should(QueryBuilders.termQuery("field3", "value"))) - .get(); - assertThat(actual.getHits().getTotalHits().value, equalTo(expected.getHits().getTotalHits().value)); - assertThat(actual.getHits().getHits().length, equalTo(expected.getHits().getHits().length)); - for (int i = 0; i < actual.getHits().getHits().length; i++) { - assertThat(actual.getHits().getAt(i).getId(), equalTo(expected.getHits().getAt(i).getId())); - } + ); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 9c962095b3229..83be62beab4ec 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -79,7 +79,7 @@ import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; @@ -424,11 +424,16 @@ public void testKnnSearch() throws IOException { KnnVectorQueryBuilder query = new KnnVectorQueryBuilder("vector", 
queryVector, 10, null); // user1 has access to vector field, so the query should match with the document: - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setQuery(query).addFetchField("vector").get(); - assertHitCount(response, 1); - assertNotNull(response.getHits().getAt(0).field("vector")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(query) + .addFetchField("vector"), + response -> { + assertHitCount(response, 1); + assertNotNull(response.getHits().getAt(0).field("vector")); + } + ); // user2 has no access to vector field, so the query should not match with the document: assertHitCount( @@ -440,13 +445,15 @@ public void testKnnSearch() throws IOException { ); // check user2 cannot see the vector field, even when their search matches the document - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addFetchField("vector") - .get(); - assertHitCount(response, 1); - assertNull(response.getHits().getAt(0).field("vector")); - + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addFetchField("vector"), + response -> { + assertHitCount(response, 1); + assertNull(response.getHits().getAt(0).field("vector")); + } + ); // user1 can access field1, so the filtered query should match with the document: KnnVectorQueryBuilder filterQuery1 = new KnnVectorQueryBuilder("vector", queryVector, 10, null).addFilterQuery( QueryBuilders.matchQuery("field1", "value1") @@ -479,37 +486,38 @@ public void testPercolateQueryWithIndexedDocWithFLS() { {"field1": "value1", "field2": "A new bonsai tree in the office"}""", 
XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); QueryBuilder percolateQuery = new PercolateQueryBuilder("query", "doc_index", "1", null, null, null); // user7 sees everything - SearchResponse result = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD)) - ).prepareSearch("query_index").setQuery(percolateQuery).get(); - assertNoFailures(result); - assertHitCount(result, 1); - result = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("query_index") - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - assertNoFailures(result); - assertHitCount(result, 1); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareSearch("query_index") + .setQuery(percolateQuery), + 1 + ); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("query_index") + .setQuery(QueryBuilders.matchAllQuery()), + 1 + ); // user 3 can see the fields of the percolated document, but not the "query" field of the indexed query - result = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("query_index") - .setQuery(percolateQuery) - .get(); - assertNoFailures(result); - assertHitCount(result, 0); - result = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user9", USERS_PASSWD))) - .prepareSearch("query_index") - .setQuery(QueryBuilders.matchAllQuery()) - .get(); - assertNoFailures(result); - assertHitCount(result, 1); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("query_index") + 
.setQuery(percolateQuery), + 0 + ); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user9", USERS_PASSWD))) + .prepareSearch("query_index") + .setQuery(QueryBuilders.matchAllQuery()), + 1 + ); // user 9 can see the fields of the index query, but not the field of the indexed document to be percolated - result = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user9", USERS_PASSWD))) - .prepareSearch("query_index") - .setQuery(percolateQuery) - .get(); - assertNoFailures(result); - assertHitCount(result, 0); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user9", USERS_PASSWD))) + .prepareSearch("query_index") + .setQuery(percolateQuery), + 0 + ); } public void testGeoQueryWithIndexedShapeWithFLS() { @@ -556,7 +564,6 @@ public void testGeoQueryWithIndexedShapeWithFLS() { ] } }""", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse result; // user sees both the querying shape and the queried point SearchRequestBuilder requestBuilder = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD)) @@ -569,9 +576,7 @@ public void testGeoQueryWithIndexedShapeWithFLS() { } else { requestBuilder.setQuery(shapeQuery1); } - result = requestBuilder.get(); - assertNoFailures(result); - assertHitCount(result, 1); + assertHitCountAndNoFailures(requestBuilder, 1); // user sees the queried point but not the querying shape final ShapeQueryBuilder shapeQuery2 = new ShapeQueryBuilder("field", "2").relation(ShapeRelation.WITHIN) .indexedShapeIndex("shape_index") @@ -607,9 +612,7 @@ public void testGeoQueryWithIndexedShapeWithFLS() { } else { requestBuilder.setQuery(shapeQuery3); } - result = requestBuilder.get(); - assertNoFailures(result); - assertHitCount(result, 0); + assertHitCountAndNoFailures(requestBuilder, 0); } 
public void testTermsLookupOnIndexWithFLS() { @@ -1118,6 +1121,7 @@ public void testScroll() throws Exception { break; } + response.decRef(); response = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) ).prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueMinutes(1L)).get(); @@ -1126,6 +1130,7 @@ public void testScroll() throws Exception { } finally { if (response != null) { String scrollId = response.getScrollId(); + response.decRef(); if (scrollId != null) { client().prepareClearScroll().addScrollId(scrollId).get(); } @@ -1155,23 +1160,23 @@ public void testPointInTimeId() throws Exception { refresh("test"); String pitId = openPointInTime("user1", TimeValue.timeValueMinutes(1), "test"); - SearchResponse response = null; try { for (int from = 0; from < numDocs; from++) { - response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ) - .prepareSearch() - .setPointInTime(new PointInTimeBuilder(pitId)) - .setSize(1) - .setFrom(from) - .setQuery(constantScoreQuery(termQuery("field1", "value1"))) - .setFetchSource(true) - .get(); - assertThat(response.getHits().getTotalHits().value, is((long) numDocs)); - assertThat(response.getHits().getHits().length, is(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitId)) + .setSize(1) + .setFrom(from) + .setQuery(constantScoreQuery(termQuery("field1", "value1"))) + .setFetchSource(true), + response -> { + assertThat(response.getHits().getTotalHits().value, is((long) numDocs)); + assertThat(response.getHits().getHits().length, is(1)); + 
assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + } + ); } } finally { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); @@ -1191,26 +1196,35 @@ public void testQueryCache() throws Exception { int max = scaledRandomIntBetween(4, 32); for (int i = 0; i < max; i++) { - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setQuery(constantScoreQuery(termQuery("field1", "value1"))).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(constantScoreQuery(termQuery("field1", "value1"))) - .get(); - assertHitCount(response, 0); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(constantScoreQuery(termQuery("field1", "value1"))), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + } + ); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(constantScoreQuery(termQuery("field1", "value1"))), + 0 + ); String multipleFieldsUser = randomFrom("user5", "user6", "user7"); - response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, 
basicAuthHeaderValue(multipleFieldsUser, USERS_PASSWD)) - ).prepareSearch("test").setQuery(constantScoreQuery(termQuery("field1", "value1"))).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(3)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + assertResponse( + client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(multipleFieldsUser, USERS_PASSWD)) + ).prepareSearch("test").setQuery(constantScoreQuery(termQuery("field1", "value1"))), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(3)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + } + ); } } @@ -1250,6 +1264,7 @@ public void testScrollWithQueryCache() { assertThat(user2SearchResponse.getHits().getTotalHits().value, is((long) 0)); assertThat(user2SearchResponse.getHits().getHits().length, is(0)); } else { + user2SearchResponse.decRef(); // make sure scroll is empty user2SearchResponse = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)) @@ -1259,6 +1274,7 @@ public void testScrollWithQueryCache() { if (randomBoolean()) { // maybe reuse the scroll even if empty client().prepareClearScroll().addScrollId(user2SearchResponse.getScrollId()).get(); + user2SearchResponse.decRef(); user2SearchResponse = null; } } @@ -1279,6 +1295,7 @@ public void testScrollWithQueryCache() { assertThat(user1SearchResponse.getHits().getAt(0).getSourceAsMap().get("field1"), 
is("value1")); scrolledDocsUser1++; } else { + user1SearchResponse.decRef(); user1SearchResponse = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) ).prepareSearchScroll(user1SearchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(10L)).get(); @@ -1295,6 +1312,7 @@ public void testScrollWithQueryCache() { if (user1SearchResponse.getScrollId() != null) { client().prepareClearScroll().addScrollId(user1SearchResponse.getScrollId()).get(); } + user1SearchResponse.decRef(); user1SearchResponse = null; scrolledDocsUser1 = 0; } @@ -1305,12 +1323,14 @@ public void testScrollWithQueryCache() { } finally { if (user1SearchResponse != null) { String scrollId = user1SearchResponse.getScrollId(); + user1SearchResponse.decRef(); if (scrollId != null) { client().prepareClearScroll().addScrollId(scrollId).get(); } } if (user2SearchResponse != null) { String scrollId = user2SearchResponse.getScrollId(); + user2SearchResponse.decRef(); if (scrollId != null) { client().prepareClearScroll().addScrollId(scrollId).get(); } @@ -1329,25 +1349,29 @@ public void testRequestCache() throws Exception { int max = scaledRandomIntBetween(4, 32); for (int i = 0; i < max; i++) { Boolean requestCache = randomFrom(true, null); - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setSize(0).setQuery(termQuery("field1", "value1")).setRequestCache(requestCache).get(); - assertNoFailures(response); - assertHitCount(response, 1); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .setSize(0) - .setQuery(termQuery("field1", "value1")) - .setRequestCache(requestCache) - .get(); - assertNoFailures(response); - assertHitCount(response, 0); + assertHitCountAndNoFailures( + 
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache), + 1 + ); + assertHitCountAndNoFailures( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache), + 0 + ); String multipleFieldsUser = randomFrom("user5", "user6", "user7"); - response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(multipleFieldsUser, USERS_PASSWD)) - ).prepareSearch("test").setSize(0).setQuery(termQuery("field1", "value1")).setRequestCache(requestCache).get(); - assertNoFailures(response); - assertHitCount(response, 1); + assertHitCountAndNoFailures( + client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(multipleFieldsUser, USERS_PASSWD)) + ).prepareSearch("test").setSize(0).setQuery(termQuery("field1", "value1")).setRequestCache(requestCache), + 1 + ); } } @@ -1371,103 +1395,132 @@ public void testFields() throws Exception { .get(); // user1 is granted access to field1 only: - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").addStoredField("field1").addStoredField("field2").addStoredField("field3").get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3"), + response -> { + 
assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + } + ); // user2 is granted access to field2 only: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("field1") - .addStoredField("field2") - .addStoredField("field3") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3"), + response -> { + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + } + ); // user3 is granted access to field1 and field2: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("field1") - .addStoredField("field2") - .addStoredField("field3") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3"), + response -> { + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + 
assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + } + ); // user4 is granted access to no fields: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("field1") - .addStoredField("field2") - .addStoredField("field3") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3"), + response -> assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)) + ); // user5 has no field level security configured: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("field1") - .addStoredField("field2") - .addStoredField("field3") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); - assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); - assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3"), + response -> { + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), 
equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + } + ); // user6 has field level security configured with access to field*: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("field1") - .addStoredField("field2") - .addStoredField("field3") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); - assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); - assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3"), + response -> { + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + } + ); // user7 has access to all fields due to a mix of roles without field level security and with: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("field1") - .addStoredField("field2") - .addStoredField("field3") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); - 
assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); - assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3"), + response -> { + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + } + ); // user8 has field level security configured with access to field1 and field2: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("field1") - .addStoredField("field2") - .addStoredField("field3") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3"), + response -> { + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + 
assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + } + ); // user1 is granted access to field1 only, and so should be able to load it by alias: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("alias") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getFields().get("alias").getValue(), equalTo("value1")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("alias"), + response -> { + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("alias").getValue(), equalTo("value1")); + } + ); // user2 is not granted access to field1, and so should not be able to load it by alias: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addStoredField("alias") - .get(); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("alias"), + response -> assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)) + ); } public void testSource() throws Exception { @@ -1478,67 +1531,89 @@ public void testSource() throws Exception { .get(); // user1 is granted access to field1 only: - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").get(); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - 
assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + } + ); // user2 is granted access to field2 only: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + } + ); // user3 is granted access to field1 and field2: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + 
assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + } + ); // user4 is granted access to no fields: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test"), + response -> assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)) + ); // user5 has no field level security configured: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3")); + } + ); // user6 has field level security configured with access to 
field*: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3")); + } + ); // user7 has access to all fields - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); + 
assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3")); + } + ); // user8 has field level security configured with access to field1 and field2: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) - .prepareSearch("test") - .get(); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareSearch("test"), + response -> { + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + } + ); } public void testSort() { @@ -1548,45 +1623,52 @@ public void testSort() { prepareIndex("test").setId("1").setSource("field1", 1d, "field2", 2d).setRefreshPolicy(IMMEDIATE).get(); // user1 is granted to use field1, so it is included in the sort_values - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").addSort("field1", SortOrder.ASC).get(); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(1L)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) 
+ .prepareSearch("test") + .addSort("field1", SortOrder.ASC), + response -> assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(1L)) + ); // user2 is not granted to use field1, so the default missing sort value is included - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addSort("field1", SortOrder.ASC) - .get(); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(Long.MAX_VALUE)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addSort("field1", SortOrder.ASC), + response -> assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(Long.MAX_VALUE)) + ); // user1 is not granted to use field2, so the default missing sort value is included - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .addSort("field2", SortOrder.ASC) - .get(); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(Long.MAX_VALUE)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addSort("field2", SortOrder.ASC), + response -> assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(Long.MAX_VALUE)) + ); // user2 is granted to use field2, so it is included in the sort_values - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addSort("field2", SortOrder.ASC) - .get(); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(2L)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + 
.prepareSearch("test") + .addSort("field2", SortOrder.ASC), + response -> assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(2L)) + ); // user1 is granted to use field1, so it is included in the sort_values when using its alias: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .addSort("alias", SortOrder.ASC) - .get(); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(1L)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addSort("alias", SortOrder.ASC), + response -> assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(1L)) + ); // user2 is not granted to use field1, so the default missing sort value is included when using its alias: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addSort("alias", SortOrder.ASC) - .get(); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(Long.MAX_VALUE)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addSort("alias", SortOrder.ASC), + response -> assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(Long.MAX_VALUE)) + ); } public void testHighlighting() { @@ -1600,42 +1682,56 @@ public void testHighlighting() { .get(); // user1 has access to field1, so the highlight should be visible: - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setQuery(matchQuery("field1", "value1")).highlighter(new HighlightBuilder().field("field1")).get(); - assertHitCount(response, 1); - SearchHit hit = 
response.getHits().iterator().next(); - assertEquals(hit.getHighlightFields().size(), 1); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .highlighter(new HighlightBuilder().field("field1")), + response -> { + assertHitCount(response, 1); + SearchHit hit = response.getHits().iterator().next(); + assertEquals(hit.getHighlightFields().size(), 1); + } + ); // user2 has no access to field1, so the highlight should not be visible: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(matchQuery("field2", "value2")) - .highlighter(new HighlightBuilder().field("field1")) - .get(); - assertHitCount(response, 1); - hit = response.getHits().iterator().next(); - assertEquals(hit.getHighlightFields().size(), 0); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .highlighter(new HighlightBuilder().field("field1")), + response -> { + assertHitCount(response, 1); + var hit = response.getHits().iterator().next(); + assertEquals(hit.getHighlightFields().size(), 0); + } + ); // user1 has access to field1, so the highlight on its alias should be visible: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(matchQuery("field1", "value1")) - .highlighter(new HighlightBuilder().field("alias")) - .get(); - assertHitCount(response, 1); - hit = response.getHits().iterator().next(); - assertEquals(hit.getHighlightFields().size(), 1); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", 
USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .highlighter(new HighlightBuilder().field("alias")), + response -> { + assertHitCount(response, 1); + var hit = response.getHits().iterator().next(); + assertEquals(hit.getHighlightFields().size(), 1); + } + ); // user2 has no access to field1, so the highlight on its alias should not be visible: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(matchQuery("field2", "value2")) - .highlighter(new HighlightBuilder().field("alias")) - .get(); - assertHitCount(response, 1); - hit = response.getHits().iterator().next(); - assertEquals(hit.getHighlightFields().size(), 0); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .highlighter(new HighlightBuilder().field("alias")), + response -> { + assertHitCount(response, 1); + var hit = response.getHits().iterator().next(); + assertEquals(hit.getHighlightFields().size(), 0); + } + ); } public void testAggs() { @@ -1646,45 +1742,52 @@ public void testAggs() { prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); // user1 is authorized to use field1, so buckets are include for a term agg on field1 - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").addAggregation(AggregationBuilders.terms("_name").field("field1")).get(); - assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1").getDocCount(), equalTo(1L)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + 
.addAggregation(AggregationBuilders.terms("_name").field("field1")), + response -> assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1").getDocCount(), equalTo(1L)) + ); // user2 is not authorized to use field1, so no buckets are include for a term agg on field1 - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addAggregation(AggregationBuilders.terms("_name").field("field1")) - .get(); - assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1"), nullValue()); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.terms("_name").field("field1")), + response -> assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1"), nullValue()) + ); // user1 is not authorized to use field2, so no buckets are include for a term agg on field2 - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .addAggregation(AggregationBuilders.terms("_name").field("field2")) - .get(); - assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value2"), nullValue()); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.terms("_name").field("field2")), + response -> assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value2"), nullValue()) + ); // user2 is authorized to use field2, so buckets are include for a term agg on field2 - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - 
.prepareSearch("test") - .addAggregation(AggregationBuilders.terms("_name").field("field2")) - .get(); - assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value2").getDocCount(), equalTo(1L)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.terms("_name").field("field2")), + response -> assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value2").getDocCount(), equalTo(1L)) + ); // user1 is authorized to use field1, so buckets are include for a term agg on its alias: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .addAggregation(AggregationBuilders.terms("_name").field("alias")) - .get(); - assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1").getDocCount(), equalTo(1L)); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.terms("_name").field("alias")), + response -> assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1").getDocCount(), equalTo(1L)) + ); // user2 is not authorized to use field1, so no buckets are include for a term agg on its alias: - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) - .prepareSearch("test") - .addAggregation(AggregationBuilders.terms("_name").field("alias")) - .get(); - assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1"), nullValue()); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + 
.addAggregation(AggregationBuilders.terms("_name").field("alias")), + response -> assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1"), nullValue()) + ); } public void testTVApi() throws Exception { @@ -1913,12 +2016,16 @@ public void testParentChild() throws Exception { } private void verifyParentChild() { - SearchResponse searchResponse = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setQuery(hasChildQuery("child", termQuery("field1", "yellow"), ScoreMode.None)).get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasChildQuery("child", termQuery("field1", "yellow"), ScoreMode.None)), + searchResponse -> { + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + } + ); assertHitCount( client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) @@ -1928,13 +2035,16 @@ private void verifyParentChild() { ); // Perform the same checks, but using an alias for field1. 
- searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(hasChildQuery("child", termQuery("alias", "yellow"), ScoreMode.None)) - .get(); - assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasChildQuery("child", termQuery("alias", "yellow"), ScoreMode.None)), + searchResponse -> { + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + } + ); assertHitCount( client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) @@ -1991,22 +2101,29 @@ public void testQuery_withRoleWithFieldWildcards() { prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); // user6 has access to all fields, so the query should match with the document: - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD)) - ).prepareSearch("test").setQuery(matchQuery("field1", "value1")).get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test") 
+ .setQuery(matchQuery("field1", "value1")), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + } + ); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) - .prepareSearch("test") - .setQuery(matchQuery("field2", "value2")) - .get(); - assertHitCount(response, 1); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")), + response -> { + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + } + ); } public void testExistQuery() { @@ -2140,76 +2257,83 @@ public void testLookupRuntimeFields() throws Exception { .sort("field1") .runtimeMappings(Map.of("host", lookupField)) ); - SearchResponse response; // user1 has access to field1 - response = client().filterWithHeader(Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .search(request) - .actionGet(); - assertHitCount(response, 2); - { - SearchHit hit0 = response.getHits().getHits()[0]; - assertThat(hit0.getDocumentFields().keySet(), 
equalTo(Set.of("field1", "host"))); - assertThat(hit0.field("field1").getValues(), equalTo(List.of("192.168.1.1"))); - assertThat(hit0.field("host").getValues(), equalTo(List.of(Map.of("field1", List.of("192.168.1.1"))))); - } - { - SearchHit hit1 = response.getHits().getHits()[1]; - assertThat(hit1.getDocumentFields().keySet(), equalTo(Set.of("field1", "host"))); - assertThat(hit1.field("field1").getValues(), equalTo(List.of("192.168.1.2"))); - assertThat(hit1.field("host").getValues(), equalTo(List.of(Map.of("field1", List.of("192.168.1.2"))))); - } + assertResponse( + client().filterWithHeader(Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))).search(request), + response -> { + assertHitCount(response, 2); + { + SearchHit hit0 = response.getHits().getHits()[0]; + assertThat(hit0.getDocumentFields().keySet(), equalTo(Set.of("field1", "host"))); + assertThat(hit0.field("field1").getValues(), equalTo(List.of("192.168.1.1"))); + assertThat(hit0.field("host").getValues(), equalTo(List.of(Map.of("field1", List.of("192.168.1.1"))))); + } + { + SearchHit hit1 = response.getHits().getHits()[1]; + assertThat(hit1.getDocumentFields().keySet(), equalTo(Set.of("field1", "host"))); + assertThat(hit1.field("field1").getValues(), equalTo(List.of("192.168.1.2"))); + assertThat(hit1.field("host").getValues(), equalTo(List.of(Map.of("field1", List.of("192.168.1.2"))))); + } + } + ); // user3 has access to field1, field2 - response = client().filterWithHeader(Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) - .search(request) - .actionGet(); - assertHitCount(response, 2); - { - SearchHit hit0 = response.getHits().getHits()[0]; - assertThat(hit0.getDocumentFields().keySet(), equalTo(Set.of("field1", "field2", "host"))); - assertThat(hit0.field("field1").getValues(), equalTo(List.of("192.168.1.1"))); - assertThat(hit0.field("field2").getValues(), equalTo(List.of("out of memory"))); - assertThat( - hit0.field("host").getValues(), - 
equalTo(List.of(Map.of("field1", List.of("192.168.1.1"), "field2", List.of("windows")))) - ); - } - { - SearchHit hit1 = response.getHits().getHits()[1]; - assertThat(hit1.getDocumentFields().keySet(), equalTo(Set.of("field1", "field2", "host"))); - assertThat(hit1.field("field1").getValues(), equalTo(List.of("192.168.1.2"))); - assertThat(hit1.field("field2").getValues(), equalTo(List.of("authentication fails"))); - assertThat( - hit1.field("host").getValues(), - equalTo(List.of(Map.of("field1", List.of("192.168.1.2"), "field2", List.of("macos")))) - ); - } + assertResponse( + client().filterWithHeader(Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))).search(request), + response -> { + assertHitCount(response, 2); + { + SearchHit hit0 = response.getHits().getHits()[0]; + assertThat(hit0.getDocumentFields().keySet(), equalTo(Set.of("field1", "field2", "host"))); + assertThat(hit0.field("field1").getValues(), equalTo(List.of("192.168.1.1"))); + assertThat(hit0.field("field2").getValues(), equalTo(List.of("out of memory"))); + assertThat( + hit0.field("host").getValues(), + equalTo(List.of(Map.of("field1", List.of("192.168.1.1"), "field2", List.of("windows")))) + ); + } + { + SearchHit hit1 = response.getHits().getHits()[1]; + assertThat(hit1.getDocumentFields().keySet(), equalTo(Set.of("field1", "field2", "host"))); + assertThat(hit1.field("field1").getValues(), equalTo(List.of("192.168.1.2"))); + assertThat(hit1.field("field2").getValues(), equalTo(List.of("authentication fails"))); + assertThat( + hit1.field("host").getValues(), + equalTo(List.of(Map.of("field1", List.of("192.168.1.2"), "field2", List.of("macos")))) + ); + } + } + ); // user6 has access to field1, field2, and field3 - response = client().filterWithHeader(Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) - .search(request) - .actionGet(); - assertHitCount(response, 2); - { - SearchHit hit0 = response.getHits().getHits()[0]; - 
assertThat(hit0.getDocumentFields().keySet(), equalTo(Set.of("field1", "field2", "field3", "host"))); - assertThat(hit0.field("field1").getValues(), equalTo(List.of("192.168.1.1"))); - assertThat(hit0.field("field2").getValues(), equalTo(List.of("out of memory"))); - assertThat(hit0.field("field3").getValues(), equalTo(List.of("2021-01-20"))); - assertThat( - hit0.field("host").getValues(), - equalTo(List.of(Map.of("field1", List.of("192.168.1.1"), "field2", List.of("windows"), "field3", List.of("canada")))) - ); - } - { - SearchHit hit1 = response.getHits().getHits()[1]; - assertThat(hit1.getDocumentFields().keySet(), equalTo(Set.of("field1", "field2", "field3", "host"))); - assertThat(hit1.field("field1").getValues(), equalTo(List.of("192.168.1.2"))); - assertThat(hit1.field("field2").getValues(), equalTo(List.of("authentication fails"))); - assertThat(hit1.field("field3").getValues(), equalTo(List.of("2021-01-21"))); - assertThat( - hit1.field("host").getValues(), - equalTo(List.of(Map.of("field1", List.of("192.168.1.2"), "field2", List.of("macos"), "field3", List.of("us")))) - ); - } + assertResponse( + client().filterWithHeader(Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))).search(request), + response -> { + assertHitCount(response, 2); + { + SearchHit hit0 = response.getHits().getHits()[0]; + assertThat(hit0.getDocumentFields().keySet(), equalTo(Set.of("field1", "field2", "field3", "host"))); + assertThat(hit0.field("field1").getValues(), equalTo(List.of("192.168.1.1"))); + assertThat(hit0.field("field2").getValues(), equalTo(List.of("out of memory"))); + assertThat(hit0.field("field3").getValues(), equalTo(List.of("2021-01-20"))); + assertThat( + hit0.field("host").getValues(), + equalTo( + List.of(Map.of("field1", List.of("192.168.1.1"), "field2", List.of("windows"), "field3", List.of("canada"))) + ) + ); + } + { + SearchHit hit1 = response.getHits().getHits()[1]; + assertThat(hit1.getDocumentFields().keySet(), 
equalTo(Set.of("field1", "field2", "field3", "host"))); + assertThat(hit1.field("field1").getValues(), equalTo(List.of("192.168.1.2"))); + assertThat(hit1.field("field2").getValues(), equalTo(List.of("authentication fails"))); + assertThat(hit1.field("field3").getValues(), equalTo(List.of("2021-01-21"))); + assertThat( + hit1.field("host").getValues(), + equalTo(List.of(Map.of("field1", List.of("192.168.1.2"), "field2", List.of("macos"), "field3", List.of("us")))) + ); + } + } + ); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java index 0e799589409f8..0566784e28153 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.settings.SecureString; @@ -35,6 +34,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static 
org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.equalTo; @@ -134,52 +134,67 @@ public void testSearchResolveWildcardsRegexs() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setQuery(QueryBuilders.termQuery("_id", "1")).get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - Map source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(1)); - assertThat((String) source.get("field1"), equalTo("value1")); - - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("my_alias") - .setQuery(QueryBuilders.termQuery("_id", "1")) - .get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(1)); - assertThat((String) source.get("field2"), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + Map source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(1)); + assertThat((String) source.get("field1"), equalTo("value1")); + } + ); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("my_alias") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + 
assertThat(response.getHits().getHits().length, equalTo(1)); + var source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(1)); + assertThat((String) source.get("field2"), equalTo("value2")); + } + ); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("an_alias") - .setQuery(QueryBuilders.termQuery("_id", "1")) - .get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(1)); - assertThat((String) source.get("field3"), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("an_alias") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + var source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(1)); + assertThat((String) source.get("field3"), equalTo("value3")); + } + ); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("*_alias") - .setQuery(QueryBuilders.termQuery("_id", "1")) - .get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(2)); - assertThat((String) source.get("field2"), equalTo("value2")); - assertThat((String) source.get("field3"), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("*_alias") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + var source = 
response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(2)); + assertThat((String) source.get("field2"), equalTo("value2")); + assertThat((String) source.get("field3"), equalTo("value3")); + } + ); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("*_alias", "t*") - .setQuery(QueryBuilders.termQuery("_id", "1")) - .get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(3)); - assertThat((String) source.get("field1"), equalTo("value1")); - assertThat((String) source.get("field2"), equalTo("value2")); - assertThat((String) source.get("field3"), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("*_alias", "t*") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + var source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(3)); + assertThat((String) source.get("field1"), equalTo("value1")); + assertThat((String) source.get("field2"), equalTo("value2")); + assertThat((String) source.get("field3"), equalTo("value3")); + } + ); } public void testSearchResolveDataStreams() throws Exception { @@ -201,52 +216,68 @@ public void testSearchResolveDataStreams() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); - SearchResponse response = client().filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)) - ).prepareSearch("test").setQuery(QueryBuilders.termQuery("_id", "1")).get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - Map source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(1)); - 
assertThat((String) source.get("field1"), equalTo("value1")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + Map source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(1)); + assertThat((String) source.get("field1"), equalTo("value1")); + } + ); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("my_alias") - .setQuery(QueryBuilders.termQuery("_id", "1")) - .get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(1)); - assertThat((String) source.get("field2"), equalTo("value2")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("my_alias") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + var source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(1)); + assertThat((String) source.get("field2"), equalTo("value2")); + } + ); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("an_alias") - .setQuery(QueryBuilders.termQuery("_id", "1")) - .get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(1)); - assertThat((String) source.get("field3"), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, 
basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("an_alias") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + var source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(1)); + assertThat((String) source.get("field3"), equalTo("value3")); + } + ); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("*_alias") - .setQuery(QueryBuilders.termQuery("_id", "1")) - .get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(2)); - assertThat((String) source.get("field2"), equalTo("value2")); - assertThat((String) source.get("field3"), equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("*_alias") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + var source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(2)); + assertThat((String) source.get("field2"), equalTo("value2")); + assertThat((String) source.get("field3"), equalTo("value3")); + } + ); - response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) - .prepareSearch("*_alias", "t*") - .setQuery(QueryBuilders.termQuery("_id", "1")) - .get(); - assertThat(response.getHits().getHits().length, equalTo(1)); - source = response.getHits().getHits()[0].getSourceAsMap(); - assertThat(source.size(), equalTo(3)); - assertThat((String) source.get("field1"), equalTo("value1")); - assertThat((String) source.get("field2"), equalTo("value2")); - assertThat((String) source.get("field3"), 
equalTo("value3")); + assertResponse( + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("*_alias", "t*") + .setQuery(QueryBuilders.termQuery("_id", "1")), + response -> { + assertThat(response.getHits().getHits().length, equalTo(1)); + var source = response.getHits().getHits()[0].getSourceAsMap(); + assertThat(source.size(), equalTo(3)); + assertThat((String) source.get("field1"), equalTo("value1")); + assertThat((String) source.get("field2"), equalTo("value2")); + assertThat((String) source.get("field3"), equalTo("value3")); + } + ); } private void putComposableIndexTemplate(String id, List patterns) throws IOException { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java index afe9e68716579..d4375d15e6a6d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Strings; @@ -24,6 +23,7 @@ import java.util.Map; import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.greaterThan; @@ -102,32 +102,34 @@ public void testSearchAndMSearch() throws Exception { final String field = "foo"; indexRandom(true, prepareIndex(index).setSource(field, "bar")); - SearchResponse response = prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).get(); - final long hits = response.getHits().getTotalHits().value; - assertThat(hits, greaterThan(0L)); - response = client().filterWithHeader( - singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD)) - ).prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).get(); - assertEquals(response.getHits().getTotalHits().value, hits); - - final long multiHits; - MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() - .add(prepareSearch(index).setQuery(QueryBuilders.matchAllQuery())) - .get(); - try { - multiHits = multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value; + assertResponse(prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()), response -> { + final long hits = response.getHits().getTotalHits().value; assertThat(hits, greaterThan(0L)); - } finally { - multiSearchResponse.decRef(); - } - multiSearchResponse = client().filterWithHeader( - singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD)) - ).prepareMultiSearch().add(prepareSearch(index).setQuery(QueryBuilders.matchAllQuery())).get(); - try { - assertEquals(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value, multiHits); - } finally { - multiSearchResponse.decRef(); - } + assertResponse( + client().filterWithHeader( + singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD)) + ).prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()), + response2 -> assertEquals(response2.getHits().getTotalHits().value, hits) + ); + final long multiHits; + MultiSearchResponse multiSearchResponse = 
client().prepareMultiSearch() + .add(prepareSearch(index).setQuery(QueryBuilders.matchAllQuery())) + .get(); + try { + multiHits = multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value; + assertThat(hits, greaterThan(0L)); + } finally { + multiSearchResponse.decRef(); + } + multiSearchResponse = client().filterWithHeader( + singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD)) + ).prepareMultiSearch().add(prepareSearch(index).setQuery(QueryBuilders.matchAllQuery())).get(); + try { + assertEquals(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits().value, multiHits); + } finally { + multiSearchResponse.decRef(); + } + }); } public void testGetIndex() throws Exception { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java index 2e5d92839d3f7..08fb0c79a076c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.MultiSearchResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -39,7 +38,9 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -144,19 +145,13 @@ public void testSingleRole() throws Exception { Client client = client(); // no specifying an index, should replace indices with the permitted ones (test & test1) - SearchResponse searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2); + assertHitCountAndNoFailures(prepareSearch().setQuery(matchAllQuery()), 2); // _all should expand to all the permitted indices - searchResponse = client.prepareSearch("_all").setQuery(matchAllQuery()).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2); + assertHitCountAndNoFailures(client.prepareSearch("_all").setQuery(matchAllQuery()), 2); // wildcards should expand to all the permitted indices - searchResponse = client.prepareSearch("test*").setQuery(matchAllQuery()).get(); - assertNoFailures(searchResponse); - assertHitCount(searchResponse, 2); + assertHitCountAndNoFailures(client.prepareSearch("test*").setQuery(matchAllQuery()), 2); try { client.prepareSearch("test", "test2").setQuery(matchAllQuery()).get(); @@ -174,7 +169,7 @@ public void testSingleRole() throws Exception { MultiSearchResponse.Item[] items = msearchResponse.getResponses(); assertThat(items.length, is(2)); assertThat(items[0].isFailure(), is(false)); - searchResponse = items[0].getResponse(); + var 
searchResponse = items[0].getResponse(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 1); assertThat(items[1].isFailure(), is(false)); @@ -252,18 +247,18 @@ public void testMultipleRoles() throws Exception { Client client = client(); - SearchResponse response = client.filterWithHeader( - Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD)) - ).prepareSearch("a").get(); - assertNoFailures(response); - assertHitCount(response, 1); + assertHitCountAndNoFailures( + client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD))) + .prepareSearch("a"), + 1 + ); String[] indices = randomDouble() < 0.3 ? new String[] { "_all" } : randomBoolean() ? new String[] { "*" } : new String[] {}; - response = client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD))) - .prepareSearch(indices) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); + assertHitCountAndNoFailures( + client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD))) + .prepareSearch(indices), + 1 + ); try { indices = randomBoolean() ? new String[] { "a", "b" } : new String[] { "b", "a" }; @@ -279,25 +274,25 @@ public void testMultipleRoles() throws Exception { assertThat(e.status(), is(RestStatus.FORBIDDEN)); } - response = client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", USERS_PASSWD))) - .prepareSearch("b") - .get(); - assertNoFailures(response); - assertHitCount(response, 1); + assertHitCountAndNoFailures( + client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", USERS_PASSWD))) + .prepareSearch("b"), + 1 + ); indices = randomBoolean() ? 
new String[] { "a", "b" } : new String[] { "b", "a" }; - response = client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", USERS_PASSWD))) - .prepareSearch(indices) - .get(); - assertNoFailures(response); - assertHitCount(response, 2); + assertHitCountAndNoFailures( + client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", USERS_PASSWD))) + .prepareSearch(indices), + 2 + ); indices = randomDouble() < 0.3 ? new String[] { "_all" } : randomBoolean() ? new String[] { "*" } : new String[] {}; - response = client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", USERS_PASSWD))) - .prepareSearch(indices) - .get(); - assertNoFailures(response); - assertHitCount(response, 2); + assertHitCountAndNoFailures( + client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", USERS_PASSWD))) + .prepareSearch(indices), + 2 + ); } public void testMultiNamesWorkCorrectly() { @@ -313,8 +308,10 @@ public void testMultiNamesWorkCorrectly() { Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", USERS_PASSWD)) ); - final SearchResponse searchResponse = userAClient.prepareSearch("alias1").setSize(0).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + assertResponse( + userAClient.prepareSearch("alias1").setSize(0), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)) + ); final ElasticsearchSecurityException e1 = expectThrows( ElasticsearchSecurityException.class, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityCachePermissionTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityCachePermissionTests.java index 30f8507325a7e..82622b03d8d52 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityCachePermissionTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityCachePermissionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.integration; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.test.SecurityIntegTestCase; @@ -16,6 +15,7 @@ import org.junit.Before; import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; @@ -47,15 +47,19 @@ public void loadData() { } public void testThatTermsFilterQueryDoesntLeakData() { - SearchResponse response = prepareSearch("data").setQuery( - QueryBuilders.constantScoreQuery(QueryBuilders.termsLookupQuery("token", new TermsLookup("tokens", "1", "tokens"))) - ).get(); - assertThat(response.isTimedOut(), is(false)); - assertThat(response.getHits().getHits().length, is(1)); + assertResponse( + prepareSearch("data").setQuery( + QueryBuilders.constantScoreQuery(QueryBuilders.termsLookupQuery("token", new TermsLookup("tokens", "1", "tokens"))) + ), + response -> { + assertThat(response.isTimedOut(), is(false)); + assertThat(response.getHits().getHits().length, is(1)); + } + ); // Repeat with unauthorized user!!!! 
try { - response = client().filterWithHeader( + var response = client().filterWithHeader( singletonMap( "Authorization", basicAuthHeaderValue(READ_ONE_IDX_USER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java index c1925b71608c1..7fc4c1520f9c6 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java @@ -92,21 +92,14 @@ public void testFetchAllByEntityWithBrokenScroll() { false, 1 ); - SearchResponse response = new SearchResponse( - internalResponse, - scrollId, - 1, - 1, - 0, - 0, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); Answer returnResponse = invocation -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(response); + ActionListener.respondAndRelease( + listener, + new SearchResponse(internalResponse, scrollId, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) + ); return null; }; doAnswer(returnResponse).when(client).search(eq(request), any()); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index cb07cd76a5faa..d6aea4c64b246 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ 
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; @@ -81,6 +80,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; @@ -319,9 +319,10 @@ private void testAddUserAndRoleThenAuth(String username, String roleName) { prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); String token = basicAuthHeaderValue(username, new SecureString("s3krit-password")); - SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); - - assertEquals(1L, searchResp.getHits().getTotalHits().value); + assertResponse( + client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx"), + searchResp -> assertEquals(1L, searchResp.getHits().getTotalHits().value) + ); 
assertClusterHealthOnlyAuthorizesWhenAnonymousRoleActive(token); } @@ -341,9 +342,10 @@ public void testUpdatingUserAndAuthentication() throws Exception { // Index a document with the default test user prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); String token = basicAuthHeaderValue("joe", new SecureString("s3krit-password")); - SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); - - assertEquals(1L, searchResp.getHits().getTotalHits().value); + assertResponse( + client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx"), + searchResp -> assertEquals(1L, searchResp.getHits().getTotalHits().value) + ); preparePutUser("joe", "s3krit-password2", hasher, SecuritySettingsSource.TEST_ROLE).get(); @@ -356,8 +358,10 @@ public void testUpdatingUserAndAuthentication() throws Exception { } token = basicAuthHeaderValue("joe", new SecureString("s3krit-password2")); - searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); - assertEquals(1L, searchResp.getHits().getTotalHits().value); + assertResponse( + client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx"), + searchResp -> assertEquals(1L, searchResp.getHits().getTotalHits().value) + ); } public void testCreateDeleteAuthenticate() { @@ -375,9 +379,10 @@ public void testCreateDeleteAuthenticate() { // Index a document with the default test user prepareIndex("idx").setId("1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); String token = basicAuthHeaderValue("joe", new SecureString("s3krit-password")); - SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); - - assertEquals(1L, searchResp.getHits().getTotalHits().value); + assertResponse( + 
client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx"), + searchResp -> assertEquals(1L, searchResp.getHits().getTotalHits().value) + ); DeleteUserResponse response = new DeleteUserRequestBuilder(client()).username("joe").get(); assertThat(response.found(), is(true)); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java index 7e5fd3a8717e2..f34983f7f125c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java @@ -442,12 +442,7 @@ private static T expectThrows(Class expectedType, Searc } private static void assertReturnedIndices(SearchRequestBuilder searchRequestBuilder, String... indices) { - var searchResponse = searchRequestBuilder.get(); - try { - assertReturnedIndices(searchResponse, indices); - } finally { - searchResponse.decRef(); - } + assertResponse(searchRequestBuilder, searchResponse -> assertReturnedIndices(searchResponse, indices)); } private static void assertReturnedIndices(SearchResponse searchResponse, String... 
indices) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java index c5da26deaf03d..1b62c79236a9c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java @@ -24,6 +24,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -46,29 +47,32 @@ public void testScrollIsPerUser() throws Exception { } indexRandom(true, docs); - SearchResponse response = prepareSearch("foo").setScroll(TimeValue.timeValueSeconds(5L)).setQuery(matchAllQuery()).setSize(1).get(); - assertEquals(numDocs, response.getHits().getTotalHits().value); - assertEquals(1, response.getHits().getHits().length); - - if (randomBoolean()) { - response = client().prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueSeconds(5L)).get(); + assertResponse(prepareSearch("foo").setScroll(TimeValue.timeValueSeconds(5L)).setQuery(matchAllQuery()).setSize(1), response -> { assertEquals(numDocs, response.getHits().getTotalHits().value); assertEquals(1, response.getHits().getHits().length); - } - - final String scrollId = response.getScrollId(); - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().filterWithHeader( - Collections.singletonMap( - "Authorization", - UsernamePasswordToken.basicAuthHeaderValue("other", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING) - ) - 
).prepareSearchScroll(scrollId).get() - ); - for (ShardSearchFailure failure : e.shardFailures()) { - assertThat(ExceptionsHelper.unwrapCause(failure.getCause()), instanceOf(SearchContextMissingException.class)); - } + if (randomBoolean()) { + assertResponse( + client().prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueSeconds(5L)), + response2 -> { + assertEquals(numDocs, response2.getHits().getTotalHits().value); + assertEquals(1, response2.getHits().getHits().length); + } + ); + } + final String scrollId = response.getScrollId(); + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().filterWithHeader( + Collections.singletonMap( + "Authorization", + UsernamePasswordToken.basicAuthHeaderValue("other", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING) + ) + ).prepareSearchScroll(scrollId).get() + ); + for (ShardSearchFailure failure : e.shardFailures()) { + assertThat(ExceptionsHelper.unwrapCause(failure.getCause()), instanceOf(SearchContextMissingException.class)); + } + }); } public void testSearchAndClearScroll() throws Exception { @@ -87,12 +91,14 @@ public void testSearchAndClearScroll() throws Exception { do { assertHitCount(response, docs.length); hits += response.getHits().getHits().length; + response.decRef(); response = client().prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueSeconds(5L)).get(); } while (response.getHits().getHits().length != 0); assertThat(hits, equalTo(docs.length)); } finally { clearScroll(response.getScrollId()); + response.decRef(); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java index 66c5b9fa02ab4..f43275c2d8b70 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.security.profile; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; @@ -47,6 +46,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.WAIT_UNTIL; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; import static org.hamcrest.Matchers.containsString; @@ -368,26 +368,31 @@ public void testDomainCaptureForServiceToken() throws IOException { .get(); } - private void assertAccessToken(CreateTokenResponse createTokenResponse) throws IOException { + private void assertAccessToken(CreateTokenResponse createTokenResponse) { client().filterWithHeader(Map.of("Authorization", "Bearer " + createTokenResponse.getTokenString())) .admin() .cluster() .prepareHealth() .get(); - final SearchResponse searchResponse = prepareSearch(SecuritySystemIndices.SECURITY_TOKENS_ALIAS).get(); - - final String encodedAuthentication = createTokenResponse.getAuthentication().encode(); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { - final XContentTestUtils.JsonMapView responseView = XContentTestUtils.createJsonMapView( - new 
ByteArrayInputStream(searchHit.getSourceAsString().getBytes(StandardCharsets.UTF_8)) - ); - if (encodedAuthentication.equals(responseView.get("access_token.user_token.authentication"))) { - if (isOtherDomain) { - assertThat(responseView.get("access_token.realm_domain"), equalTo(OTHER_DOMAIN_REALM_MAP)); - } else { - assertThat(responseView.get("access_token.realm_domain"), nullValue()); + assertResponse(prepareSearch(SecuritySystemIndices.SECURITY_TOKENS_ALIAS), searchResponse -> { + final String encodedAuthentication; + try { + encodedAuthentication = createTokenResponse.getAuthentication().encode(); + } catch (IOException e) { + throw new AssertionError(e); + } + for (SearchHit searchHit : searchResponse.getHits().getHits()) { + final XContentTestUtils.JsonMapView responseView = XContentTestUtils.createJsonMapView( + new ByteArrayInputStream(searchHit.getSourceAsString().getBytes(StandardCharsets.UTF_8)) + ); + if (encodedAuthentication.equals(responseView.get("access_token.user_token.authentication"))) { + if (isOtherDomain) { + assertThat(responseView.get("access_token.realm_domain"), equalTo(OTHER_DOMAIN_REALM_MAP)); + } else { + assertThat(responseView.get("access_token.realm_domain"), nullValue()); + } } } - } + }); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index eb702ed281014..8743453d33a35 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -198,47 +198,51 @@ protected void SearchRequest searchRequest = (SearchRequest) request; searchRequests.add(searchRequest); final SearchHit[] hits = 
searchFunction.apply(searchRequest); - final SearchResponse response = new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + ActionListener.respondAndRelease( + listener, + (Response) new SearchResponse( + new SearchResponseSections( + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1 + ), + "_scrollId1", + 1, + 1, + 0, + 1, null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null + null + ) ); - listener.onResponse((Response) response); } else if (TransportSearchScrollAction.TYPE.name().equals(action.name())) { assertThat(request, instanceOf(SearchScrollRequest.class)); final SearchHit[] hits = new SearchHit[0]; - final SearchResponse response = new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + ActionListener.respondAndRelease( + listener, + (Response) new SearchResponse( + new SearchResponseSections( + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1 + ), + "_scrollId1", + 1, + 1, + 0, + 1, null, - null, - false, - false, - null, - 1 - ), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null + null + ) ); - listener.onResponse((Response) response); } else if (TransportClearScrollAction.NAME.equals(action.name())) { assertThat(request, instanceOf(ClearScrollRequest.class)); ClearScrollRequest scrollRequest = (ClearScrollRequest) request; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 2031cd4f7685b..25194ca1e0234 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -275,7 +275,7 @@ public void testGetApiKeys() throws Exception { doAnswer(invocationOnMock -> { searchRequest.set((SearchRequest) invocationOnMock.getArguments()[0]); ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; - listener.onResponse(SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY)); + ActionListener.respondAndRelease(listener, SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY)); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); String[] realmNames = generateRandomStringArray(4, 4, true, true); @@ -336,7 +336,7 @@ public void testInvalidateApiKeys() throws Exception { doAnswer(invocationOnMock -> { searchRequest.set((SearchRequest) invocationOnMock.getArguments()[0]); ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; - listener.onResponse(SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY)); + ActionListener.respondAndRelease(listener, SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY)); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); PlainActionFuture listener = new PlainActionFuture<>(); @@ -427,17 +427,10 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { null, 0 ); - final var searchResponse = new SearchResponse( - internalSearchResponse, - randomAlphaOfLengthBetween(3, 8), - 1, - 1, - 0, - 10, - null, - null + ActionListener.respondAndRelease( + listener, + new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) ); - listener.onResponse(searchResponse); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); @@ -756,33 +749,35 @@ public void testCrossClusterApiKeyUsageStats() { final AtomicReference searchRequest = new AtomicReference<>(); doAnswer(invocationOnMock -> { 
searchRequest.set(invocationOnMock.getArgument(0)); - final var searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits( - searchHits.toArray(SearchHit[]::new), - new TotalHits(searchHits.size(), TotalHits.Relation.EQUAL_TO), - randomFloat(), + final ActionListener listener = invocationOnMock.getArgument(1); + ActionListener.respondAndRelease( + listener, + new SearchResponse( + new InternalSearchResponse( + new SearchHits( + searchHits.toArray(SearchHit[]::new), + new TotalHits(searchHits.size(), TotalHits.Relation.EQUAL_TO), + randomFloat(), + null, + null, + null + ), + null, null, null, - null + false, + null, + 0 ), + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, null, - null, - null, - false, - null, - 0 - ), - randomAlphaOfLengthBetween(3, 8), - 1, - 1, - 0, - 10, - null, - null + null + ) ); - final ActionListener listener = invocationOnMock.getArgument(1); - listener.onResponse(searchResponse); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java index 2dec4eb8ea2b5..8d5d89b4c5054 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java @@ -280,18 +280,10 @@ public void testFindTokensFor() { null, 0 ); - - final SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, - randomAlphaOfLengthBetween(3, 8), - 1, - 1, - 0, - 10, - null, - null + ActionListener.respondAndRelease( + l, + new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) ); - 
l.onResponse(searchResponse); } else if (r instanceof ClearScrollRequest) { l.onResponse(new ClearScrollResponse(true, 1)); } else { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index bcb335c7cf9bc..3b52f86c00ba8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -371,17 +371,10 @@ private void doAnswerWithSearchResult(Client client, ExpressionRoleMapping mappi null, 0 ); - final var searchResponse = new SearchResponse( - internalSearchResponse, - randomAlphaOfLengthBetween(3, 8), - 1, - 1, - 0, - 10, - null, - null + ActionListener.respondAndRelease( + listener, + new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) ); - listener.onResponse(searchResponse); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 53df6e6157282..d229124419cb2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -134,7 +134,7 @@ protected void @Override public void searchScroll(SearchScrollRequest request, ActionListener listener) { - listener.onResponse(SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY)); + 
ActionListener.respondAndRelease(listener, SearchResponse.empty(() -> 1L, SearchResponse.Clusters.EMPTY)); } }; securityIndex = mock(SecurityIndexManager.class); @@ -189,7 +189,7 @@ public void testGetSinglePrivilegeByName() throws Exception { {"term":{"type":{"value":"application-privilege\"""")); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -198,7 +198,7 @@ public void testGetMissingPrivilege() throws InterruptedException, ExecutionExce final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(List.of("myapp"), List.of("admin"), future); final SearchHit[] hits = new SearchHit[0]; - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); final Collection applicationPrivilegeDescriptors = future.get(1, TimeUnit.SECONDS); assertThat(applicationPrivilegeDescriptors, empty()); @@ -225,7 +225,7 @@ public void testGetPrivilegesByApplicationName() throws Exception { {"term":{"type":{"value":"application-privilege\"""")); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -283,7 +283,7 @@ public void testGetPrivilegesByWildcardApplicationName() throws Exception { } final SearchHit[] hits = buildHits(allowExpensiveQueries ? 
sourcePrivileges.subList(1, 4) : sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); // The first and last privilege should not be retrieved assertResult(sourcePrivileges.subList(1, 4), future); } @@ -300,7 +300,7 @@ public void testGetPrivilegesByStarApplicationName() throws Exception { assertThat(query, containsString("{\"term\":{\"type\":{\"value\":\"application-privilege\"")); final SearchHit[] hits = new SearchHit[0]; - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); } public void testGetAllPrivileges() throws Exception { @@ -321,7 +321,7 @@ public void testGetAllPrivileges() throws Exception { assertThat(query, not(containsString("{\"terms\""))); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } @@ -337,7 +337,7 @@ public void testGetPrivilegesCacheByApplicationNames() throws Exception { store.getPrivileges(List.of("myapp", "yourapp"), null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); assertEquals(Set.of("myapp"), store.getApplicationNamesCache().get(Set.of("myapp", "yourapp"))); assertEquals(Set.copyOf(sourcePrivileges), store.getDescriptorsCache().get("myapp")); @@ -369,7 +369,7 @@ public void testGetPrivilegesCacheWithApplicationAndPrivilegeName() throws Excep store.getPrivileges(Collections.singletonList("myapp"), singletonList("user"), future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); // Not 
caching names with no wildcard assertNull(store.getApplicationNamesCache().get(singleton("myapp"))); @@ -388,7 +388,7 @@ public void testGetPrivilegesCacheWithNonExistentApplicationName() throws Except final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(Collections.singletonList("no-such-app"), null, future); final SearchHit[] hits = buildHits(emptyList()); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); assertEquals(emptySet(), store.getApplicationNamesCache().get(singleton("no-such-app"))); assertEquals(0, store.getDescriptorsCache().count()); @@ -405,7 +405,7 @@ public void testGetPrivilegesCacheWithDifferentMatchAllApplicationNames() throws final PlainActionFuture> future = new PlainActionFuture<>(); store.getPrivileges(emptyList(), null, future); final SearchHit[] hits = buildHits(emptyList()); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); assertEquals(emptySet(), store.getApplicationNamesCache().get(singleton("*"))); assertEquals(1, store.getApplicationNamesCache().count()); assertResult(emptyList(), future); @@ -442,7 +442,7 @@ public void testCacheIsClearedByApplicationNameWhenPrivilegesAreModified() throw new ApplicationPrivilegeDescriptor("app2", "priv2b", Set.of("action:2b"), Map.of()) ); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); assertEquals(Set.of("app1", "app2"), store.getApplicationNamesCache().get(singleton("*"))); assertResult(sourcePrivileges, getFuture); @@ -505,7 +505,7 @@ public void testStaleResultsWillNotBeCached() { // Before the results can be cached, invalidate the cache to simulate stale search results store.getDescriptorsAndApplicationNamesCache().invalidateAll(); final SearchHit[] hits = 
buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); // Nothing should be cached since the results are stale assertEquals(0, store.getApplicationNamesCache().count()); @@ -553,7 +553,7 @@ protected void cacheFetchedDescriptors( final PlainActionFuture> future = new PlainActionFuture<>(); store1.getPrivileges(null, null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); // Make sure the caching is about to happen getPrivilegeCountDown.await(5, TimeUnit.SECONDS); @@ -779,7 +779,7 @@ public void testGetPrivilegesWorkWithoutCache() throws Exception { final PlainActionFuture> future = new PlainActionFuture<>(); store1.getPrivileges(singletonList("myapp"), null, future); final SearchHit[] hits = buildHits(sourcePrivileges); - listener.get().onResponse(buildSearchResponse(hits)); + ActionListener.respondAndRelease(listener.get(), buildSearchResponse(hits)); assertResult(sourcePrivileges, future); } diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java index d05acc7a7b368..dde0698056ab2 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java @@ -13,7 +13,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; import 
org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -71,9 +70,11 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -169,12 +170,12 @@ private int createIndexAndIndexDocs(String cluster, String index, int numberOfSh return numDocs; } - public void testSearchAction_MatchAllQuery() { + public void testSearchAction_MatchAllQuery() throws ExecutionException, InterruptedException { testSearchAction(QueryBuilders.matchAllQuery(), true, localOldDocs + localNewDocs + remoteOldDocs + remoteNewDocs, 0); testSearchAction(QueryBuilders.matchAllQuery(), false, localOldDocs + localNewDocs + remoteOldDocs + remoteNewDocs, 0); } - public void testSearchAction_RangeQuery() { + public void testSearchAction_RangeQuery() throws ExecutionException, InterruptedException { testSearchAction( QueryBuilders.rangeQuery("@timestamp").from(timestamp), // This query only matches new documents true, @@ -189,7 +190,7 @@ public void testSearchAction_RangeQuery() { ); } - public void testSearchAction_RangeQueryThatMatchesNoShards() { + public void testSearchAction_RangeQueryThatMatchesNoShards() throws ExecutionException, InterruptedException { testSearchAction( QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents true, @@ -206,17 +207,19 @@ public void testSearchAction_RangeQueryThatMatchesNoShards() { ); } - private void testSearchAction(QueryBuilder query, boolean ccsMinimizeRoundtrips, long expectedHitCount, int 
expectedSkippedShards) { + private void testSearchAction(QueryBuilder query, boolean ccsMinimizeRoundtrips, long expectedHitCount, int expectedSkippedShards) + throws ExecutionException, InterruptedException { SearchSourceBuilder source = new SearchSourceBuilder().query(query); SearchRequest request = new SearchRequest("local_*", "*:remote_*"); request.source(source).setCcsMinimizeRoundtrips(ccsMinimizeRoundtrips); - SearchResponse response = client().search(request).actionGet(); - ElasticsearchAssertions.assertHitCount(response, expectedHitCount); - int expectedTotalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards; - assertThat("Response was: " + response, response.getTotalShards(), is(equalTo(expectedTotalShards))); - assertThat("Response was: " + response, response.getSuccessfulShards(), is(equalTo(expectedTotalShards))); - assertThat("Response was: " + response, response.getFailedShards(), is(equalTo(0))); - assertThat("Response was: " + response, response.getSkippedShards(), is(equalTo(expectedSkippedShards))); + assertResponse(client().search(request), response -> { + ElasticsearchAssertions.assertHitCount(response, expectedHitCount); + int expectedTotalShards = oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards; + assertThat("Response was: " + response, response.getTotalShards(), is(equalTo(expectedTotalShards))); + assertThat("Response was: " + response, response.getSuccessfulShards(), is(equalTo(expectedTotalShards))); + assertThat("Response was: " + response, response.getFailedShards(), is(equalTo(0))); + assertThat("Response was: " + response, response.getSkippedShards(), is(equalTo(expectedSkippedShards))); + }); } public void testGetCheckpointAction_MatchAllQuery() throws InterruptedException { diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java 
b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 84f608b91dc95..41e23b54b0375 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -34,6 +34,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockMustacheScriptEngine; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -312,8 +313,7 @@ protected long watchRecordCount(QueryBuilder query) { } protected long docCount(String index, SearchSourceBuilder source) { - SearchRequestBuilder builder = prepareSearch(index).setSource(source).setSize(0); - return builder.get().getHits().getTotalHits().value; + return SearchResponseUtils.getTotalHitsValue(prepareSearch(index).setSource(source).setSize(0)); } protected SearchResponse searchHistory(SearchSourceBuilder builder) { @@ -416,16 +416,10 @@ protected SearchResponse searchWatchRecords(Consumer reque protected long findNumberOfPerformedActions(String watchName) { refresh(); - SearchResponse searchResponse = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setIndicesOptions( - IndicesOptions.lenientExpandOpen() - ).setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", ExecutionState.EXECUTED.id()))).get(); - long totalHistsValue; - try { - totalHistsValue = searchResponse.getHits().getTotalHits().value; - } finally { - searchResponse.decRef(); - } - return totalHistsValue; + return SearchResponseUtils.getTotalHitsValue( + prepareSearch(HistoryStoreField.DATA_STREAM + 
"*").setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", ExecutionState.EXECUTED.id()))) + ); } protected void assertWatchWithNoActionNeeded(final String watchName, final long expectedWatchActionsWithNoActionNeeded) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java index 49fd7218ed066..1308597b7bcf9 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.core.TimeValue; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; @@ -234,7 +235,7 @@ public void testAckWithRestart() throws Exception { assertThat(ackResponse.getStatus().actionStatus("_id").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKED)); refresh("actions"); - long countAfterAck = prepareSearch("actions").setQuery(matchAllQuery()).get().getHits().getTotalHits().value; + long countAfterAck = SearchResponseUtils.getTotalHitsValue(prepareSearch("actions").setQuery(matchAllQuery())); assertThat(countAfterAck, greaterThanOrEqualTo(1L)); restartWatcherRandomly(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index 
0472722bd80a2..ea9295600fe41 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -385,12 +385,14 @@ private Collection loadWatches(ClusterState clusterState) { } SearchScrollRequest request = new SearchScrollRequest(response.getScrollId()); request.scroll(scrollTimeout); + response.decRef(); response = client.searchScroll(request).actionGet(defaultSearchTimeout); } } finally { if (response != null) { ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); clearScrollRequest.addScrollId(response.getScrollId()); + response.decRef(); client.clearScroll(clearScrollRequest).actionGet(scrollTimeout); } }