From 383b4e5041d20be48fc559774664015870b86be1 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 10 Sep 2024 02:21:21 +1000 Subject: [PATCH 01/31] Mute org.elasticsearch.packaging.test.PackagesSecurityAutoConfigurationTests test20SecurityNotAutoConfiguredOnReInstallation #112635 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 6600ae65d5809..53570b3432721 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -204,6 +204,9 @@ tests: - class: org.elasticsearch.xpack.security.authc.kerberos.KerberosTicketValidatorTests method: testKerbTicketGeneratedForDifferentServerFailsValidation issue: https://github.com/elastic/elasticsearch/issues/112639 +- class: org.elasticsearch.packaging.test.PackagesSecurityAutoConfigurationTests + method: test20SecurityNotAutoConfiguredOnReInstallation + issue: https://github.com/elastic/elasticsearch/issues/112635 # Examples: # From ef3a5a138530c43b8e6cd94585d13b853b819c57 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Sep 2024 12:32:19 -0400 Subject: [PATCH 02/31] ESQL: Fix CASE when conditions are multivalued (#112401) When CASE hits a multivalued field it was previously either crashing on fold or evaluating it to the first value. Since booleans are loaded in sorted order from Lucene that *usually* means `false`. This changes the behavior to line up with the rest of ESQL - now multivalued fields are treated as `false` with a warning. You might say "hey wait! multivalued fields usually become `null`, not `false`!". Yes, dear reader, you are right. Very right. But! `CASE`'s contract is to immediately convert its values into `true` or `false` using the standard boolean tri-valued logic. So `null` just becomes `false` immediately. This is how PostgreSQL, MySQL, and SQLite behave: ``` > SELECT CASE WHEN null THEN 1 ELSE 2 END; 2 ``` They turn that `null` into a false. And we're right there with them. Except, of course, that we're turning `[false, false]` and the like into `null` first. See!? It's consistent. Consistently confusing, but sane at least. The warning message just says "treating multivalued field as false" rather than explaining all of that. This also fixes up a few of CASE's docs which I noticed were kind of busted while working on CASE. I think the docs generation is having a lot of trouble with CASE so I've manually hacked the right thing into place, but we should figure out a better solution eventually.
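For a concrete picture of the new behavior, here's roughly what the csv-spec tests added in this change exercise - a constant multivalued condition now falls through to the ELSE value and emits a warning instead of silently picking the first value:

```
ROW foo = CASE([false, true], "a", "b")
```

returns `b`, along with:

```
Line 1:16: evaluation of [[false, true]] failed, treating result as false. Only first 20 failures recorded.
Line 1:16: java.lang.IllegalArgumentException: CASE expects a single-valued boolean
```
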
Closes #112359 --- docs/changelog/112401.yaml | 6 + .../functions/kibana/definition/case.json | 370 ++++++- .../esql/functions/parameters/case.asciidoc | 3 + .../esql/functions/signature/case.svg | 2 +- .../esql/functions/types/case.asciidoc | 41 +- .../src/main/resources/conditional.csv-spec | 76 ++ .../src/main/resources/meta.csv-spec | 6 +- .../xpack/esql/action/EsqlCapabilities.java | 5 + .../esql/expression/function/Warnings.java | 49 +- .../function/scalar/conditional/Case.java | 125 ++- .../function/AbstractAggregationTestCase.java | 2 + .../function/AbstractFunctionTestCase.java | 51 +- .../AbstractScalarFunctionTestCase.java | 29 +- .../expression/function/RailRoadDiagram.java | 30 +- .../expression/function/TestCaseSupplier.java | 127 ++- .../scalar/conditional/CaseExtraTests.java | 13 + .../scalar/conditional/CaseTests.java | 913 +++++++++++++++--- .../test/esql/160_union_types.yml | 64 +- 18 files changed, 1646 insertions(+), 266 deletions(-) create mode 100644 docs/changelog/112401.yaml diff --git a/docs/changelog/112401.yaml b/docs/changelog/112401.yaml new file mode 100644 index 0000000000000..65e9e76ac25f6 --- /dev/null +++ b/docs/changelog/112401.yaml @@ -0,0 +1,6 @@ +pr: 112401 +summary: "ESQL: Fix CASE when conditions are multivalued" +area: ES|QL +type: bug +issues: + - 112359 diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index 27705cd3897f9..ab10460f48b25 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -22,6 +22,30 @@ "variadic" : true, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "boolean", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "boolean", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, { "params" : [ { @@ -40,6 +64,90 @@ "variadic" : true, "returnType" : "cartesian_point" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "cartesian_point", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "cartesian_point", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "cartesian_point" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "cartesian_shape", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." 
+ } + ], + "variadic" : true, + "returnType" : "cartesian_shape" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "cartesian_shape", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "cartesian_shape", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "cartesian_shape" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "date", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "date" + }, { "params" : [ { @@ -53,6 +161,12 @@ "type" : "date", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "date", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." } ], "variadic" : true, @@ -76,6 +190,30 @@ "variadic" : true, "returnType" : "double" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "double", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "double", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "double" + }, { "params" : [ { @@ -94,6 +232,90 @@ "variadic" : true, "returnType" : "geo_point" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "geo_point", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "geo_point", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "geo_point" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "geo_shape", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "geo_shape" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." 
+ }, + { + "name" : "trueValue", + "type" : "geo_shape", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "geo_shape", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "geo_shape" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "integer", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "integer" + }, { "params" : [ { @@ -107,6 +329,12 @@ "type" : "integer", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "integer", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." } ], "variadic" : true, @@ -130,6 +358,30 @@ "variadic" : true, "returnType" : "ip" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "ip", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "ip", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "ip" + }, { "params" : [ { @@ -143,12 +395,30 @@ "type" : "keyword", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." }, { - "name" : "falseValue", + "name" : "trueValue", "type" : "keyword", - "optional" : true, + "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "keyword", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." } ], "variadic" : true, @@ -172,6 +442,30 @@ "variadic" : true, "returnType" : "long" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "long", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "long", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." 
+ } + ], + "variadic" : true, + "returnType" : "long" + }, { "params" : [ { @@ -190,6 +484,48 @@ "variadic" : true, "returnType" : "text" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "text", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "text", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "unsigned_long", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "unsigned_long" + }, { "params" : [ { @@ -203,6 +539,12 @@ "type" : "unsigned_long", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "unsigned_long", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." } ], "variadic" : true, @@ -225,6 +567,30 @@ ], "variadic" : true, "returnType" : "version" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "version", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "version", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "version" } ], "examples" : [ diff --git a/docs/reference/esql/functions/parameters/case.asciidoc b/docs/reference/esql/functions/parameters/case.asciidoc index ee6f7e499b3b3..f12eade4d5780 100644 --- a/docs/reference/esql/functions/parameters/case.asciidoc +++ b/docs/reference/esql/functions/parameters/case.asciidoc @@ -7,3 +7,6 @@ A condition. `trueValue`:: The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches. + +`elseValue`:: +The value that's returned when no condition evaluates to `true`. 
diff --git a/docs/reference/esql/functions/signature/case.svg b/docs/reference/esql/functions/signature/case.svg index d6fd7da38aca6..0d51a0647627d 100644 --- a/docs/reference/esql/functions/signature/case.svg +++ b/docs/reference/esql/functions/signature/case.svg @@ -1 +1 @@ -CASE(condition,trueValue) \ No newline at end of file +CASE(condition,trueValueelseValue) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index f6c8cfe9361d1..e8aa3eaf5daae 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -4,16 +4,33 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -condition | trueValue | result -boolean | boolean | boolean -boolean | cartesian_point | cartesian_point -boolean | date | date -boolean | double | double -boolean | geo_point | geo_point -boolean | integer | integer -boolean | ip | ip -boolean | long | long -boolean | text | text -boolean | unsigned_long | unsigned_long -boolean | version | version +condition | trueValue | elseValue | result +boolean | boolean | boolean | boolean +boolean | boolean | | boolean +boolean | cartesian_point | cartesian_point | cartesian_point +boolean | cartesian_point | | cartesian_point +boolean | cartesian_shape | cartesian_shape | cartesian_shape +boolean | cartesian_shape | | cartesian_shape +boolean | date | date | date +boolean | date | | date +boolean | double | double | double +boolean | double | | double +boolean | geo_point | geo_point | geo_point +boolean | geo_point | | geo_point +boolean | geo_shape | geo_shape | geo_shape +boolean | geo_shape | | geo_shape +boolean | integer | integer | integer +boolean | integer | | integer +boolean | ip | ip | ip +boolean | ip | | ip +boolean | keyword | keyword | keyword +boolean | keyword | | keyword +boolean | long | long | long +boolean | long | | long +boolean | text | text | text +boolean | text | | text +boolean | unsigned_long | unsigned_long | unsigned_long +boolean | unsigned_long | | unsigned_long +boolean | version | version | version +boolean | version | | version |=== diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec index d4b45ca37fc2d..996b2b5030d82 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec @@ -94,6 +94,82 @@ M |10 M |10 ; +caseOnMv +required_capability: case_mv + +FROM employees +| WHERE emp_no == 10010 +| EVAL foo = CASE(still_hired, "still", is_rehired, "rehired", "not") +| KEEP still_hired, is_rehired, foo; +warning:Line 3:41: evaluation of [is_rehired] failed, treating result as false. Only first 20 failures recorded. +warning:Line 3:41: java.lang.IllegalArgumentException: CASE expects a single-valued boolean + +still_hired:boolean | is_rehired:boolean | foo:keyword + false | [false, false, true, true] | not +; + +caseOnConstantMvFalseTrue +required_capability: case_mv + +ROW foo = CASE([false, true], "a", "b"); +warning:Line 1:16: evaluation of [[false, true]] failed, treating result as false. Only first 20 failures recorded. 
+warning:Line 1:16: java.lang.IllegalArgumentException: CASE expects a single-valued boolean + +foo:keyword +b +; + +caseOnConstantMvTrueTrue +required_capability: case_mv + +ROW foo = CASE([true, true], "a", "b"); +warning:Line 1:16: evaluation of [[true, true]] failed, treating result as false. Only first 20 failures recorded. +warning:Line 1:16: java.lang.IllegalArgumentException: CASE expects a single-valued boolean + +foo:keyword +b +; + +caseOnMvSliceMv +required_capability: case_mv + +ROW foo = [true, false, false] | EVAL foo = CASE(MV_SLICE(foo, 0, 1), "a", "b"); +warning:Line 1:50: evaluation of [MV_SLICE(foo, 0, 1)] failed, treating result as false. Only first 20 failures recorded. +warning:Line 1:50: java.lang.IllegalArgumentException: CASE expects a single-valued boolean + +foo:keyword +b +; + +caseOnMvSliceSv +required_capability: case_mv + +ROW foo = [true, false, false] | EVAL foo = CASE(MV_SLICE(foo, 0), "a", "b"); + +foo:keyword +a +; + +caseOnConvertMvSliceMv +required_capability: case_mv + +ROW foo = ["true", "false", "false"] | EVAL foo = CASE(MV_SLICE(foo::BOOLEAN, 0, 1), "a", "b"); +warning:Line 1:56: evaluation of [MV_SLICE(foo::BOOLEAN, 0, 1)] failed, treating result as false. Only first 20 failures recorded. +warning:Line 1:56: java.lang.IllegalArgumentException: CASE expects a single-valued boolean + +foo:keyword +b +; + +caseOnConvertMvSliceSv +required_capability: case_mv + +ROW foo = ["true", "false", "false"] | EVAL foo = CASE(MV_SLICE(foo::BOOLEAN, 0), "a", "b"); + +foo:keyword +a +; + docsCaseSuccessRate // tag::docsCaseSuccessRate[] FROM sample_data diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index cd3ecfc367ddd..bc90f7f616631 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -11,7 +11,7 @@ synopsis:keyword "double avg(number:double|integer|long)" "double|date bin(field:integer|long|double|date, buckets:integer|long|double|date_period|time_duration, ?from:integer|long|double|date|keyword|text, ?to:integer|long|double|date|keyword|text)" "double|date bucket(field:integer|long|double|date, buckets:integer|long|double|date_period|time_duration, ?from:integer|long|double|date|keyword|text, ?to:integer|long|double|date|keyword|text)" -"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version case(condition:boolean, trueValue...:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" +"boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version case(condition:boolean, trueValue...:boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "double cbrt(number:double|integer|long|unsigned_long)" "double|integer|long|unsigned_long ceil(number:double|integer|long|unsigned_long)" "boolean cidr_match(ip:ip, blockX...:keyword|text)" @@ -137,7 +137,7 @@ atan2 |[y_coordinate, x_coordinate] |["double|integer|long|unsign avg |number |"double|integer|long" |[""] bin |[field, buckets, from, to] |["integer|long|double|date", "integer|long|double|date_period|time_duration", "integer|long|double|date|keyword|text", "integer|long|double|date|keyword|text"] |[Numeric or date expression from which to derive buckets., Target number of 
buckets\, or desired bucket size if `from` and `to` parameters are omitted., Start of the range. Can be a number\, a date or a date expressed as a string., End of the range. Can be a number\, a date or a date expressed as a string.] bucket |[field, buckets, from, to] |["integer|long|double|date", "integer|long|double|date_period|time_duration", "integer|long|double|date|keyword|text", "integer|long|double|date|keyword|text"] |[Numeric or date expression from which to derive buckets., Target number of buckets\, or desired bucket size if `from` and `to` parameters are omitted., Start of the range. Can be a number\, a date or a date expressed as a string., End of the range. Can be a number\, a date or a date expressed as a string.] -case |[condition, trueValue] |[boolean, "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version"] |[A condition., The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches.] +case |[condition, trueValue] |[boolean, "boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version"] |[A condition., The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches.] cbrt |number |"double|integer|long|unsigned_long" |"Numeric expression. If `null`, the function returns `null`." ceil |number |"double|integer|long|unsigned_long" |Numeric expression. If `null`, the function returns `null`. cidr_match |[ip, blockX] |[ip, "keyword|text"] |[IP address of type `ip` (both IPv4 and IPv6 are supported)., CIDR block to test the IP against.] @@ -391,7 +391,7 @@ atan2 |double avg |double |false |false |true bin |"double|date" |[false, false, true, true] |false |false bucket |"double|date" |[false, false, true, true] |false |false -case |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" |[false, false] |true |false +case |"boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" |[false, false] |true |false cbrt |double |false |false |false ceil |"double|integer|long|unsigned_long" |false |false |false cidr_match |boolean |[false, false] |true |false diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 6e8d64edb6c86..858e2a3332bf8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -97,6 +97,11 @@ public enum Cap { */ AGG_TOP_IP_SUPPORT, + /** + * {@code CASE} properly handling multivalue conditions. + */ + CASE_MV, + /** * Optimization for ST_CENTROID changed some results in cartesian data. 
#108713 */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/Warnings.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/Warnings.java index 630cf62d0030a..87809ba536879 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/Warnings.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/Warnings.java @@ -32,30 +32,53 @@ public void registerException(Exception exception) { }; /** - * Create a new warnings object based on the given mode + * Create a new warnings object based on the given mode which warns that + * it treats the result as {@code null}. * @param warningsMode The warnings collection strategy to use - * @param source used to indicate where in the query the warning occured + * @param source used to indicate where in the query the warning occurred * @return A warnings collector object */ public static Warnings createWarnings(DriverContext.WarningsMode warningsMode, Source source) { - switch (warningsMode) { - case COLLECT -> { - return new Warnings(source); - } - case IGNORE -> { - return NOOP_WARNINGS; - } - } - throw new IllegalStateException("Unreachable"); + return createWarnings(warningsMode, source, "treating result as null"); + } + + /** + * Create a new warnings object based on the given mode which warns that + * it treats the result as {@code false}. + * @param warningsMode The warnings collection strategy to use + * @param source used to indicate where in the query the warning occurred + * @return A warnings collector object + */ + public static Warnings createWarningsTreatedAsFalse(DriverContext.WarningsMode warningsMode, Source source) { + return createWarnings(warningsMode, source, "treating result as false"); + } + + /** + * Create a new warnings object based on the given mode + * @param warningsMode The warnings collection strategy to use + * @param source used to indicate where in the query the warning occurred + * @param first warning message attached to the first result + * @return A warnings collector object + */ + public static Warnings createWarnings(DriverContext.WarningsMode warningsMode, Source source, String first) { + return switch (warningsMode) { + case COLLECT -> new Warnings(source, first); + case IGNORE -> NOOP_WARNINGS; + }; } public Warnings(Source source) { + this(source, "treating result as null"); + } + + public Warnings(Source source, String first) { location = format("Line {}:{}: ", source.source().getLineNumber(), source.source().getColumnNumber()); - first = format( + this.first = format( null, - "{}evaluation of [{}] failed, treating result as null. Only first {} failures recorded.", + "{}evaluation of [{}] failed, {}. 
Only first {} failures recorded.", location, source.text(), + first, MAX_ADDED_WARNINGS ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index 3239afabf6a24..979f681a7fbd0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.Page; @@ -29,6 +30,7 @@ import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.Warnings; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.planner.PlannerUtils; @@ -46,7 +48,11 @@ public final class Case extends EsqlScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Case", Case::new); - record Condition(Expression condition, Expression value) {} + record Condition(Expression condition, Expression value) { + ConditionEvaluatorSupplier toEvaluator(Function toEvaluator) { + return new ConditionEvaluatorSupplier(condition.source(), toEvaluator.apply(condition), toEvaluator.apply(value)); + } + } private final List conditions; private final Expression elseValue; @@ -56,9 +62,12 @@ record Condition(Expression condition, Expression value) {} returnType = { "boolean", "cartesian_point", + "cartesian_shape", "date", + "date_nanos", "double", "geo_point", + "geo_shape", "integer", "ip", "keyword", @@ -94,9 +103,12 @@ public Case( type = { "boolean", "cartesian_point", + "cartesian_shape", "date", + "date_nanos", "double", "geo_point", + "geo_shape", "integer", "ip", "keyword", @@ -215,25 +227,26 @@ public boolean foldable() { if (condition.condition.foldable() == false) { return false; } - Boolean b = (Boolean) condition.condition.fold(); - if (b != null && b) { + if (Boolean.TRUE.equals(condition.condition.fold())) { + /* + * `fold` can make four things here: + * 1. `TRUE` + * 2. `FALSE` + * 3. null + * 4. A list with more than one `TRUE` or `FALSE` in it. + * + * In the first case, we're foldable if the condition is foldable. + * The multivalued field will make a warning, but eventually + * become null. And null will become false. So cases 2-4 are + * the same. In those cases we are foldable only if the *rest* + * of the condition is foldable. + */ return condition.value.foldable(); } } return elseValue.foldable(); } - @Override - public Object fold() { - for (Condition condition : conditions) { - Boolean b = (Boolean) condition.condition.fold(); - if (b != null && b) { - return condition.value.fold(); - } - } - return elseValue.fold(); - } - /** * Fold the arms of {@code CASE} statements. *
    @@ -261,8 +274,20 @@ public Expression partiallyFold() { continue; } modified = true; - Boolean b = (Boolean) condition.condition.fold(); - if (b != null && b) { + if (Boolean.TRUE.equals(condition.condition.fold())) { + /* + * `fold` can make four things here: + * 1. `TRUE` + * 2. `FALSE` + * 3. null + * 4. A list with more than one `TRUE` or `FALSE` in it. + * + * In the first case, we fold to the value of the condition. + * The multivalued field will make a warning, but eventually + * become null. And null will become false. So cases 2-4 are + * the same. In those cases we fold the entire condition + * away, returning just what ever's remaining in the CASE. + */ newChildren.add(condition.value); return finishPartialFold(newChildren); } @@ -277,24 +302,23 @@ public Expression partiallyFold() { } private Expression finishPartialFold(List newChildren) { - if (newChildren.size() == 1) { - return newChildren.get(0); - } - return replaceChildren(newChildren); + return switch (newChildren.size()) { + case 0 -> new Literal(source(), null, dataType()); + case 1 -> newChildren.get(0); + default -> replaceChildren(newChildren); + }; } @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { ElementType resultType = PlannerUtils.toElementType(dataType()); - List conditionsFactories = conditions.stream() - .map(c -> new ConditionEvaluatorSupplier(toEvaluator.apply(c.condition), toEvaluator.apply(c.value))) - .toList(); + List conditionsFactories = conditions.stream().map(c -> c.toEvaluator(toEvaluator)).toList(); ExpressionEvaluator.Factory elseValueFactory = toEvaluator.apply(elseValue); return new ExpressionEvaluator.Factory() { @Override public ExpressionEvaluator get(DriverContext context) { return new CaseEvaluator( - context, + context.blockFactory(), resultType, conditionsFactories.stream().map(x -> x.apply(context)).toList(), elseValueFactory.get(context) @@ -303,40 +327,58 @@ public ExpressionEvaluator get(DriverContext context) { @Override public String toString() { - return "CaseEvaluator[resultType=" - + resultType - + ", conditions=" - + conditionsFactories - + ", elseVal=" - + elseValueFactory - + ']'; + return "CaseEvaluator[conditions=" + conditionsFactories + ", elseVal=" + elseValueFactory + ']'; } }; } - record ConditionEvaluatorSupplier(ExpressionEvaluator.Factory condition, ExpressionEvaluator.Factory value) + record ConditionEvaluatorSupplier(Source conditionSource, ExpressionEvaluator.Factory condition, ExpressionEvaluator.Factory value) implements Function { @Override public ConditionEvaluator apply(DriverContext driverContext) { - return new ConditionEvaluator(condition.get(driverContext), value.get(driverContext)); + return new ConditionEvaluator( + /* + * We treat failures as null just like any other failure. + * It's just that we then *immediately* convert it to + * true or false using the tri-valued boolean logic stuff. + * And that makes it into false. This is, *exactly* what + * happens in PostgreSQL and MySQL and SQLite: + * > SELECT CASE WHEN null THEN 1 ELSE 2 END; + * 2 + * Rather than go into depth about this in the warning message, + * we just say "false". 
+ */ + Warnings.createWarningsTreatedAsFalse(driverContext.warningsMode(), conditionSource), + condition.get(driverContext), + value.get(driverContext) + ); } @Override public String toString() { - return "ConditionEvaluator[" + "condition=" + condition + ", value=" + value + ']'; + return "ConditionEvaluator[condition=" + condition + ", value=" + value + ']'; } } - record ConditionEvaluator(EvalOperator.ExpressionEvaluator condition, EvalOperator.ExpressionEvaluator value) implements Releasable { + record ConditionEvaluator( + Warnings conditionWarnings, + EvalOperator.ExpressionEvaluator condition, + EvalOperator.ExpressionEvaluator value + ) implements Releasable { @Override public void close() { Releasables.closeExpectNoException(condition, value); } + + @Override + public String toString() { + return "ConditionEvaluator[condition=" + condition + ", value=" + value + ']'; + } } private record CaseEvaluator( - DriverContext driverContext, + BlockFactory blockFactory, ElementType resultType, List conditions, EvalOperator.ExpressionEvaluator elseVal @@ -353,10 +395,11 @@ public Block eval(Page page) { * a time - but it's not at all fast. */ int positionCount = page.getPositionCount(); - try (Block.Builder result = resultType.newBlockBuilder(positionCount, driverContext.blockFactory())) { + try (Block.Builder result = resultType.newBlockBuilder(positionCount, blockFactory)) { position: for (int p = 0; p < positionCount; p++) { int[] positions = new int[] { p }; Page limited = new Page( + 1, IntStream.range(0, page.getBlockCount()).mapToObj(b -> page.getBlock(b).filter(positions)).toArray(Block[]::new) ); try (Releasable ignored = limited::releaseBlocks) { @@ -365,6 +408,12 @@ public Block eval(Page page) { if (b.isNull(0)) { continue; } + if (b.getValueCount(0) > 1) { + condition.conditionWarnings.registerException( + new IllegalArgumentException("CASE expects a single-valued boolean") + ); + continue; + } if (false == b.getBoolean(b.getFirstValueIndex(0))) { continue; } @@ -390,7 +439,7 @@ public void close() { @Override public String toString() { - return "CaseEvaluator[resultType=" + resultType + ", conditions=" + conditions + ", elseVal=" + elseVal + ']'; + return "CaseEvaluator[conditions=" + conditions + ", elseVal=" + elseVal + ']'; } } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index 4e26baddd013b..54db9afa291ad 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -105,8 +105,10 @@ protected static List withNoRowsExpectingNull(List anyNullIsNull( expectedType.expectedType(finalNullPosition, nulledData.type(), oc), nullValue(), null, + null, oc.getExpectedTypeError(), null, + null, null ); })); @@ -246,8 +248,10 @@ protected static List anyNullIsNull( expectedType.expectedType(finalNullPosition, DataType.NULL, oc), nullValue(), null, + null, oc.getExpectedTypeError(), null, + null, null ); })); @@ -642,9 +646,11 @@ protected static List randomizeBytesRefsOffset(List args = description.args(); @@ -707,7 +711,7 @@ public static void testFunctionInfo() { ); List> typesFromSignature = new ArrayList<>(); - Set returnFromSignature = new HashSet<>(); + Set returnFromSignature = new TreeSet<>(); 
for (int i = 0; i < args.size(); i++) { typesFromSignature.add(new HashSet<>()); } @@ -828,6 +832,28 @@ public static void renderDocs() throws IOException { FunctionDefinition definition = definition(name); if (definition != null) { EsqlFunctionRegistry.FunctionDescription description = EsqlFunctionRegistry.description(definition); + if (name.equals("case")) { + /* + * Hack the description, so we render a proper one for case. + */ + // TODO build the description properly *somehow* + EsqlFunctionRegistry.ArgSignature trueValue = description.args().get(1); + EsqlFunctionRegistry.ArgSignature falseValue = new EsqlFunctionRegistry.ArgSignature( + "elseValue", + trueValue.type(), + "The value that's returned when no condition evaluates to `true`.", + true, + EsqlFunctionRegistry.getTargetType(trueValue.type()) + ); + description = new EsqlFunctionRegistry.FunctionDescription( + description.name(), + List.of(description.args().get(0), trueValue, falseValue), + description.returnType(), + description.description(), + description.variadic(), + description.isAggregation() + ); + } renderTypes(description.argNames()); renderParametersList(description.argNames(), description.argDescriptions()); FunctionInfo info = EsqlFunctionRegistry.functionInfo(definition); @@ -836,22 +862,7 @@ public static void renderDocs() throws IOException { boolean hasAppendix = renderAppendix(info.appendix()); renderFullLayout(name, info.preview(), hasExamples, hasAppendix); renderKibanaInlineDocs(name, info); - List args = description.args(); - if (name.equals("case")) { - EsqlFunctionRegistry.ArgSignature falseValue = args.get(1); - args = List.of( - args.get(0), - falseValue, - new EsqlFunctionRegistry.ArgSignature( - "falseValue", - falseValue.type(), - falseValue.description(), - true, - EsqlFunctionRegistry.getTargetType(falseValue.type()) - ) - ); - } - renderKibanaFunctionDefinition(name, info, args, description.variadic()); + renderKibanaFunctionDefinition(name, info, description.args(), description.variadic()); return; } LogManager.getLogger(getTestClass()).info("Skipping rendering types because the function '" + name + "' isn't registered"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java index fed81d4260bcd..85db73901352b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java @@ -38,7 +38,6 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -120,6 +119,9 @@ public final void testEvaluate() { Object result; try (ExpressionEvaluator evaluator = evaluator(expression).get(driverContext())) { + if (testCase.getExpectedBuildEvaluatorWarnings() != null) { + assertWarnings(testCase.getExpectedBuildEvaluatorWarnings()); + } try (Block block = evaluator.eval(row(testCase.getDataValues()))) { assertThat(block.getPositionCount(), is(1)); result = toJavaObjectUnsignedLongAware(block, 0); @@ -177,6 +179,10 @@ public final void testEvaluateBlockWithNulls() { */ public final void 
testCrankyEvaluateBlockWithoutNulls() { assumeTrue("sometimes the cranky breaker silences warnings, just skip these cases", testCase.getExpectedWarnings() == null); + assumeTrue( + "sometimes the cranky breaker silences warnings, just skip these cases", + testCase.getExpectedBuildEvaluatorWarnings() == null + ); try { testEvaluateBlock(driverContext().blockFactory(), crankyContext(), false); } catch (CircuitBreakingException ex) { @@ -190,6 +196,10 @@ public final void testCrankyEvaluateBlockWithoutNulls() { */ public final void testCrankyEvaluateBlockWithNulls() { assumeTrue("sometimes the cranky breaker silences warnings, just skip these cases", testCase.getExpectedWarnings() == null); + assumeTrue( + "sometimes the cranky breaker silences warnings, just skip these cases", + testCase.getExpectedBuildEvaluatorWarnings() == null + ); try { testEvaluateBlock(driverContext().blockFactory(), crankyContext(), true); } catch (CircuitBreakingException ex) { @@ -242,10 +252,13 @@ private void testEvaluateBlock(BlockFactory inputBlockFactory, DriverContext con ExpressionEvaluator eval = evaluator(expression).get(context); Block block = eval.eval(new Page(positions, manyPositionsBlocks)) ) { + if (testCase.getExpectedBuildEvaluatorWarnings() != null) { + assertWarnings(testCase.getExpectedBuildEvaluatorWarnings()); + } assertThat(block.getPositionCount(), is(positions)); for (int p = 0; p < positions; p++) { if (nullPositions.contains(p)) { - assertThat(toJavaObject(block, p), allNullsMatcher()); + assertThat(toJavaObjectUnsignedLongAware(block, p), allNullsMatcher()); continue; } assertThat(toJavaObjectUnsignedLongAware(block, p), testCase.getMatcher()); @@ -275,6 +288,9 @@ public final void testEvaluateInManyThreads() throws ExecutionException, Interru int count = 10_000; int threads = 5; var evalSupplier = evaluator(expression); + if (testCase.getExpectedBuildEvaluatorWarnings() != null) { + assertWarnings(testCase.getExpectedBuildEvaluatorWarnings()); + } ExecutorService exec = Executors.newFixedThreadPool(threads); try { List> futures = new ArrayList<>(); @@ -310,6 +326,9 @@ public final void testEvaluatorToString() { assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); var factory = evaluator(expression); try (ExpressionEvaluator ev = factory.get(driverContext())) { + if (testCase.getExpectedBuildEvaluatorWarnings() != null) { + assertWarnings(testCase.getExpectedBuildEvaluatorWarnings()); + } assertThat(ev.toString(), testCase.evaluatorToString()); } } @@ -322,6 +341,9 @@ public final void testFactoryToString() { } assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); var factory = evaluator(buildFieldExpression(testCase)); + if (testCase.getExpectedBuildEvaluatorWarnings() != null) { + assertWarnings(testCase.getExpectedBuildEvaluatorWarnings()); + } assertThat(factory.toString(), testCase.evaluatorToString()); } @@ -342,6 +364,9 @@ public final void testFold() { result = NumericUtils.unsignedLongAsBigInteger((Long) result); } assertThat(result, testCase.getMatcher()); + if (testCase.getExpectedBuildEvaluatorWarnings() != null) { + assertWarnings(testCase.getExpectedBuildEvaluatorWarnings()); + } if (testCase.getExpectedWarnings() != null) { assertWarnings(testCase.getExpectedWarnings()); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java index 4e00fa9f41fbd..df0737feadd8d 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java @@ -49,18 +49,28 @@ static String functionSignature(FunctionDefinition definition) throws IOExceptio List expressions = new ArrayList<>(); expressions.add(new SpecialSequence(definition.name().toUpperCase(Locale.ROOT))); expressions.add(new Syntax("(")); - boolean first = true; - List args = EsqlFunctionRegistry.description(definition).argNames(); - for (String arg : args) { - if (arg.endsWith("...")) { - expressions.add(new Repetition(new Sequence(new Syntax(","), new Literal(arg.substring(0, arg.length() - 3))), 0, null)); - } else { - if (first) { - first = false; + + if (definition.name().equals("case")) { + // CASE is so weird let's just hack this together manually + Sequence seq = new Sequence(new Literal("condition"), new Syntax(","), new Literal("trueValue")); + expressions.add(new Repetition(seq, 1, null)); + expressions.add(new Repetition(new Literal("elseValue"), 0, 1)); + } else { + boolean first = true; + List args = EsqlFunctionRegistry.description(definition).argNames(); + for (String arg : args) { + if (arg.endsWith("...")) { + expressions.add( + new Repetition(new Sequence(new Syntax(","), new Literal(arg.substring(0, arg.length() - 3))), 0, null) + ); } else { - expressions.add(new Syntax(",")); + if (first) { + first = false; + } else { + expressions.add(new Syntax(",")); + } + expressions.add(new Literal(arg)); } - expressions.add(new Literal(arg)); } } expressions.add(new Syntax(")")); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index a1caa784c9787..e44ea907518b4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -67,6 +67,21 @@ public static String nameFromTypes(List types) { return types.stream().map(t -> "<" + t.typeName() + ">").collect(Collectors.joining(", ")); } + /** + * Build a name for the test case based on objects likely to describe it. + */ + public static String nameFrom(List paramDescriptors) { + return paramDescriptors.stream().map(p -> { + if (p == null) { + return "null"; + } + if (p instanceof DataType t) { + return "<" + t.typeName() + ">"; + } + return p.toString(); + }).collect(Collectors.joining(", ")); + } + public static List stringCases( BinaryOperator expected, BiFunction evaluatorToString, @@ -1305,7 +1320,7 @@ public static String castToDoubleEvaluator(String original, DataType current) { throw new UnsupportedOperationException(); } - public static class TestCase { + public static final class TestCase { /** * The {@link Source} this test case should be run with */ @@ -1333,22 +1348,34 @@ public static class TestCase { */ private final String[] expectedWarnings; + /** + * Warnings that are added by calling {@link AbstractFunctionTestCase#evaluator} + * or {@link Expression#fold()} on the expression built by this. 
+ */ + private final String[] expectedBuildEvaluatorWarnings; + private final String expectedTypeError; private final boolean canBuildEvaluator; private final Class foldingExceptionClass; private final String foldingExceptionMessage; + /** + * Extra data embedded in the test case. Test subclasses can cast + * as needed and extra whatever helps them. + */ + private final Object extra; + public TestCase(List data, String evaluatorToString, DataType expectedType, Matcher matcher) { this(data, equalTo(evaluatorToString), expectedType, matcher); } public TestCase(List data, Matcher evaluatorToString, DataType expectedType, Matcher matcher) { - this(data, evaluatorToString, expectedType, matcher, null, null, null, null); + this(data, evaluatorToString, expectedType, matcher, null, null, null, null, null, null); } public static TestCase typeError(List data, String expectedTypeError) { - return new TestCase(data, null, null, null, null, expectedTypeError, null, null); + return new TestCase(data, null, null, null, null, null, expectedTypeError, null, null, null); } TestCase( @@ -1357,9 +1384,11 @@ public static TestCase typeError(List data, String expectedTypeError) DataType expectedType, Matcher matcher, String[] expectedWarnings, + String[] expectedBuildEvaluatorWarnings, String expectedTypeError, Class foldingExceptionClass, - String foldingExceptionMessage + String foldingExceptionMessage, + Object extra ) { this.source = Source.EMPTY; this.data = data; @@ -1369,10 +1398,12 @@ public static TestCase typeError(List data, String expectedTypeError) Matcher downcast = (Matcher) matcher; this.matcher = downcast; this.expectedWarnings = expectedWarnings; + this.expectedBuildEvaluatorWarnings = expectedBuildEvaluatorWarnings; this.expectedTypeError = expectedTypeError; this.canBuildEvaluator = data.stream().allMatch(d -> d.forceLiteral || DataType.isRepresentable(d.type)); this.foldingExceptionClass = foldingExceptionClass; this.foldingExceptionMessage = foldingExceptionMessage; + this.extra = extra; } public Source getSource() { @@ -1419,6 +1450,14 @@ public String[] getExpectedWarnings() { return expectedWarnings; } + /** + * Warnings that are added by calling {@link AbstractFunctionTestCase#evaluator} + * or {@link Expression#fold()} on the expression built by this. + */ + public String[] getExpectedBuildEvaluatorWarnings() { + return expectedBuildEvaluatorWarnings; + } + public Class foldingExceptionClass() { return foldingExceptionClass; } @@ -1431,28 +1470,88 @@ public String getExpectedTypeError() { return expectedTypeError; } + /** + * Extra data embedded in the test case. Test subclasses can cast + * as needed and extra whatever helps them. + */ + public Object extra() { + return extra; + } + + /** + * Build a new {@link TestCase} with new {@link #extra()}. 
+ */ + public TestCase withExtra(Object extra) { + return new TestCase( + data, + evaluatorToString, + expectedType, + matcher, + expectedWarnings, + expectedBuildEvaluatorWarnings, + expectedTypeError, + foldingExceptionClass, + foldingExceptionMessage, + extra + ); + } + public TestCase withWarning(String warning) { - String[] newWarnings; - if (expectedWarnings != null) { - newWarnings = Arrays.copyOf(expectedWarnings, expectedWarnings.length + 1); - newWarnings[expectedWarnings.length] = warning; - } else { - newWarnings = new String[] { warning }; - } return new TestCase( data, evaluatorToString, expectedType, matcher, - newWarnings, + addWarning(expectedWarnings, warning), + expectedBuildEvaluatorWarnings, + expectedTypeError, + foldingExceptionClass, + foldingExceptionMessage, + extra + ); + } + + /** + * Warnings that are added by calling {@link AbstractFunctionTestCase#evaluator} + * or {@link Expression#fold()} on the expression built by this. + */ + public TestCase withBuildEvaluatorWarning(String warning) { + return new TestCase( + data, + evaluatorToString, + expectedType, + matcher, + expectedWarnings, + addWarning(expectedBuildEvaluatorWarnings, warning), expectedTypeError, foldingExceptionClass, - foldingExceptionMessage + foldingExceptionMessage, + extra ); } + private String[] addWarning(String[] warnings, String warning) { + if (warnings == null) { + return new String[] { warning }; + } + String[] newWarnings = Arrays.copyOf(warnings, warnings.length + 1); + newWarnings[warnings.length] = warning; + return newWarnings; + } + public TestCase withFoldingException(Class clazz, String message) { - return new TestCase(data, evaluatorToString, expectedType, matcher, expectedWarnings, expectedTypeError, clazz, message); + return new TestCase( + data, + evaluatorToString, + expectedType, + matcher, + expectedWarnings, + expectedBuildEvaluatorWarnings, + expectedTypeError, + clazz, + message, + extra + ); } public DataType expectedType() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseExtraTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseExtraTests.java index f2c4625f5a3cb..de84086e3cb4e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseExtraTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseExtraTests.java @@ -72,6 +72,19 @@ public void testPartialFoldDropsFirstFalse() { ); } + public void testPartialFoldMv() { + Case c = new Case( + Source.synthetic("case"), + new Literal(Source.EMPTY, List.of(true, true), DataType.BOOLEAN), + List.of(field("first", DataType.LONG), field("last_cond", DataType.BOOLEAN), field("last", DataType.LONG)) + ); + assertThat(c.foldable(), equalTo(false)); + assertThat( + c.partiallyFold(), + equalTo(new Case(Source.synthetic("case"), field("last_cond", DataType.BOOLEAN), List.of(field("last", DataType.LONG)))) + ); + } + public void testPartialFoldNoop() { Case c = new Case( Source.synthetic("case"), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index 97515db85e8c3..7b26ac8c099dc 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -10,22 +10,48 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.lucene.util.BytesRef; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.util.NumericUtils; import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.hamcrest.Matcher; -import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.Locale; +import java.util.function.Function; import java.util.function.Supplier; import static org.elasticsearch.xpack.esql.EsqlTestUtils.randomLiteral; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.hamcrest.Matchers.startsWith; public class CaseTests extends AbstractScalarFunctionTestCase { + private static final List TYPES = List.of( + DataType.KEYWORD, + DataType.TEXT, + DataType.BOOLEAN, + DataType.DATETIME, + DataType.DATE_NANOS, + DataType.DOUBLE, + DataType.INTEGER, + DataType.LONG, + DataType.UNSIGNED_LONG, + DataType.IP, + DataType.VERSION, + DataType.CARTESIAN_POINT, + DataType.GEO_POINT, + DataType.CARTESIAN_SHAPE, + DataType.GEO_SHAPE, + DataType.NULL + ); public CaseTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); @@ -36,168 +62,755 @@ public CaseTests(@Name("TestCase") Supplier testCaseS */ @ParametersFactory public static Iterable parameters() { - // TODO this needs lots of stuff flipped to parameters - return parameterSuppliersFromTypedData( - List.of(new TestCaseSupplier("keyword", List.of(DataType.BOOLEAN, DataType.KEYWORD, DataType.KEYWORD), () -> { - List typedData = List.of( - new TestCaseSupplier.TypedData(true, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(new BytesRef("a"), DataType.KEYWORD, "a"), - new TestCaseSupplier.TypedData(new BytesRef("b"), DataType.KEYWORD, "b") - ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=BYTES_REF, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=Attribute[channel=2]]", - DataType.KEYWORD, - equalTo(new BytesRef("a")) - ); - }), new TestCaseSupplier("text", List.of(DataType.BOOLEAN, DataType.TEXT), () -> { - List typedData = List.of( - new TestCaseSupplier.TypedData(false, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(new BytesRef("a"), DataType.TEXT, "trueValue") - ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=BYTES_REF, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.TEXT, - nullValue() - ); - }), new TestCaseSupplier("boolean", List.of(DataType.BOOLEAN, DataType.BOOLEAN), () -> { - List typedData 
= List.of( - new TestCaseSupplier.TypedData(false, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(false, DataType.BOOLEAN, "trueValue") - ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=BOOLEAN, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.BOOLEAN, - nullValue() - ); - }), new TestCaseSupplier("date", List.of(DataType.BOOLEAN, DataType.DATETIME), () -> { - long value = randomNonNegativeLong(); - List typedData = List.of( - new TestCaseSupplier.TypedData(true, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.DATETIME, "trueValue") - ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=LONG, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.DATETIME, - equalTo(value) - ); - }), new TestCaseSupplier("double", List.of(DataType.BOOLEAN, DataType.DOUBLE), () -> { - double value = randomDouble(); - List typedData = List.of( - new TestCaseSupplier.TypedData(true, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.DOUBLE, "trueValue") - ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=DOUBLE, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.DOUBLE, - equalTo(value) - ); - }), new TestCaseSupplier("integer", List.of(DataType.BOOLEAN, DataType.INTEGER), () -> { - int value = randomInt(); + List suppliers = new ArrayList<>(); + for (DataType type : TYPES) { + twoAndThreeArgs(suppliers, true, true, type, List.of()); + twoAndThreeArgs(suppliers, false, false, type, List.of()); + twoAndThreeArgs(suppliers, null, false, type, List.of()); + twoAndThreeArgs( + suppliers, + randomMultivaluedCondition(), + false, + type, + List.of( + "Line -1:-1: evaluation of [cond] failed, treating result as false. Only first 20 failures recorded.", + "Line -1:-1: java.lang.IllegalArgumentException: CASE expects a single-valued boolean" + ) + ); + } + suppliers = errorsForCasesWithoutExamples( + suppliers, + (includeOrdinal, validPerPosition, types) -> typeErrorMessage(includeOrdinal, types) + ); + + for (DataType type : TYPES) { + fourAndFiveArgs(suppliers, true, randomSingleValuedCondition(), 0, type, List.of()); + fourAndFiveArgs(suppliers, false, true, 1, type, List.of()); + fourAndFiveArgs(suppliers, false, false, 2, type, List.of()); + fourAndFiveArgs(suppliers, null, true, 1, type, List.of()); + fourAndFiveArgs(suppliers, null, false, 2, type, List.of()); + fourAndFiveArgs( + suppliers, + randomMultivaluedCondition(), + true, + 1, + type, + List.of( + "Line -1:-1: evaluation of [cond1] failed, treating result as false. Only first 20 failures recorded.", + "Line -1:-1: java.lang.IllegalArgumentException: CASE expects a single-valued boolean" + ) + ); + fourAndFiveArgs( + suppliers, + false, + randomMultivaluedCondition(), + 2, + type, + List.of( + "Line -1:-1: evaluation of [cond2] failed, treating result as false. 
Only first 20 failures recorded.", + "Line -1:-1: java.lang.IllegalArgumentException: CASE expects a single-valued boolean" + ) + ); + } + return + + parameterSuppliersFromTypedData(suppliers); + } + + private static void twoAndThreeArgs( + List suppliers, + Object cond, + boolean lhsOrRhs, + DataType type, + List warnings + ) { + suppliers.add(new TestCaseSupplier(TestCaseSupplier.nameFrom(Arrays.asList(cond, type)), List.of(DataType.BOOLEAN, type), () -> { + Object lhs = randomLiteral(type).value(); + List typedData = List.of(cond(cond, "cond"), new TestCaseSupplier.TypedData(lhs, type, "lhs")); + return testCase(type, typedData, lhsOrRhs ? lhs : null, toStringMatcher(1, true), false, null, addWarnings(warnings)); + })); + suppliers.add( + new TestCaseSupplier(TestCaseSupplier.nameFrom(Arrays.asList(cond, type, type)), List.of(DataType.BOOLEAN, type, type), () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(type).value(); List typedData = List.of( - new TestCaseSupplier.TypedData(false, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.INTEGER, "trueValue") - ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=INT, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.INTEGER, - nullValue() + cond(cond, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, type, "rhs") ); - }), new TestCaseSupplier("long", List.of(DataType.BOOLEAN, DataType.LONG), () -> { - long value = randomLong(); - List typedData = List.of( - new TestCaseSupplier.TypedData(false, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.LONG, "trueValue") + return testCase(type, typedData, lhsOrRhs ? 
lhs : rhs, toStringMatcher(1, false), false, null, addWarnings(warnings)); + }) + ); + if (lhsOrRhs) { + suppliers.add( + new TestCaseSupplier( + "foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond, type, type)), + List.of(DataType.BOOLEAN, type, type), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(type).value(); + List typedData = List.of( + cond(cond, "cond").forceLiteral(), + new TestCaseSupplier.TypedData(lhs, type, "lhs").forceLiteral(), + new TestCaseSupplier.TypedData(rhs, type, "rhs") + ); + return testCase( + type, + typedData, + lhs, + startsWith("LiteralsEvaluator[lit="), + true, + null, + addBuildEvaluatorWarnings(warnings) + ); + } + ) + ); + suppliers.add( + new TestCaseSupplier( + "partial foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond, type)), + List.of(DataType.BOOLEAN, type), + () -> { + Object lhs = randomLiteral(type).value(); + List typedData = List.of( + cond(cond, "cond").forceLiteral(), + new TestCaseSupplier.TypedData(lhs, type, "lhs") + ); + return testCase( + type, + typedData, + lhs, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition=LiteralsEvaluator"), + false, + List.of(typedData.get(1)), + addBuildEvaluatorWarnings(warnings) + ); + } + ) + ); + } else { + suppliers.add( + new TestCaseSupplier( + "foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond, type)), + List.of(DataType.BOOLEAN, type), + () -> { + Object lhs = randomLiteral(type).value(); + List typedData = List.of( + cond(cond, "cond").forceLiteral(), + new TestCaseSupplier.TypedData(lhs, type, "lhs") + ); + return testCase( + type, + typedData, + null, + startsWith("LiteralsEvaluator[lit="), + true, + List.of(new TestCaseSupplier.TypedData(null, type, "null").forceLiteral()), + addBuildEvaluatorWarnings(warnings) + ); + } + ) + ); + } + + suppliers.add( + new TestCaseSupplier( + "partial foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond, type, type)), + List.of(DataType.BOOLEAN, type, type), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(type).value(); + List typedData = List.of( + cond(cond, "cond").forceLiteral(), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, type, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? lhs : rhs, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition=LiteralsEvaluator"), + false, + List.of(typedData.get(lhsOrRhs ? 1 : 2)), + addWarnings(warnings) + ); + } + ) + ); + + // Fill in some cases with null conditions or null values + if (cond == null) { + suppliers.add( + new TestCaseSupplier(TestCaseSupplier.nameFrom(Arrays.asList(DataType.NULL, type)), List.of(DataType.NULL, type), () -> { + Object lhs = randomLiteral(type).value(); + List typedData = List.of( + new TestCaseSupplier.TypedData(null, DataType.NULL, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? 
lhs : null, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition="), + false, + null, + addWarnings(warnings) + ); + }) + ); + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(DataType.NULL, type, type)), + List.of(DataType.NULL, type, type), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(type).value(); + List typedData = List.of( + new TestCaseSupplier.TypedData(null, DataType.NULL, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, type, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? lhs : rhs, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition="), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + } + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(DataType.BOOLEAN, DataType.NULL, type)), + List.of(DataType.BOOLEAN, DataType.NULL, type), + () -> { + Object rhs = randomLiteral(type).value(); + List typedData = List.of( + cond(cond, "cond"), + new TestCaseSupplier.TypedData(null, DataType.NULL, "lhs"), + new TestCaseSupplier.TypedData(rhs, type, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? null : rhs, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition="), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(DataType.BOOLEAN, type, DataType.NULL)), + List.of(DataType.BOOLEAN, type, DataType.NULL), + () -> { + Object lhs = randomLiteral(type).value(); + List typedData = List.of( + cond(cond, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(null, DataType.NULL, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? 
lhs : null, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition="), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + } + + private static void fourAndFiveArgs( + List suppliers, + Object cond1, + Object cond2, + int result, + DataType type, + List warnings + ) { + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1"), + new TestCaseSupplier.TypedData(r1, type, "r1"), + cond(cond2, "cond2"), + new TestCaseSupplier.TypedData(r2, type, "r2") + ); + return testCase(type, typedData, switch (result) { + case 0 -> r1; + case 1 -> r2; + case 2 -> null; + default -> throw new AssertionError("unsupported result " + result); + }, toStringMatcher(2, true), false, null, addWarnings(warnings)); + } + ) + ); + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + Object r3 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1"), + new TestCaseSupplier.TypedData(r1, type, "r1"), + cond(cond2, "cond2"), + new TestCaseSupplier.TypedData(r2, type, "r2"), + new TestCaseSupplier.TypedData(r3, type, "r3") + ); + return testCase(type, typedData, switch (result) { + case 0 -> r1; + case 1 -> r2; + case 2 -> r3; + default -> throw new AssertionError("unsupported result " + result); + }, toStringMatcher(2, false), false, null, addWarnings(warnings)); + } + ) + ); + // Add some foldable and partially foldable cases. This isn't every combination of fold-ability, but it's many. 
+ switch (result) { + case 0 -> { + suppliers.add( + new TestCaseSupplier( + "foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + Object r3 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1").forceLiteral(), + cond(cond2, "cond2"), + new TestCaseSupplier.TypedData(r2, type, "r2"), + new TestCaseSupplier.TypedData(r3, type, "r3") + ); + return testCase( + type, + typedData, + r1, + startsWith("LiteralsEvaluator[lit="), + true, + null, + addBuildEvaluatorWarnings(warnings) + ); + } + ) ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=LONG, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.LONG, - nullValue() + suppliers.add( + new TestCaseSupplier( + "partial foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + Object r3 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1"), + cond(cond2, "cond2"), + new TestCaseSupplier.TypedData(r2, type, "r2"), + new TestCaseSupplier.TypedData(r3, type, "r3") + ); + return testCase( + type, + typedData, + r1, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition=LiteralsEvaluator[lit="), + false, + List.of(typedData.get(1)), + addBuildEvaluatorWarnings(warnings) + ); + } + ) ); - }), new TestCaseSupplier("unsigned_long", List.of(DataType.BOOLEAN, DataType.UNSIGNED_LONG), () -> { - BigInteger value = randomUnsignedLongBetween(BigInteger.ZERO, UNSIGNED_LONG_MAX); - List typedData = List.of( - new TestCaseSupplier.TypedData(true, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.UNSIGNED_LONG, "trueValue") + } + case 1 -> { + suppliers.add( + new TestCaseSupplier( + "foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + Object r3 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1").forceLiteral(), + cond(cond2, "cond2").forceLiteral(), + new TestCaseSupplier.TypedData(r2, type, "r2").forceLiteral(), + new TestCaseSupplier.TypedData(r3, type, "r3") + ); + return testCase( + type, + typedData, + r2, + startsWith("LiteralsEvaluator[lit="), + true, + null, + addBuildEvaluatorWarnings(warnings) + ); + } + ) ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=LONG, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.UNSIGNED_LONG, - equalTo(value) + suppliers.add( + new TestCaseSupplier( + "partial foldable 1 " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = 
randomLiteral(type).value(); + Object r3 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1").forceLiteral(), + cond(cond2, "cond2").forceLiteral(), + new TestCaseSupplier.TypedData(r2, type, "r2"), + new TestCaseSupplier.TypedData(r3, type, "r3") + ); + return testCase( + type, + typedData, + r2, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition=LiteralsEvaluator[lit="), + false, + List.of(typedData.get(3)), + addWarnings(warnings) + ); + } + ) ); - }), new TestCaseSupplier("ip", List.of(DataType.BOOLEAN, DataType.IP), () -> { - BytesRef value = (BytesRef) randomLiteral(DataType.IP).value(); - List typedData = List.of( - new TestCaseSupplier.TypedData(true, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.IP, "trueValue") + suppliers.add( + new TestCaseSupplier( + "partial foldable 2 " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1").forceLiteral(), + cond(cond2, "cond2").forceLiteral(), + new TestCaseSupplier.TypedData(r2, type, "r2") + ); + return testCase( + type, + typedData, + r2, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition=LiteralsEvaluator[lit="), + false, + List.of(typedData.get(3)), + addWarnings(warnings) + ); + } + ) ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=BYTES_REF, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.IP, - equalTo(value) + suppliers.add( + new TestCaseSupplier( + "partial foldable 3 " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1").forceLiteral(), + cond(cond2, "cond2"), + new TestCaseSupplier.TypedData(r2, type, "r2") + ); + return testCase( + type, + typedData, + r2, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition=LiteralsEvaluator[lit="), + false, + typedData.subList(2, 4), + addWarnings(warnings) + ); + } + ) ); - }), new TestCaseSupplier("version", List.of(DataType.BOOLEAN, DataType.VERSION), () -> { - BytesRef value = (BytesRef) randomLiteral(DataType.VERSION).value(); - List typedData = List.of( - new TestCaseSupplier.TypedData(false, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.VERSION, "trueValue") + } + case 2 -> { + suppliers.add( + new TestCaseSupplier( + "foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + Object r3 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1"), + cond(cond2, "cond2").forceLiteral(), + new TestCaseSupplier.TypedData(r2, type, "r2"), + new TestCaseSupplier.TypedData(r3, type, "r3").forceLiteral() + ); + return 
testCase( + type, + typedData, + r3, + startsWith("LiteralsEvaluator[lit="), + true, + null, + addBuildEvaluatorWarnings(warnings) + ); + } + ) ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=BYTES_REF, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.VERSION, - nullValue() + suppliers.add( + new TestCaseSupplier( + "partial foldable 1 " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + Object r3 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1"), + cond(cond2, "cond2").forceLiteral(), + new TestCaseSupplier.TypedData(r2, type, "r2"), + new TestCaseSupplier.TypedData(r3, type, "r3") + ); + return testCase( + type, + typedData, + r3, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition=LiteralsEvaluator[lit="), + false, + List.of(typedData.get(4)), + addWarnings(warnings) + ); + } + ) ); - }), new TestCaseSupplier("cartesian_point", List.of(DataType.BOOLEAN, DataType.CARTESIAN_POINT), () -> { - BytesRef value = (BytesRef) randomLiteral(DataType.CARTESIAN_POINT).value(); - List typedData = List.of( - new TestCaseSupplier.TypedData(false, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.CARTESIAN_POINT, "trueValue") + suppliers.add( + new TestCaseSupplier( + "partial foldable 2 " + TestCaseSupplier.nameFrom(Arrays.asList(cond1, type, cond2, type, type)), + List.of(DataType.BOOLEAN, type, DataType.BOOLEAN, type, type), + () -> { + Object r1 = randomLiteral(type).value(); + Object r2 = randomLiteral(type).value(); + Object r3 = randomLiteral(type).value(); + List typedData = List.of( + cond(cond1, "cond1").forceLiteral(), + new TestCaseSupplier.TypedData(r1, type, "r1"), + cond(cond2, "cond2"), + new TestCaseSupplier.TypedData(r2, type, "r2"), + new TestCaseSupplier.TypedData(r3, type, "r3") + ); + return testCase( + type, + typedData, + r3, + startsWith("CaseEvaluator[conditions=[ConditionEvaluator[condition=LiteralsEvaluator[lit="), + false, + typedData.subList(2, 5), + addWarnings(warnings) + ); + } + ) ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=BYTES_REF, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.CARTESIAN_POINT, - nullValue() - ); - }), new TestCaseSupplier("geo_point", List.of(DataType.BOOLEAN, DataType.GEO_POINT), () -> { - BytesRef value = (BytesRef) randomLiteral(DataType.GEO_POINT).value(); - List typedData = List.of( - new TestCaseSupplier.TypedData(true, DataType.BOOLEAN, "cond"), - new TestCaseSupplier.TypedData(value, DataType.GEO_POINT, "trueValue") - ); - return new TestCaseSupplier.TestCase( - typedData, - "CaseEvaluator[resultType=BYTES_REF, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " - + "value=Attribute[channel=1]]], elseVal=LiteralsEvaluator[lit=null]]", - DataType.GEO_POINT, - equalTo(value) - ); - })) + } + default -> throw new IllegalArgumentException("unsupported " + result); + } + } + + private static Matcher toStringMatcher(int conditions, boolean trailingNull) { + StringBuilder result = new StringBuilder("CaseEvaluator[conditions=["); + int 
channel = 0; + for (int i = 0; i < conditions; i++) { + if (i != 0) { + result.append(", "); + } + result.append("ConditionEvaluator[condition=Attribute[channel=").append(channel++); + result.append("], value=Attribute[channel=").append(channel++).append("]]"); + } + if (trailingNull) { + result.append("], elseVal=LiteralsEvaluator[lit=null]]"); + } else { + result.append("], elseVal=Attribute[channel=").append(channel).append("]]"); + } + return equalTo(result.toString()); + } + + private static TestCaseSupplier.TypedData cond(Object cond, String name) { + return new TestCaseSupplier.TypedData(cond instanceof Supplier s ? s.get() : cond, DataType.BOOLEAN, name); + } + + private static TestCaseSupplier.TestCase testCase( + DataType type, + List typedData, + Object result, + Matcher evaluatorToString, + boolean foldable, + @Nullable List partialFold, + Function decorate + ) { + if (type == DataType.UNSIGNED_LONG && result != null) { + result = NumericUtils.unsignedLongAsBigInteger((Long) result); + } + return decorate.apply( + new TestCaseSupplier.TestCase(typedData, evaluatorToString, type, equalTo(result)).withExtra(new Extra(foldable, partialFold)) ); } @Override - protected Expression build(Source source, List args) { + protected Case build(Source source, List args) { return new Case(Source.EMPTY, args.get(0), args.subList(1, args.size())); } + + private static Supplier randomSingleValuedCondition() { + return new Supplier<>() { + @Override + public Boolean get() { + return randomBoolean(); + } + + @Override + public String toString() { + return "multivalue"; + } + }; + } + + private static Supplier> randomMultivaluedCondition() { + return new Supplier<>() { + @Override + public List get() { + return randomList(2, 100, ESTestCase::randomBoolean); + } + + @Override + public String toString() { + return "multivalue"; + } + }; + } + + public void testFancyFolding() { + if (testCase.getExpectedTypeError() != null) { + // Nothing to do + return; + } + Expression e = buildFieldExpression(testCase); + if (extra().foldable == false) { + assertThat(e.foldable(), equalTo(false)); + return; + } + assertThat(e.foldable(), equalTo(true)); + Object result = e.fold(); + if (testCase.getExpectedBuildEvaluatorWarnings() != null) { + assertWarnings(testCase.getExpectedBuildEvaluatorWarnings()); + } + if (testCase.expectedType() == DataType.UNSIGNED_LONG && result != null) { + result = NumericUtils.unsignedLongAsBigInteger((Long) result); + } + assertThat(result, testCase.getMatcher()); + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } + + public void testPartialFold() { + if (testCase.getExpectedTypeError() != null || extra().foldable()) { + // Nothing to do + return; + } + Case c = (Case) buildFieldExpression(testCase); + if (extra().expectedPartialFold == null) { + assertThat(c.partiallyFold(), sameInstance(c)); + return; + } + if (extra().expectedPartialFold.size() == 1) { + assertThat(c.partiallyFold(), equalTo(extra().expectedPartialFold.get(0).asField())); + return; + } + Case expected = build( + Source.synthetic("expected"), + extra().expectedPartialFold.stream().map(TestCaseSupplier.TypedData::asField).toList() + ); + assertThat(c.partiallyFold(), equalTo(expected)); + } + + private static Function addWarnings(List warnings) { + return c -> { + TestCaseSupplier.TestCase r = c; + for (String warning : warnings) { + r = r.withWarning(warning); + } + return r; + }; + } + + private static Function addBuildEvaluatorWarnings(List warnings) { + 
return c -> { + TestCaseSupplier.TestCase r = c; + for (String warning : warnings) { + r = r.withBuildEvaluatorWarning(warning); + } + return r; + }; + } + + private record Extra(boolean foldable, List expectedPartialFold) {} + + private Extra extra() { + return (Extra) testCase.extra(); + } + + @Override + protected Matcher allNullsMatcher() { + if (extra().foldable) { + return testCase.getMatcher(); + } + return super.allNullsMatcher(); + } + + private static String typeErrorMessage(boolean includeOrdinal, List types) { + if (types.get(0) != DataType.BOOLEAN && types.get(0) != DataType.NULL) { + return typeErrorMessage(includeOrdinal, types, 0, "boolean"); + } + DataType mainType = types.get(1); + for (int i = 2; i < types.size(); i++) { + if (i % 2 == 0 && i != types.size() - 1) { + // condition + if (types.get(i) != DataType.BOOLEAN && types.get(i) != DataType.NULL) { + return typeErrorMessage(includeOrdinal, types, i, "boolean"); + } + } else { + // value + if (types.get(i) != mainType) { + return typeErrorMessage(includeOrdinal, types, i, mainType.typeName()); + } + } + } + throw new IllegalStateException("can't find bad arg for " + types); + } + + private static String typeErrorMessage(boolean includeOrdinal, List types, int badArgPosition, String expectedTypeString) { + String ordinal = includeOrdinal ? TypeResolutions.ParamOrdinal.fromIndex(badArgPosition).name().toLowerCase(Locale.ROOT) + " " : ""; + String name = types.get(badArgPosition).typeName(); + return ordinal + "argument of [] must be [" + expectedTypeString + "], found value [" + name + "] type [" + name + "]"; + } } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml index 92b3f4d1b084d..359ac40bc3672 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/160_union_types.yml @@ -6,7 +6,7 @@ setup: parameters: [method, path, parameters, capabilities] capabilities: [union_types, union_types_remove_fields, casting_operator] reason: "Union types and casting operator introduced in 8.15.0" - test_runner_features: [capabilities, allowed_warnings_regex] + test_runner_features: [capabilities, allowed_warnings_regex, warnings_regex] - do: indices.create: @@ -830,3 +830,65 @@ load four indices with multiple conversion functions TO_LONG and TO_IP: - match: { values.21.2: "172.21.3.15" } - match: { values.21.3: 1756467 } - match: { values.21.4: "Connected to 10.1.0.1" } + +--- +CASE: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [case_mv] + reason: "CASE support for multivalue conditions introduced in 8.16.0" + + - do: + indices.create: + index: b1 + body: + mappings: + properties: + f: + type: keyword + - do: + indices.create: + index: b2 + body: + mappings: + properties: + f: + type: boolean + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "b1"}}' + - '{"a": 1, "f": false}' + - '{"index": {"_index": "b1"}}' + - '{"a": 2, "f": [true, false]}' + - '{"index": {"_index": "b2"}}' + - '{"a": 3, "f": true}' + + - do: + warnings_regex: + - ".+evaluation of \\[f?\\] failed, treating result as false. Only first 20 failures recorded." 
+ - ".+java.lang.IllegalArgumentException: CASE expects a single-valued boolean" + esql.query: + body: + query: 'FROM b* | EVAL c = CASE(f::BOOLEAN, "a", "b") | SORT a ASC | LIMIT 10' + + - match: { columns.0.name: "a" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "f" } + - match: { columns.1.type: "unsupported" } + - match: { columns.2.name: "c" } + - match: { columns.2.type: "keyword" } + - length: { values: 3 } + - match: { values.0.0: 1 } + - match: { values.0.1: null } + - match: { values.0.2: "b" } + - match: { values.1.0: 2 } + - match: { values.1.1: null } + - match: { values.1.2: "b" } + - match: { values.2.0: 3 } + - match: { values.2.1: null } + - match: { values.2.2: "a" } From 8a404d3a605b88b2a0758f8d6b47c2ca75bb0a6c Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Mon, 9 Sep 2024 10:57:17 -0600 Subject: [PATCH 03/31] Add comments in all-realms tests (#112627) --- .../xpack/security/authc/SecurityRealmSmokeTestCase.java | 2 ++ .../src/javaRestTest/resources/ldap_role_mapping.yml | 2 +- .../src/javaRestTest/resources/pki_role_mapping.yml | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/SecurityRealmSmokeTestCase.java b/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/SecurityRealmSmokeTestCase.java index d68e20b7fb187..567c81ead334b 100644 --- a/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/SecurityRealmSmokeTestCase.java +++ b/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/SecurityRealmSmokeTestCase.java @@ -83,9 +83,11 @@ public abstract class SecurityRealmSmokeTestCase extends ESRestTestCase { // - AD (configured but won't work because we don't want external fixtures in this test suite) .setting("xpack.security.authc.realms.active_directory.ad3.order", "3") .setting("xpack.security.authc.realms.active_directory.ad3.domain_name", "localhost") + // role mappings don't matter, but we need to read the file as part of the test .setting("xpack.security.authc.realms.active_directory.ad3.files.role_mapping", "ldap_role_mapping.yml") // - PKI (works) .setting("xpack.security.authc.realms.pki.pki4.order", "4") + // role mappings don't matter, but we need to read the file as part of the test .setting("xpack.security.authc.realms.pki.pki4.files.role_mapping", "pki_role_mapping.yml") // - SAML (configured but won't work because we don't want external fixtures in this test suite) .setting("xpack.security.authc.realms.saml.saml5.order", "5") diff --git a/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/resources/ldap_role_mapping.yml b/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/resources/ldap_role_mapping.yml index 20876fc96e31c..73ebdf44794b7 100644 --- a/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/resources/ldap_role_mapping.yml +++ b/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/resources/ldap_role_mapping.yml @@ -1,4 +1,4 @@ -# AD Realm Role Mapping +# AD Realm Role Mapping - not actually used, we just want some valid file content. 
group_role: - "CN=Avengers,CN=users,DC=ad,DC=test,DC=elasticsearch,DC=com" user_role: diff --git a/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/resources/pki_role_mapping.yml b/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/resources/pki_role_mapping.yml index 60865c002d13e..0346aa57f5665 100644 --- a/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/resources/pki_role_mapping.yml +++ b/x-pack/plugin/security/qa/smoke-test-all-realms/src/javaRestTest/resources/pki_role_mapping.yml @@ -1,4 +1,4 @@ -# Role mappings for PKI tests +# Role mappings for PKI tests - not actually used, we just want some valid file content. user: - "CN=Elasticsearch Test Node, OU=elasticsearch, O=org" From d098c4519b1edff22b0a33ea647a15cba9d95b6b Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Mon, 9 Sep 2024 19:08:37 +0200 Subject: [PATCH 04/31] Cleanup spatial functions doc-values serialization (#112663) * Cleanup of spatial function doc-values serialization After some recent work to move more serialization code to NamedWritables (#112622), it was noted that the spatial functions involving the optional loading of doc-values did things a little differently, with doc-values only ever planned during local physical planning, and therefore never serialized. This is a follow-up PR to improve clarity, but it also deletes an unused class (left over from the move from QL to esql-core). Serialization is not changed; this is just a simple code cleanup PR. * Remove non-serializable fields from node info The EsqlNodeSubclassTests.testInfoParameters test looks for the largest constructor and assumes this represents the correct node info. This is no longer the case, since we made that constructor private, because it is the non-default path.
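To make the pattern concrete, here is a minimal, hypothetical sketch of the idea; the class, method names, and the planner hook are invented for illustration and are not the actual Elasticsearch classes. A node-local doc-values flag is deliberately left out of the wire format and re-derived by a local physical-planner rule on each data node:

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Hypothetical stand-in for a spatial function that carries a doc-values flag.
record SpatialFunctionSketch(String fieldName, boolean useDocValues) {

    // Only the field name crosses the wire; useDocValues is intentionally omitted.
    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(fieldName);
    }

    // Deserialization always starts with useDocValues = false ...
    static SpatialFunctionSketch readFrom(DataInput in) throws IOException {
        return new SpatialFunctionSketch(in.readUTF(), false);
    }

    // ... and a local planner rule flips it once it decides doc-values can be used.
    SpatialFunctionSketch withDocValues() {
        return new SpatialFunctionSketch(fieldName, true);
    }
}
```

Because the flag never crosses the wire, the coordinator and the data nodes never need to agree on it; each data node re-resolves it from its own local plan.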
--- .../aggregate/SpatialAggregateFunction.java | 1 + .../scalar/spatial/BinarySpatialFunction.java | 3 ++ .../physical/local/InsertFieldExtraction.java | 2 +- .../local/SpatialDocValuesExtraction.java | 4 +- .../esql/plan/physical/FieldExtractExec.java | 16 ++++-- .../FieldExtractExecSerializationTests.java | 7 +-- .../aggregate/SpatialAggregateFunction.java | 51 ------------------- 7 files changed, 21 insertions(+), 63 deletions(-) delete mode 100644 x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/function/aggregate/SpatialAggregateFunction.java diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java index d54d20eb4115f..5cb7edf2581d5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java @@ -29,6 +29,7 @@ protected SpatialAggregateFunction(Source source, Expression field, boolean useD protected SpatialAggregateFunction(StreamInput in, boolean useDocValues) throws IOException { super(in); + // The useDocValues field is only used during local planning on data nodes, and therefore never serialized this.useDocValues = useDocValues; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java index 84d776888c7ae..72dd052fc7637 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java @@ -61,6 +61,7 @@ protected BinarySpatialFunction( } protected BinarySpatialFunction(StreamInput in, boolean leftDocValues, boolean rightDocValues, boolean pointsOnly) throws IOException { + // The doc-values fields are only used during local planning on data nodes, and therefore never serialized this( Source.EMPTY, in.readNamedWriteable(Expression.class), @@ -75,6 +76,8 @@ protected BinarySpatialFunction(StreamInput in, boolean leftDocValues, boolean r public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(left()); out.writeNamedWriteable(right()); + // The doc-values fields are only used during local planning on data nodes, and therefore never serialized + // The CRS type is re-resolved from the combination of left and right fields, and also not necessary to serialize } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java index fad1fac46e57d..7186a5194a262 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java @@ -59,7 +59,7 @@ public PhysicalPlan apply(PhysicalPlan plan) { // add extractor if (missing.isEmpty() == false) { // collect source attributes and add the
extractor - var extractor = new FieldExtractExec(p.source(), p.child(), List.copyOf(missing), Set.of()); + var extractor = new FieldExtractExec(p.source(), p.child(), List.copyOf(missing)); p = p.replaceChild(extractor); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java index 0bad99375d315..42dbc5f1a6fd5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java @@ -101,8 +101,8 @@ && allowedForDocValues(fieldAttribute, agg, foundAttributes)) { docValuesAttributes.add(found); } } - if (docValuesAttributes.size() > 0) { - exec = new FieldExtractExec(exec.source(), exec.child(), attributesToExtract, docValuesAttributes); + if (docValuesAttributes.isEmpty() == false) { + exec = fieldExtractExec.withDocValuesAttributes(docValuesAttributes); } } return exec; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java index f7c175960010d..7b51450d9f5e8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExec.java @@ -46,7 +46,11 @@ public class FieldExtractExec extends UnaryExec implements EstimatesRowSize { private List lazyOutput; - public FieldExtractExec(Source source, PhysicalPlan child, List attributesToExtract, Set docValuesAttributes) { + public FieldExtractExec(Source source, PhysicalPlan child, List attributesToExtract) { + this(source, child, attributesToExtract, Set.of()); + } + + private FieldExtractExec(Source source, PhysicalPlan child, List attributesToExtract, Set docValuesAttributes) { super(source, child); this.attributesToExtract = attributesToExtract; this.sourceAttribute = extractSourceAttributesFrom(child); @@ -57,9 +61,9 @@ private FieldExtractExec(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), ((PlanStreamInput) in).readPhysicalPlanNode(), - in.readNamedWriteableCollectionAsList(Attribute.class), - Set.of() // docValueAttributes are only used on the data node and never serialized. + in.readNamedWriteableCollectionAsList(Attribute.class) ); + // docValueAttributes are only used on the data node and never serialized. 
} @Override @@ -96,7 +100,7 @@ protected AttributeSet computeReferences() { @Override protected NodeInfo info() { - return NodeInfo.create(this, FieldExtractExec::new, child(), attributesToExtract, docValuesAttributes); + return NodeInfo.create(this, FieldExtractExec::new, child(), attributesToExtract); } @Override @@ -104,6 +108,10 @@ public UnaryExec replaceChild(PhysicalPlan newChild) { return new FieldExtractExec(source(), newChild, attributesToExtract, docValuesAttributes); } + public FieldExtractExec withDocValuesAttributes(Set docValuesAttributes) { + return new FieldExtractExec(source(), child(), attributesToExtract, docValuesAttributes); + } + public List attributesToExtract() { return attributesToExtract; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExecSerializationTests.java index a05d5fac431a4..da77ea4c2497f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FieldExtractExecSerializationTests.java @@ -12,15 +12,13 @@ import java.io.IOException; import java.util.List; -import java.util.Set; public class FieldExtractExecSerializationTests extends AbstractPhysicalPlanSerializationTests { public static FieldExtractExec randomFieldExtractExec(int depth) { Source source = randomSource(); PhysicalPlan child = randomChild(depth); List attributesToExtract = randomFieldAttributes(1, 4, false); - Set docValuesAttributes = Set.of(); // These are never serialized - return new FieldExtractExec(source, child, attributesToExtract, docValuesAttributes); + return new FieldExtractExec(source, child, attributesToExtract); } @Override @@ -32,13 +30,12 @@ protected FieldExtractExec createTestInstance() { protected FieldExtractExec mutateInstance(FieldExtractExec instance) throws IOException { PhysicalPlan child = instance.child(); List attributesToExtract = instance.attributesToExtract(); - Set docValuesAttributes = Set.of(); // These are never serialized if (randomBoolean()) { child = randomValueOtherThan(child, () -> randomChild(0)); } else { attributesToExtract = randomValueOtherThan(attributesToExtract, () -> randomFieldAttributes(1, 4, false)); } - return new FieldExtractExec(instance.source(), child, attributesToExtract, docValuesAttributes); + return new FieldExtractExec(instance.source(), child, attributesToExtract); } @Override diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/function/aggregate/SpatialAggregateFunction.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/function/aggregate/SpatialAggregateFunction.java deleted file mode 100644 index e73d0e71eb246..0000000000000 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/function/aggregate/SpatialAggregateFunction.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.ql.expression.function.aggregate; - -import org.elasticsearch.xpack.ql.expression.Expression; -import org.elasticsearch.xpack.ql.tree.Source; - -import java.util.Objects; - -/** - * All spatial aggregate functions extend this class to enable the planning of reading from doc values for higher performance. - * The AggregateMapper class will generate multiple aggregation functions for each combination, allowing the planner to - * select the best one. - */ -public abstract class SpatialAggregateFunction extends AggregateFunction { - protected final boolean useDocValues; - - protected SpatialAggregateFunction(Source source, Expression field, boolean useDocValues) { - super(source, field); - this.useDocValues = useDocValues; - } - - public abstract SpatialAggregateFunction withDocValues(); - - @Override - public int hashCode() { - // NB: the hashcode is currently used for key generation so - // to avoid clashes between aggs with the same arguments, add the class name as variation - return Objects.hash(getClass(), children(), useDocValues); - } - - @Override - public boolean equals(Object obj) { - if (super.equals(obj)) { - SpatialAggregateFunction other = (SpatialAggregateFunction) obj; - return Objects.equals(other.field(), field()) - && Objects.equals(other.parameters(), parameters()) - && Objects.equals(other.useDocValues, useDocValues); - } - return false; - } - - public boolean useDocValues() { - return useDocValues; - } -} From fe203ed6a30a322caa2ee8468c65830fe53f38d0 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 10 Sep 2024 03:14:42 +1000 Subject: [PATCH 05/31] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT test {case-functions.testSelectInsertWithLcaseAndLengthWithOrderBy} #112642 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 53570b3432721..486a5448e0b94 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -207,6 +207,9 @@ tests: - class: org.elasticsearch.packaging.test.PackagesSecurityAutoConfigurationTests method: test20SecurityNotAutoConfiguredOnReInstallation issue: https://github.com/elastic/elasticsearch/issues/112635 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT + method: test {case-functions.testSelectInsertWithLcaseAndLengthWithOrderBy} + issue: https://github.com/elastic/elasticsearch/issues/112642 # Examples: # From eeaf32b70d4af7298e894870a739b0b3d5c20fe7 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 10 Sep 2024 03:15:00 +1000 Subject: [PATCH 06/31] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT test {case-functions.testUcaseInline1} #112641 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 486a5448e0b94..22abc632db8cb 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -210,6 +210,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT method: test {case-functions.testSelectInsertWithLcaseAndLengthWithOrderBy} issue: https://github.com/elastic/elasticsearch/issues/112642 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT + method: test {case-functions.testUcaseInline1} + issue: https://github.com/elastic/elasticsearch/issues/112641 # Examples: # From f1a62a0142ee4f159c275cd7e586e8c3325d754f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine 
<58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 10 Sep 2024 03:15:13 +1000 Subject: [PATCH 07/31] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT test {case-functions.testUpperCasingTheSecondLetterFromTheRightFromFirstName} #112640 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 22abc632db8cb..756193d836ef3 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -213,6 +213,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT method: test {case-functions.testUcaseInline1} issue: https://github.com/elastic/elasticsearch/issues/112641 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT + method: test {case-functions.testUpperCasingTheSecondLetterFromTheRightFromFirstName} + issue: https://github.com/elastic/elasticsearch/issues/112640 # Examples: # From c946617df7baecc02cce913bd5f8469025503166 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 10 Sep 2024 03:15:25 +1000 Subject: [PATCH 08/31] Mute org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT test {case-functions.testUcaseInline3} #112643 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 756193d836ef3..0391504eeac6e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -216,6 +216,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT method: test {case-functions.testUpperCasingTheSecondLetterFromTheRightFromFirstName} issue: https://github.com/elastic/elasticsearch/issues/112640 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT + method: test {case-functions.testUcaseInline3} + issue: https://github.com/elastic/elasticsearch/issues/112643 # Examples: # From 72248e33fca6c0bd49b5f7558c760965def89d91 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Sep 2024 13:59:47 -0400 Subject: [PATCH 09/31] ESQL: Compute support for filtering grouping aggs (#112476) Adds support to the compute engine for filtering which positions are processed by grouping aggs. This should allow syntax like ``` | STATS success = COUNT(*) WHERE 200 <= response_code AND response_code < 300, redirect = COUNT(*) WHERE 300 <= response_code AND response_code < 400, client_err = COUNT(*) WHERE 400 <= response_code AND response_code < 500, server_err = COUNT(*) WHERE 500 <= response_code AND response_code < 600, total_count = COUNT(*) BY hostname ``` We could translate the WHERE expression into an `ExpressionEvaluator` and run it, then plug it into the filtering support added in this PR. The actual filtering is done by creating a `FilteredGroupingAggregatorFunction` which runs wraps a regular `GroupingAggregatorFunction` first executing the filter against the incoming `Page` and then `null`ing any positions in the group that don't match. Then passing the resulting groups into the real aggregator. When the real grouping aggregator implementation sees `null` value for groups it skips collecting that position. We had to make two changes to every agg for this to work: 1. Add a method to force local group tracking mode on any aggregator. Previously this was only required if the agg encountered `null` values, but when we're filtering aggs we can no longer trust the `seen` parameter we get when building the result. This local group tracking mode let's us track what we've actually seen locally. 2. 
Add `Releasable` to the `AddInput` thing we use to handle chunked pages in grouping aggs. This is required because the results of the filter must be closed on completion. Both of these are fairly trivial changes, but require touching every aggregation. --- .../gen/GroupingAggregatorImplementer.java | 12 ++ ...inctBooleanGroupingAggregatorFunction.java | 13 ++ ...nctBytesRefGroupingAggregatorFunction.java | 13 ++ ...tinctDoubleGroupingAggregatorFunction.java | 13 ++ ...stinctFloatGroupingAggregatorFunction.java | 13 ++ ...DistinctIntGroupingAggregatorFunction.java | 13 ++ ...istinctLongGroupingAggregatorFunction.java | 13 ++ .../MaxBooleanGroupingAggregatorFunction.java | 13 ++ ...MaxBytesRefGroupingAggregatorFunction.java | 13 ++ .../MaxDoubleGroupingAggregatorFunction.java | 13 ++ .../MaxFloatGroupingAggregatorFunction.java | 13 ++ .../MaxIntGroupingAggregatorFunction.java | 13 ++ .../MaxIpGroupingAggregatorFunction.java | 13 ++ .../MaxLongGroupingAggregatorFunction.java | 13 ++ ...ationDoubleGroupingAggregatorFunction.java | 13 ++ ...iationFloatGroupingAggregatorFunction.java | 13 ++ ...eviationIntGroupingAggregatorFunction.java | 13 ++ ...viationLongGroupingAggregatorFunction.java | 13 ++ .../MinBooleanGroupingAggregatorFunction.java | 13 ++ ...MinBytesRefGroupingAggregatorFunction.java | 13 ++ .../MinDoubleGroupingAggregatorFunction.java | 13 ++ .../MinFloatGroupingAggregatorFunction.java | 13 ++ .../MinIntGroupingAggregatorFunction.java | 13 ++ .../MinIpGroupingAggregatorFunction.java | 13 ++ .../MinLongGroupingAggregatorFunction.java | 13 ++ ...ntileDoubleGroupingAggregatorFunction.java | 13 ++ ...entileFloatGroupingAggregatorFunction.java | 13 ++ ...rcentileIntGroupingAggregatorFunction.java | 13 ++ ...centileLongGroupingAggregatorFunction.java | 13 ++ .../RateDoubleGroupingAggregatorFunction.java | 13 ++ .../RateFloatGroupingAggregatorFunction.java | 13 ++ .../RateIntGroupingAggregatorFunction.java | 13 ++ .../RateLongGroupingAggregatorFunction.java | 13 ++ .../SumDoubleGroupingAggregatorFunction.java | 13 ++ .../SumFloatGroupingAggregatorFunction.java | 13 ++ .../SumIntGroupingAggregatorFunction.java | 13 ++ .../SumLongGroupingAggregatorFunction.java | 13 ++ .../TopBooleanGroupingAggregatorFunction.java | 13 ++ .../TopDoubleGroupingAggregatorFunction.java | 13 ++ .../TopFloatGroupingAggregatorFunction.java | 13 ++ .../TopIntGroupingAggregatorFunction.java | 13 ++ .../TopIpGroupingAggregatorFunction.java | 13 ++ .../TopLongGroupingAggregatorFunction.java | 13 ++ ...luesBooleanGroupingAggregatorFunction.java | 13 ++ ...uesBytesRefGroupingAggregatorFunction.java | 13 ++ ...aluesDoubleGroupingAggregatorFunction.java | 13 ++ ...ValuesFloatGroupingAggregatorFunction.java | 13 ++ .../ValuesIntGroupingAggregatorFunction.java | 13 ++ .../ValuesLongGroupingAggregatorFunction.java | 13 ++ ...ntDocValuesGroupingAggregatorFunction.java | 13 ++ ...ourceValuesGroupingAggregatorFunction.java | 13 ++ ...ntDocValuesGroupingAggregatorFunction.java | 13 ++ ...ourceValuesGroupingAggregatorFunction.java | 13 ++ .../CountGroupingAggregatorFunction.java | 11 ++ .../FilteredAggregatorFunctionSupplier.java | 46 +++++ .../FilteredGroupingAggregatorFunction.java | 117 ++++++++++++ ...FromPartialGroupingAggregatorFunction.java | 8 + .../aggregation/GroupingAggregator.java | 3 + .../GroupingAggregatorFunction.java | 20 ++- .../ToPartialGroupingAggregatorFunction.java | 5 + .../aggregation/blockhash/AddBlock.java | 3 +- .../table/BlockHashRowInTableLookup.java | 3 + .../operator/HashAggregationOperator.java | 
21 ++- ...lteredGroupingAggregatorFunctionTests.java | 169 ++++++++++++++++++ .../GroupingAggregatorFunctionTestCase.java | 27 ++- .../aggregation/blockhash/AddBlockTests.java | 11 ++ .../aggregation/blockhash/BlockHashTests.java | 9 + .../function/AbstractAggregationTestCase.java | 35 ++-- 68 files changed, 1145 insertions(+), 31 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index 3dffbcf84eb78..23240bbd50ea6 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -182,6 +182,7 @@ private TypeSpec type() { builder.addMethod(addRawInputLoop(INT_VECTOR, valueVectorType(init, combine))); builder.addMethod(addRawInputLoop(INT_BLOCK, valueBlockType(init, combine))); builder.addMethod(addRawInputLoop(INT_BLOCK, valueVectorType(init, combine))); + builder.addMethod(selectedMayContainUnseenGroups()); builder.addMethod(addIntermediateInput()); builder.addMethod(addIntermediateRowInput()); builder.addMethod(evaluateIntermediate()); @@ -338,6 +339,9 @@ private TypeSpec addInput(Consumer addBlock) { addBlock.accept(vector); builder.addMethod(vector.build()); + MethodSpec.Builder close = MethodSpec.methodBuilder("close").addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); + builder.addMethod(close.build()); + return builder.build(); } @@ -485,6 +489,14 @@ private void combineRawInputForBytesRef(MethodSpec.Builder builder, String block builder.addStatement("$T.combine(state, groupId, $L.getBytesRef($L, scratch))", declarationType, blockVariable, offsetVariable); } + private MethodSpec selectedMayContainUnseenGroups() { + MethodSpec.Builder builder = MethodSpec.methodBuilder("selectedMayContainUnseenGroups"); + builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); + builder.addParameter(SEEN_GROUP_IDS, "seenGroupIds"); + builder.addStatement("state.enableGroupIdTracking(seenGroupIds)"); + return builder.build(); + } + private MethodSpec addIntermediateInput() { MethodSpec.Builder builder = MethodSpec.methodBuilder("addIntermediateInput"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java index 98e57b71db416..4cdecd9944f7b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanGroupingAggregatorFunction.java @@ -73,6 +73,10 @@ public void add(int positionOffset, IntBlock groupIds) { public 
void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -85,6 +89,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -144,6 +152,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java index 35fd83598b9d6..2261a60ff247e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefGroupingAggregatorFunction.java @@ -76,6 +76,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -88,6 +92,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -151,6 +159,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java index 894b81b311363..c769a157e5ecb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleGroupingAggregatorFunction.java @@ -78,6 +78,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -90,6 +94,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -149,6 +157,11 @@ private void addRawInput(int 
positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java index 5f6b4211e6c5e..0b1c93aad5e2b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctFloatGroupingAggregatorFunction.java @@ -78,6 +78,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -90,6 +94,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -149,6 +157,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java index 83300393e560d..7642ca7dcc6a0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntGroupingAggregatorFunction.java @@ -76,6 +76,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -88,6 +92,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -147,6 +155,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java index 44e9fefb3161c..00d0e955ba88a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongGroupingAggregatorFunction.java @@ -78,6 +78,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -90,6 +94,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -149,6 +157,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java index 084e346a7b093..dd7760273bfa6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBooleanGroupingAggregatorFunction.java @@ -73,6 +73,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -85,6 +89,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -144,6 +152,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java index a50cf8593a6e1..fcb87428e9b7d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxBytesRefGroupingAggregatorFunction.java @@ -76,6 +76,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector 
groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -88,6 +92,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -151,6 +159,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java index b874bc43dc238..42588ea81367c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java index f3ebd468ebc72..006ee147b15e1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxFloatGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { 
+ state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java index 8b364e7a02e96..faea5a63eac93 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntGroupingAggregatorFunction.java @@ -73,6 +73,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -85,6 +89,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -144,6 +152,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java index a722d95f3b108..f5715949094f7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIpGroupingAggregatorFunction.java @@ -76,6 +76,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -88,6 +92,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -151,6 +159,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java index fee2f5a9c2e7c..a5f115ad0d2b1 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java index 836248428f231..4a6fc2bfce4f9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java index 7a67f0d3449f0..35f18ef0df552 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationFloatGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, 
valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java index 315034a28ff8f..9819f4472c1a5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunction.java @@ -73,6 +73,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -85,6 +89,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -144,6 +152,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java index af0374012be52..70da9fb19568f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector 
values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java index 45e677ee25b56..71e636001cd5f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBooleanGroupingAggregatorFunction.java @@ -73,6 +73,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -85,6 +89,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -144,6 +152,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java index e092dd93210f6..1650c6c513fdd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinBytesRefGroupingAggregatorFunction.java @@ -76,6 +76,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -88,6 +92,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -151,6 +159,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java index 
970a8a7597514..729c77a225049 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java index 4e8b4cc9417c8..d3d59935e62d5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinFloatGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java index 6e976a582a892..7095608ca50cc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntGroupingAggregatorFunction.java @@ -73,6 +73,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -85,6 +89,10 @@ 
public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -144,6 +152,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java index 146515d363af7..0cb4154009a90 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIpGroupingAggregatorFunction.java @@ -76,6 +76,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -88,6 +92,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -151,6 +159,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java index a3db9a2704660..0498c4b8d866b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff 
--git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java index 871e93a72d900..c10d25c059682 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleGroupingAggregatorFunction.java @@ -78,6 +78,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -90,6 +94,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -149,6 +157,11 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java index 8b0f28b2632d1..982b07da1bd8d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileFloatGroupingAggregatorFunction.java @@ -78,6 +78,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -90,6 +94,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -149,6 +157,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java index fc1031dcbe0d0..ed50eb683ba97 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunction.java @@ -76,6 +76,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -88,6 +92,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -147,6 +155,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java index 1b14f02356b8f..12f64133d10f2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongGroupingAggregatorFunction.java @@ -78,6 +78,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -90,6 +94,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -149,6 +157,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java index c85cf78a39c45..2fca5c1d19c5e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java @@ -85,6 +85,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -97,6 +101,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector 
groupIds) { addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); } + + @Override + public void close() { + } }; } @@ -162,6 +170,11 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java index a5d2131a2445a..628503f12900e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java @@ -87,6 +87,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -99,6 +103,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); } + + @Override + public void close() { + } }; } @@ -164,6 +172,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java index 0fb0b05c11164..2f030544da612 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java @@ -85,6 +85,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -97,6 +101,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); } + + @Override + public void close() { + } }; } @@ -162,6 +170,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values, } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); 
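Every generated grouping aggregator function in this patch picks up the same two hooks: an empty `close()` on each anonymous `GroupingAggregatorFunction.AddInput`, and a `selectedMayContainUnseenGroups(SeenGroupIds)` override that forwards straight to `state.enableGroupIdTracking(seenGroupIds)`. The point, presumably, is that the new `FilteredGroupingAggregatorFunction` from the diffstat can drop rows before they ever reach the wrapped function, so the wrapped state has to track which group ids it actually saw instead of assuming every selected group received input. Below is a minimal, self-contained sketch of that delegation pattern; the types are hypothetical reductions of the real `GroupingAggregatorFunction` and `SeenGroupIds` interfaces, not the actual API.

```java
// Illustrative sketch only: simplified stand-ins for the generated classes in the
// hunks above. The real interfaces live in org.elasticsearch.compute.aggregation;
// the types here are hypothetical reductions used to show the delegation pattern.
import java.util.BitSet;

interface SeenGroupIds {
    BitSet seenGroupIds();
}

interface GroupingFunction {
    // The hook this patch adds (simplified signature): the caller warns the function
    // that the groups it will be asked to evaluate may include ids that never
    // received a raw input row, e.g. because a filter dropped them.
    void selectedMayContainUnseenGroups(SeenGroupIds seen);
}

final class ExampleGroupingFunction implements GroupingFunction {
    private final BitSet tracked = new BitSet();

    @Override
    public void selectedMayContainUnseenGroups(SeenGroupIds seen) {
        // Mirrors the generated one-liner bodies: hand the seen-group information to
        // the aggregation state so unseen groups evaluate cleanly instead of reading
        // uninitialized state.
        tracked.or(seen.seenGroupIds());
    }
}

final class FilteredGroupingFunction implements GroupingFunction {
    private final GroupingFunction inner;

    FilteredGroupingFunction(GroupingFunction inner) {
        this.inner = inner;
    }

    @Override
    public void selectedMayContainUnseenGroups(SeenGroupIds seen) {
        // A filtering wrapper can only delegate: rows it filtered out look like
        // unseen groups to the function it wraps.
        inner.selectedMayContainUnseenGroups(seen);
    }
}
```

In the real generated code the override is exactly the one-line `state.enableGroupIdTracking(seenGroupIds)` shown in the hunks above and below; only the wrapper behaviour is assumed here.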
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java index 82297b618b03e..fd272e47fa6a3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java @@ -85,6 +85,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -97,6 +101,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); } + + @Override + public void close() { + } }; } @@ -162,6 +170,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values, } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java index 4f0bcae66ee4a..71b282c58aca2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleGroupingAggregatorFunction.java @@ -76,6 +76,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -88,6 +92,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -147,6 +155,11 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java index 2f4165dfeadfa..664f616acee9d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumFloatGroupingAggregatorFunction.java @@ -78,6 +78,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -90,6 +94,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -149,6 +157,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java index 95d380c455bf4..2f369374d8cdb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java index 324d8f53e65cb..c8c0990de4e54 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongGroupingAggregatorFunction.java @@ -75,6 +75,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -87,6 +91,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + 
@Override + public void close() { + } }; } @@ -146,6 +154,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java index d169c456329b7..cd35595eeadb0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopBooleanGroupingAggregatorFunction.java @@ -79,6 +79,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -91,6 +95,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -150,6 +158,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java index 07da387f88ce6..6b76ff7772ad1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopDoubleGroupingAggregatorFunction.java @@ -79,6 +79,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -91,6 +95,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -150,6 +158,11 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java index 369fa7401e508..ffaf858645440 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopFloatGroupingAggregatorFunction.java @@ -79,6 +79,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -91,6 +95,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -150,6 +158,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java index 04b53fe6aab69..a3453126e055e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIntGroupingAggregatorFunction.java @@ -77,6 +77,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -89,6 +93,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -148,6 +156,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java index 272b4827b5817..74a6987962b78 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopIpGroupingAggregatorFunction.java @@ -80,6 +80,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + 
+ @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -92,6 +96,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -155,6 +163,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java index 9d1ed395c5964..b4a4b7154e626 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/TopLongGroupingAggregatorFunction.java @@ -79,6 +79,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -91,6 +95,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -150,6 +158,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java index 062a49dbf4f7c..b51da118e0f8d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanGroupingAggregatorFunction.java @@ -72,6 +72,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -84,6 +88,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -143,6 +151,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BooleanVector valu } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + 
@Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java index 0a929913e9fde..bdce606f92168 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBytesRefGroupingAggregatorFunction.java @@ -73,6 +73,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -85,6 +89,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -148,6 +156,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java index b8ca2d2b9665b..5b8c2ac802663 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunction.java @@ -72,6 +72,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -84,6 +88,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -143,6 +151,11 @@ private void addRawInput(int positionOffset, IntBlock groups, DoubleVector value } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java index 0c4e9c32328c7..f50c5a67d15a5 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunction.java @@ -72,6 +72,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -84,6 +88,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -143,6 +151,11 @@ private void addRawInput(int positionOffset, IntBlock groups, FloatVector values } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java index 95e527c018cd1..c90fcedb291cf 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunction.java @@ -70,6 +70,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -82,6 +86,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -141,6 +149,11 @@ private void addRawInput(int positionOffset, IntBlock groups, IntVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java index a7963447037a8..8a79cd7d942ee 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunction.java @@ -72,6 +72,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -84,6 +88,10 @@ public void 
add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -143,6 +151,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java index dc3c1cf2917ec..cc2fb38bb925c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointDocValuesGroupingAggregatorFunction.java @@ -81,6 +81,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -93,6 +97,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -152,6 +160,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java index 0d1378ce988f3..6ae2b444efe98 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidCartesianPointSourceValuesGroupingAggregatorFunction.java @@ -84,6 +84,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -96,6 +100,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -159,6 +167,11 @@ private void addRawInput(int positionOffset, IntBlock 
groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java index f5604e9e23200..0cce9b7cf1cd5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointDocValuesGroupingAggregatorFunction.java @@ -81,6 +81,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -93,6 +97,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -152,6 +160,11 @@ private void addRawInput(int positionOffset, IntBlock groups, LongVector values) } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java index b3caeef925a73..6c4d6635846df 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialCentroidGeoPointSourceValuesGroupingAggregatorFunction.java @@ -84,6 +84,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() { + } }; } return new GroupingAggregatorFunction.AddInput() { @@ -96,6 +100,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesVector); } + + @Override + public void close() { + } }; } @@ -159,6 +167,11 @@ private void addRawInput(int positionOffset, IntBlock groups, BytesRefVector val } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java index 5dba070172ae9..f610abf271cfa 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java @@ -76,6 +76,9 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(positionOffset, groupIds, valuesBlock); } + + @Override + public void close() {} }; } } @@ -89,6 +92,9 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addRawInput(groupIds); } + + @Override + public void close() {} }; } @@ -149,6 +155,11 @@ private void addRawInput(IntBlock groups) { } } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { assert channels.size() == intermediateBlockCount(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..c8a8696c03449 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionSupplier.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; + +/** + * A {@link AggregatorFunctionSupplier} that wraps another, filtering which positions + * are supplied to the aggregator. 
+ */ +public record FilteredAggregatorFunctionSupplier(AggregatorFunctionSupplier next, EvalOperator.ExpressionEvaluator.Factory filter) + implements + AggregatorFunctionSupplier { + + @Override + public AggregatorFunction aggregator(DriverContext driverContext) { + throw new UnsupportedOperationException("TODO"); + } + + @Override + public GroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + GroupingAggregatorFunction next = this.next.groupingAggregator(driverContext); + EvalOperator.ExpressionEvaluator filter = null; + try { + filter = this.filter.get(driverContext); + GroupingAggregatorFunction result = new FilteredGroupingAggregatorFunction(next, filter); + next = null; + filter = null; + return result; + } finally { + Releasables.closeExpectNoException(next, filter); + } + } + + @Override + public String describe() { + return "Filtered[next=" + next.describe() + ", filter=" + filter + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..c8dd80d7afe99 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.ToMask; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; + +import java.util.stream.IntStream; + +/** + * A {@link GroupingAggregatorFunction} that wraps another, filtering which positions + * are supplied to the aggregator. 
+ */ +record FilteredGroupingAggregatorFunction(GroupingAggregatorFunction next, EvalOperator.ExpressionEvaluator filter) + implements + GroupingAggregatorFunction { + + FilteredGroupingAggregatorFunction { + next.selectedMayContainUnseenGroups(new SeenGroupIds.Empty()); + } + + @Override + public AddInput prepareProcessPage(SeenGroupIds seenGroupIds, Page page) { + try (BooleanBlock filterResult = ((BooleanBlock) filter.eval(page))) { + ToMask mask = filterResult.toMask(); + // TODO warn on mv fields + AddInput nextAdd = null; + try { + nextAdd = next.prepareProcessPage(seenGroupIds, page); + AddInput result = new FilteredAddInput(mask.mask(), nextAdd, page.getPositionCount()); + mask = null; + nextAdd = null; + return result; + } finally { + Releasables.close(mask, nextAdd); + } + } + } + + private record FilteredAddInput(BooleanVector mask, AddInput nextAdd, int positionCount) implements AddInput { + @Override + public void add(int positionOffset, IntBlock groupIds) { + if (positionOffset == 0) { + try (IntBlock filtered = groupIds.keepMask(mask)) { + nextAdd.add(positionOffset, filtered); + } + } else { + try ( + BooleanVector offsetMask = mask.filter( + IntStream.range(positionOffset, positionOffset + groupIds.getPositionCount()).toArray() + ); + IntBlock filtered = groupIds.keepMask(offsetMask) + ) { + nextAdd.add(positionOffset, filtered); + } + } + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + add(positionOffset, groupIds.asBlock()); + } + + @Override + public void close() { + Releasables.close(mask, nextAdd); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + // nothing to do - we already put the underlying agg into this state + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Page page) { + next.addIntermediateInput(positionOffset, groupIdVector, page); + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + next.addIntermediateRowInput(groupId, input, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + next.evaluateIntermediate(blocks, offset, selected); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + next.evaluateFinal(blocks, offset, selected, driverContext); + } + + @Override + public int intermediateBlockCount() { + return next.intermediateBlockCount(); + } + + @Override + public void close() { + Releasables.closeExpectNoException(next, filter); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialGroupingAggregatorFunction.java index 675fbe88f1984..5c1a223404564 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FromPartialGroupingAggregatorFunction.java @@ -51,9 +51,17 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { addIntermediateInput(positionOffset, groupIds, page); } + + @Override + public void close() {} }; } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + 
delegate.selectedMayContainUnseenGroups(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Page page) { final CompositeBlock inputBlock = page.getBlock(inputChannel); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/GroupingAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/GroupingAggregator.java index 21bcded6caee1..3612ca9996192 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/GroupingAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/GroupingAggregator.java @@ -49,6 +49,9 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { aggregatorFunction.addIntermediateInput(positionOffset, groupIds, page); } + + @Override + public void close() {} }; } else { return aggregatorFunction.prepareProcessPage(seenGroupIds, page); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunction.java index b2f8e6b1bc33d..fbd2ddaa816b7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunction.java @@ -24,7 +24,7 @@ public interface GroupingAggregatorFunction extends Releasable { * Consume group ids to cause the {@link GroupingAggregatorFunction} * to group values at a particular position into a particular group. */ - interface AddInput { + interface AddInput extends Releasable { /** * Send a batch of group ids to the aggregator. The {@code groupIds} * may be offset from the start of the block to allow for sending chunks @@ -43,6 +43,12 @@ interface AddInput { * {@code groupIds} {@linkplain Block} that contains thousands of * values at a single positions. *

    + *

    + * Finally, it's possible for a single position to be collected into + no groupIds. In that case its positionOffset may + be skipped entirely or the groupIds block could contain a + {@code null} value at that position. + *

    * @param positionOffset offset into the {@link Page} used to build this * {@link AddInput} of these ids * @param groupIds {@link Block} of group id, some of which may be null @@ -68,7 +74,7 @@ interface AddInput { } /** - * Prepare to process a single page of results. + * Prepare to process a single page of input. *

    * This should load the input {@link Block}s and check their types and * select an optimal path and return that path as an {@link AddInput}. @@ -76,6 +82,16 @@ interface AddInput { */ AddInput prepareProcessPage(SeenGroupIds seenGroupIds, Page page); // TODO allow returning null to opt out of the callback loop + /** + * Call this to signal to the aggregation that the {@code selected} + * parameter that's passed to {@link #evaluateIntermediate} or + * {@link #evaluateFinal} may reference groups that haven't been + * seen. This puts the underlying storage into a mode where it'll + * track which group ids have been seen, even if that increases the + * overhead. + */ + void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds); + /** * Add data produced by {@link #evaluateIntermediate}. */ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialGroupingAggregatorFunction.java index 13d4bd5d6c0d6..18b907a3d7080 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/ToPartialGroupingAggregatorFunction.java @@ -60,6 +60,11 @@ public AddInput prepareProcessPage(SeenGroupIds seenGroupIds, Page page) { return delegate.prepareProcessPage(seenGroupIds, page); } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + delegate.selectedMayContainUnseenGroups(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Page page) { final CompositeBlock inputBlock = page.getBlock(channels.get(0)); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/AddBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/AddBlock.java index 786c61e6f602a..496624fc1189d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/AddBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/AddBlock.java @@ -13,6 +13,7 @@ import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; /** * Helper for adding a {@link Page} worth of {@link Block}s to a {@link BlockHash} @@ -149,6 +150,6 @@ private void rollover(int position) { @Override public void close() { - ords.close(); + Releasables.closeExpectNoException(ords, addInput); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/BlockHashRowInTableLookup.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/BlockHashRowInTableLookup.java index 1acd1c30ed334..c198853bb36ad 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/BlockHashRowInTableLookup.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/table/BlockHashRowInTableLookup.java @@ -65,6 +65,9 @@ public void add(int positionOffset, IntVector groupIds) { lastOrd = ord; } } + + @Override + public void close() {} }); success = true; } finally { diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java index 42bc75a49f4a7..03a4ca2b0ad5e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java @@ -150,18 +150,23 @@ private void end() { hashStart = System.nanoTime(); aggregationNanos += hashStart - aggStart; } + + @Override + public void close() { + Releasables.closeExpectNoException(prepared); + } } - AddInput add = new AddInput(); + try (AddInput add = new AddInput()) { + checkState(needsInput(), "Operator is already finishing"); + requireNonNull(page, "page is null"); - checkState(needsInput(), "Operator is already finishing"); - requireNonNull(page, "page is null"); + for (int i = 0; i < prepared.length; i++) { + prepared[i] = aggregators.get(i).prepareProcessPage(blockHash, page); + } - for (int i = 0; i < prepared.length; i++) { - prepared[i] = aggregators.get(i).prepareProcessPage(blockHash, page); + blockHash.add(wrapPage(page), add); + hashNanos += System.nanoTime() - add.hashStart; } - - blockHash.add(wrapPage(page), add); - hashNanos += System.nanoTime() - add.hashStart; } finally { page.releaseBlocks(); pagesProcessed++; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java new file mode 100644 index 0000000000000..7b924076c0186 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.LongIntBlockSourceOperator; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Tuple; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class FilteredGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { + private final List unclosed = Collections.synchronizedList(new ArrayList<>()); + + // TODO some version of this test that applies across all aggs + @Override + protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { + return new FilteredAggregatorFunctionSupplier( + new SumIntAggregatorFunctionSupplier(inputChannels), + new AnyGreaterThanFactory(unclosed, inputChannels) + ); + } + + @Override + protected String expectedDescriptionOfAggregator() { + return "Filtered[next=sum of ints, filter=any > 0]"; + } + + @Override + protected String expectedToStringOfSimpleAggregator() { + return "FilteredGroupingAggregatorFunction[next=SumIntGroupingAggregatorFunction[channels=[1]], filter=any > 0]"; + } + + @Override + protected void assertSimpleGroup(List input, Block result, int position, Long group) { + long sum = 0; + for (Page page : input) { + LongBlock groups = page.getBlock(0); + IntBlock ints = page.getBlock(1); + for (int p = 0; p < ints.getPositionCount(); p++) { + /* + * Perform the sum on the values *only* if: + * 1. Any of the values is > 0 to line up with the condition + * 2. Any of the groups matches the group we're asserting + */ + int start = ints.getFirstValueIndex(p); + int end = start + ints.getValueCount(p); + boolean selected = false; + for (int i = start; i < end; i++) { + selected |= ints.getInt(i) > 0; + } + if (selected == false) { + continue; + } + selected = false; + if (group == null) { + selected = groups.isNull(p); + } else { + start = groups.getFirstValueIndex(p); + end = start + groups.getValueCount(p); + for (int i = start; i < end; i++) { + selected |= groups.getLong(i) == group; + } + } + if (selected == false) { + continue; + } + + start = ints.getFirstValueIndex(p); + end = start + ints.getValueCount(p); + for (int i = start; i < end; i++) { + sum += ints.getInt(i); + } + } + } + assertThat(((LongBlock) result).getLong(position), equalTo(sum)); + } + + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + int max = between(1, Integer.MAX_VALUE / size / 5); + return new LongIntBlockSourceOperator( + blockFactory, + IntStream.range(0, size).mapToObj(l -> Tuple.tuple(randomLongBetween(0, 4), between(-max, max))) + ); + } + + @After + public void checkUnclosed() { + for (Exception tracker : unclosed) { + logger.error("unclosed", tracker); + } + assertThat(unclosed, empty()); + } + + /** + * This checks if *any* of the integers are > 0. If so we push the group to + * the aggregation. 
+ */ + private record AnyGreaterThanFactory(List unclosed, List inputChannels) + implements + EvalOperator.ExpressionEvaluator.Factory { + @Override + public EvalOperator.ExpressionEvaluator get(DriverContext context) { + Exception tracker = new Exception(Integer.toString(unclosed.size())); + unclosed.add(tracker); + return new AnyGreaterThan(context.blockFactory(), unclosed, tracker, inputChannels); + } + + @Override + public String toString() { + return "any > 0"; + } + } + + private record AnyGreaterThan(BlockFactory blockFactory, List unclosed, Exception tracker, List inputChannels) + implements + EvalOperator.ExpressionEvaluator { + @Override + public Block eval(Page page) { + IntBlock ints = page.getBlock(inputChannels.get(0)); + try (BooleanVector.FixedBuilder result = blockFactory.newBooleanVectorFixedBuilder(ints.getPositionCount())) { + position: for (int p = 0; p < ints.getPositionCount(); p++) { + int start = ints.getFirstValueIndex(p); + int end = start + ints.getValueCount(p); + for (int i = start; i < end; i++) { + if (ints.getInt(i) > 0) { + result.appendBoolean(p, true); + continue position; + } + } + result.appendBoolean(p, false); + } + return result.build().asBlock(); + } + } + + @Override + public void close() { + if (unclosed.remove(tracker) == false) { + throw new IllegalStateException("close failure!"); + } + } + + @Override + public String toString() { + return "any > 0"; + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java index f6558d54b2779..de9337f5fce2c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java @@ -52,11 +52,14 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +/** + * Shared tests for testing grouped aggregations. 
+ */ public abstract class GroupingAggregatorFunctionTestCase extends ForkingOperatorTestCase { protected abstract AggregatorFunctionSupplier aggregatorFunction(List inputChannels); protected final int aggregatorIntermediateBlockCount() { - try (var agg = aggregatorFunction(List.of()).aggregator(driverContext())) { + try (var agg = aggregatorFunction(List.of()).groupingAggregator(driverContext())) { return agg.intermediateBlockCount(); } } @@ -101,16 +104,20 @@ protected final Matcher expectedDescriptionOfSimple() { @Override protected final Matcher expectedToStringOfSimple() { String hash = "blockHash=LongBlockHash{channel=0, entries=0, seenNull=false}"; - String type = getClass().getSimpleName().replace("Tests", ""); return equalTo( "HashAggregationOperator[" + hash + ", aggregators=[GroupingAggregator[aggregatorFunction=" - + type - + "[channels=[1]], mode=SINGLE]]]" + + expectedToStringOfSimpleAggregator() + + ", mode=SINGLE]]]" ); } + protected String expectedToStringOfSimpleAggregator() { + String type = getClass().getSimpleName().replace("Tests", ""); + return type + "[channels=[1]]"; + } + private SeenGroups seenGroups(List input) { boolean seenNullGroup = false; SortedSet seenGroups = new TreeSet<>(); @@ -544,7 +551,7 @@ public GroupingAggregatorFunction groupingAggregator(DriverContext driverContext @Override public AddInput prepareProcessPage(SeenGroupIds ignoredSeenGroupIds, Page page) { return new AddInput() { - AddInput delegateAddInput = delegate.prepareProcessPage(bigArrays -> { + final AddInput delegateAddInput = delegate.prepareProcessPage(bigArrays -> { BitArray seen = new BitArray(0, bigArrays); seen.or(seenGroupIds); return seen; @@ -595,9 +602,19 @@ public void add(int positionOffset, IntVector groupIds) { delegateAddInput.add(positionOffset + offset, blockFactory.newIntArrayVector(chunk, count)); } } + + @Override + public void close() { + delegateAddInput.close(); + } }; } + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + delegate.selectedMayContainUnseenGroups(seenGroupIds); + } + @Override public void addIntermediateInput(int positionOffset, IntVector groupIds, Page page) { int[] chunk = new int[emitChunkSize]; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/AddBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/AddBlockTests.java index fbe696aa2997b..da9529cb761ef 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/AddBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/AddBlockTests.java @@ -39,6 +39,7 @@ public void testSv() { } expected.add(added(3, 4)); assertThat(result.added, equalTo(expected)); + assertThat(result.closed, equalTo(true)); } public void testMvBlockEndsOnBatchBoundary() { @@ -62,6 +63,7 @@ public void testMvBlockEndsOnBatchBoundary() { // We uselessly flush an empty position if emitBatchSize lines up with the total count expected.add(new Added(1, List.of(List.of()))); assertThat(result.added, equalTo(expected)); + assertThat(result.closed, equalTo(true)); } public void testMvPositionEndOnBatchBoundary() { @@ -83,6 +85,7 @@ public void testMvPositionEndOnBatchBoundary() { // Because the first position ended on a block boundary we uselessly emit an empty position there expected.add(new Added(0, List.of(List.of(), List.of(0, 2)))); assertThat(result.added, equalTo(expected)); + 
assertThat(result.closed, equalTo(true)); } public void testMv() { @@ -103,6 +106,7 @@ public void testMv() { } expected.add(new Added(1, List.of(List.of(2)))); assertThat(result.added, equalTo(expected)); + assertThat(result.closed, equalTo(true)); } @After @@ -117,6 +121,8 @@ Added added(int positionOffset, int... ords) { } private class TestAddInput implements GroupingAggregatorFunction.AddInput { + private boolean closed = false; + private final List added = new ArrayList<>(); @Override @@ -139,5 +145,10 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { add(positionOffset, groupIds.asBlock()); } + + @Override + public void close() { + closed = true; + } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java index 259d4f1249d69..c4042ea15afc6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java @@ -1166,6 +1166,9 @@ public void add(int positionOffset, IntVector groupIds) { groupIds.incRef(); output1.add(new Output(positionOffset, null, groupIds)); } + + @Override + public void close() {} }); hash2.add(page, new GroupingAggregatorFunction.AddInput() { @Override @@ -1179,6 +1182,9 @@ public void add(int positionOffset, IntVector groupIds) { groupIds.incRef(); output2.add(new Output(positionOffset, null, groupIds)); } + + @Override + public void close() {} }); assertThat(output1.size(), equalTo(output1.size())); for (int i = 0; i < output1.size(); i++) { @@ -1297,6 +1303,9 @@ public void add(int positionOffset, IntBlock groupIds) { public void add(int positionOffset, IntVector groupIds) { add(positionOffset, groupIds.asBlock()); } + + @Override + public void close() {} }); if (blockHash instanceof LongLongBlockHash == false && blockHash instanceof BytesRefLongBlockHash == false diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index 54db9afa291ad..eb9f10f7b2e0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -11,6 +11,7 @@ import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.compute.aggregation.GroupingAggregator; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.aggregation.SeenGroupIds; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.ElementType; @@ -455,22 +456,26 @@ private void processPageGrouping(GroupingAggregator aggregator, Page inputPage, for (int currentGroupOffset = 0; currentGroupOffset < groupCount;) { int groupSliceRemainingSize = Math.min(groupSliceSize, groupCount - currentGroupOffset); var seenGroupIds = new SeenGroupIds.Range(0, allValuesNull ? 
0 : currentGroupOffset + groupSliceRemainingSize); - var addInput = aggregator.prepareProcessPage(seenGroupIds, inputPage); - - var positionCount = inputPage.getPositionCount(); - var dataSliceSize = 1; - // Divide data in chunks - for (int currentDataOffset = 0; currentDataOffset < positionCount;) { - int dataSliceRemainingSize = Math.min(dataSliceSize, positionCount - currentDataOffset); - try ( - var groups = makeGroupsVector(currentGroupOffset, currentGroupOffset + groupSliceRemainingSize, dataSliceRemainingSize) - ) { - addInput.add(currentDataOffset, groups); - } + try (GroupingAggregatorFunction.AddInput addInput = aggregator.prepareProcessPage(seenGroupIds, inputPage)) { + var positionCount = inputPage.getPositionCount(); + var dataSliceSize = 1; + // Divide data in chunks + for (int currentDataOffset = 0; currentDataOffset < positionCount;) { + int dataSliceRemainingSize = Math.min(dataSliceSize, positionCount - currentDataOffset); + try ( + var groups = makeGroupsVector( + currentGroupOffset, + currentGroupOffset + groupSliceRemainingSize, + dataSliceRemainingSize + ) + ) { + addInput.add(currentDataOffset, groups); + } - currentDataOffset += dataSliceSize; - if (positionCount > currentDataOffset) { - dataSliceSize = randomIntBetween(1, Math.min(100, positionCount - currentDataOffset)); + currentDataOffset += dataSliceSize; + if (positionCount > currentDataOffset) { + dataSliceSize = randomIntBetween(1, Math.min(100, positionCount - currentDataOffset)); + } } } From 04d192921d467286eef5f82060d86d21d986dabc Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Mon, 9 Sep 2024 14:55:44 -0400 Subject: [PATCH 10/31] Allow fields with dots in sparse vector field mapper (#111981) * Remove dot validation for sparse vector field mapper * Update docs/changelog/111981.yaml * Update changelog * Fix test permissions * PR feedback - yaml test * PR feedback - remove non-dot values from sparse vector query in test to verify the dot is searched correctly * Add additional test cases for field collissions * Update docs/changelog/111981.yaml * Update key for SparseVectorFieldMapper * Fix test * Update yaml test to include scores * Update server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java Co-authored-by: Jim Ferenczi * Revert "Update server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java" This reverts commit 58fc087535484698426d96d02dd45521fa43d519. 
* PR feedback - escape dots --------- Co-authored-by: Elastic Machine Co-authored-by: Jim Ferenczi --- docs/changelog/111981.yaml | 6 + .../vectors/SparseVectorFieldMapper.java | 8 +- .../vectors/SparseVectorFieldMapperTests.java | 28 ++- .../test/ml/sparse_vector_search.yml | 163 ++++++++++++++++++ 4 files changed, 192 insertions(+), 13 deletions(-) create mode 100644 docs/changelog/111981.yaml diff --git a/docs/changelog/111981.yaml b/docs/changelog/111981.yaml new file mode 100644 index 0000000000000..13b8fe4b7e38d --- /dev/null +++ b/docs/changelog/111981.yaml @@ -0,0 +1,6 @@ +pr: 111981 +summary: Allow fields with dots in sparse vector field mapper +area: Mapping +type: enhancement +issues: + - 109118 diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java index 0e05d49f35a6e..7155fc1161ed1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java @@ -177,15 +177,11 @@ public void parse(DocumentParserContext context) throws IOException { for (Token token = context.parser().nextToken(); token != Token.END_OBJECT; token = context.parser().nextToken()) { if (token == Token.FIELD_NAME) { feature = context.parser().currentName(); - if (feature.contains(".")) { - throw new IllegalArgumentException( - "[sparse_vector] fields do not support dots in feature names but found [" + feature + "]" - ); - } } else if (token == Token.VALUE_NULL) { // ignore feature, this is consistent with numeric fields } else if (token == Token.VALUE_NUMBER || token == Token.VALUE_STRING) { - final String key = fullPath() + "." + feature; + // Use a delimiter that won't collide with subfields & escape the dots in the feature name + final String key = fullPath() + "\\." 
+ feature.replace(".", "\\."); float value = context.parser().floatValue(true); // if we have an existing feature of the same name we'll select for the one with the max value diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java index 271f0c12be611..9cfbbad5ebf50 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java @@ -111,12 +111,26 @@ public void testDefaults() throws Exception { public void testDotInFieldName() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - DocumentParsingException ex = expectThrows( - DocumentParsingException.class, - () -> mapper.parse(source(b -> b.field("field", Map.of("politi.cs", 10, "sports", 20)))) - ); - assertThat(ex.getCause().getMessage(), containsString("do not support dots in feature names")); - assertThat(ex.getCause().getMessage(), containsString("politi.cs")); + ParsedDocument parsedDocument = mapper.parse(source(b -> b.field("field", Map.of("foo.bar", 10, "foobar", 20)))); + + List fields = parsedDocument.rootDoc().getFields("field"); + assertEquals(2, fields.size()); + assertThat(fields.get(0), Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = null; + FeatureField featureField2 = null; + for (IndexableField field : fields) { + if (field.stringValue().equals("foo.bar")) { + featureField1 = (FeatureField) field; + } else if (field.stringValue().equals("foobar")) { + featureField2 = (FeatureField) field; + } else { + throw new UnsupportedOperationException(); + } + } + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 < freq2); } public void testHandlesMultiValuedFields() throws MapperParsingException, IOException { @@ -156,7 +170,7 @@ public void testHandlesMultiValuedFields() throws MapperParsingException, IOExce })); // then validate that the generate document stored both values appropriately and we have only the max value stored - FeatureField barField = ((FeatureField) doc1.rootDoc().getByKey("foo.field.bar")); + FeatureField barField = ((FeatureField) doc1.rootDoc().getByKey("foo.field\\.bar")); assertEquals(20, barField.getFeatureValue(), 1); FeatureField storedBarField = ((FeatureField) doc1.rootDoc().getFields("foo.field").get(1)); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml index 75823d22504f3..332981a580802 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/sparse_vector_search.yml @@ -313,3 +313,166 @@ setup: query: "octopus comforter smells" - match: { status: 400 } + +--- +"Search on a sparse_vector field with dots in the field names": + + - requires: + cluster_features: [ "gte_v8.16.0" ] + reason: dots in field names allowed starting in in 8.16.0 + + - do: + indices.create: + index: index-with-sparse-vector2 + body: + mappings: + properties: + ml.tokens: + type: sparse_vector + + - match: { acknowledged: true } + + - do: + headers: + Authorization: "Basic 
eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: index-with-sparse-vector2 + id: "has-dots" + refresh: true + body: + ml: + tokens: + running: 2.4097164 + good: 2.170997 + run: 2.052153 + race: 1.4575411 + for: 1.1908325 + 5.0k: 2.489943 + + - match: { result: "created" } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + get: + index: index-with-sparse-vector2 + id: "has-dots" + + - match: + _source: + ml: + tokens: + running: 2.4097164 + good: 2.170997 + run: 2.052153 + race: 1.4575411 + for: 1.1908325 + 5.0k: 2.489943 + + - do: + search: + index: index-with-sparse-vector2 + body: + query: + sparse_vector: + field: ml.tokens + query_vector: + 5.0k: 2.489943 + + - match: { hits.total.value: 1 } + +--- +"Search on a nested sparse_vector field with dots in the field names and conflicting child fields": + + - requires: + cluster_features: [ "gte_v8.16.0" ] + reason: dots in field names allowed starting in in 8.16.0 + + - do: + indices.create: + index: index-with-sparse-vector3 + body: + mappings: + properties: + parent: + type: object + subobjects: false + properties: + foo: + type: sparse_vector + foo.bar: + type: sparse_vector + + - match: { acknowledged: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + Content-Type: application/json + bulk: + index: index-with-sparse-vector3 + refresh: true + body: | + {"index": { "_id": "parent-foo" }} + {"parent.foo": { "bar.baz": 1.0 }} + {"index": { "_id": "parent-foo-bar" }} + {"parent.foo.bar": { "baz": 2.0 }} + {"index": { "_id": "both-docs" }} + {"parent.foo": { "bar.baz": 3.0 }, "parent.foo.bar": { "baz": 4.0 }} + + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + get: + index: index-with-sparse-vector3 + id: "parent-foo" + + - match: + _source: + parent.foo: + bar.baz: 1.0 + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + get: + index: index-with-sparse-vector3 + id: "parent-foo-bar" + + - match: + _source: + parent.foo.bar: + baz: 2.0 + + - do: + search: + index: index-with-sparse-vector3 + body: + query: + sparse_vector: + field: parent.foo + query_vector: + bar.baz: 1.0 + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "both-docs" } + - match: { hits.hits.0._score: 3.0 } + - match: { hits.hits.1._id: "parent-foo" } + - match: { hits.hits.1._score: 1.0 } + + - do: + search: + index: index-with-sparse-vector3 + body: + query: + sparse_vector: + field: parent.foo.bar + query_vector: + baz: 1.0 + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "both-docs" } + - match: { hits.hits.0._score: 4.0 } + - match: { hits.hits.1._id: "parent-foo-bar" } + - match: { hits.hits.1._score: 2.0 } From e8569356ea3c4bd2d23d1ca848d4c5e43547bb4e Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Mon, 9 Sep 2024 14:56:43 -0400 Subject: [PATCH 11/31] [ES|QL] explicit cast a string literal to date_period and time_duration in arithmetic operations (#109193) explicit cast to date_period and time_duration in arithmetic operations --- docs/changelog/109193.yaml | 6 + .../description/to_dateperiod.asciidoc | 5 + .../description/to_timeduration.asciidoc | 5 + .../functions/examples/to_dateperiod.asciidoc | 13 + .../examples/to_timeduration.asciidoc | 13 + .../kibana/definition/to_dateperiod.json | 47 ++++ .../kibana/definition/to_timeduration.json | 47 ++++ .../functions/kibana/docs/to_dateperiod.md | 10 + .../functions/kibana/docs/to_timeduration.md | 10 + .../esql/functions/kibana/inline_cast.json | 2 + .../functions/layout/to_dateperiod.asciidoc | 15 ++ .../functions/layout/to_timeduration.asciidoc | 15 ++ .../parameters/to_dateperiod.asciidoc | 6 + .../parameters/to_timeduration.asciidoc | 6 + .../functions/signature/to_dateperiod.svg | 1 + .../functions/signature/to_timeduration.svg | 1 + .../type-conversion-functions.asciidoc | 4 + .../functions/types/to_dateperiod.asciidoc | 11 + .../functions/types/to_timeduration.asciidoc | 11 + .../xpack/esql/qa/rest/RestEsqlTestCase.java | 35 +++ .../src/main/resources/convert.csv-spec | 137 ++++++++++ .../src/main/resources/meta.csv-spec | 10 +- .../xpack/esql/action/EsqlCapabilities.java | 7 +- .../xpack/esql/analysis/Verifier.java | 7 +- .../function/EsqlFunctionRegistry.java | 4 + .../convert/AbstractConvertFunction.java | 2 +- .../convert/FoldablesConvertFunction.java | 75 ++++++ .../function/scalar/convert/ToDatePeriod.java | 54 ++++ .../scalar/convert/ToTimeDuration.java | 54 ++++ .../xpack/esql/parser/ExpressionBuilder.java | 4 +- .../esql/type/EsqlDataTypeConverter.java | 148 +++++++++-- .../xpack/esql/analysis/VerifierTests.java | 245 +++++++++++++++++- .../scalar/convert/ToDatePeriodTests.java | 88 +++++++ .../scalar/convert/ToTimeDurationTests.java | 87 +++++++ .../optimizer/LogicalPlanOptimizerTests.java | 141 ++++++++++ .../esql/parser/StatementParserTests.java | 16 ++ .../rest-api-spec/test/esql/10_basic.yml | 29 +++ 37 files changed, 1335 insertions(+), 36 deletions(-) create mode 100644 docs/changelog/109193.yaml create mode 100644 docs/reference/esql/functions/description/to_dateperiod.asciidoc create mode 100644 docs/reference/esql/functions/description/to_timeduration.asciidoc create mode 100644 docs/reference/esql/functions/examples/to_dateperiod.asciidoc create mode 100644 docs/reference/esql/functions/examples/to_timeduration.asciidoc create mode 100644
docs/reference/esql/functions/kibana/definition/to_dateperiod.json create mode 100644 docs/reference/esql/functions/kibana/definition/to_timeduration.json create mode 100644 docs/reference/esql/functions/kibana/docs/to_dateperiod.md create mode 100644 docs/reference/esql/functions/kibana/docs/to_timeduration.md create mode 100644 docs/reference/esql/functions/layout/to_dateperiod.asciidoc create mode 100644 docs/reference/esql/functions/layout/to_timeduration.asciidoc create mode 100644 docs/reference/esql/functions/parameters/to_dateperiod.asciidoc create mode 100644 docs/reference/esql/functions/parameters/to_timeduration.asciidoc create mode 100644 docs/reference/esql/functions/signature/to_dateperiod.svg create mode 100644 docs/reference/esql/functions/signature/to_timeduration.svg create mode 100644 docs/reference/esql/functions/types/to_dateperiod.asciidoc create mode 100644 docs/reference/esql/functions/types/to_timeduration.asciidoc create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriod.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToTimeDuration.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriodTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToTimeDurationTests.java diff --git a/docs/changelog/109193.yaml b/docs/changelog/109193.yaml new file mode 100644 index 0000000000000..5cc664eaee2cd --- /dev/null +++ b/docs/changelog/109193.yaml @@ -0,0 +1,6 @@ +pr: 109193 +summary: "[ES|QL] explicit cast a string literal to `date_period` and `time_duration`\ + \ in arithmetic operations" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/reference/esql/functions/description/to_dateperiod.asciidoc b/docs/reference/esql/functions/description/to_dateperiod.asciidoc new file mode 100644 index 0000000000000..443e377bf51c5 --- /dev/null +++ b/docs/reference/esql/functions/description/to_dateperiod.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts an input value into a `date_period` value. diff --git a/docs/reference/esql/functions/description/to_timeduration.asciidoc b/docs/reference/esql/functions/description/to_timeduration.asciidoc new file mode 100644 index 0000000000000..87c405a98ff65 --- /dev/null +++ b/docs/reference/esql/functions/description/to_timeduration.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts an input value into a `time_duration` value. diff --git a/docs/reference/esql/functions/examples/to_dateperiod.asciidoc b/docs/reference/esql/functions/examples/to_dateperiod.asciidoc new file mode 100644 index 0000000000000..91272b33b45ed --- /dev/null +++ b/docs/reference/esql/functions/examples/to_dateperiod.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
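The two target types behave differently in date arithmetic: a `date_period` is a calendar-based amount (a `java.time.Period` under the hood) while a `time_duration` is an exact amount of time (a `java.time.Duration`). A minimal, self-contained sketch of that distinction using plain `java.time` rather than the ES|QL classes; the printed values line up with the `castToDatePeriod`/`castToTimeDuration` results further down in this patch:

```
import java.time.Duration;
import java.time.Period;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;

public class TemporalAmountSketch {
    public static void main(String[] args) {
        ZonedDateTime x = ZonedDateTime.of(2024, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);

        // "3 DAYS"::date_period adds a calendar-based Period
        System.out.println(x.plus(Period.ofDays(3)));      // 2024-01-04T00:00Z
        // to_timeduration("3 hours") subtracts an exact Duration
        System.out.println(x.minus(Duration.ofHours(3)));  // 2023-12-31T21:00Z
    }
}
```

This split is also why the REST test later in the patch expects `"3 days"` to be rejected when cast to `time_duration`, and `"3 hours"` when cast to `date_period`.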
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/convert.csv-spec[tag=castToDatePeriod] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/convert.csv-spec[tag=castToDatePeriod-result] +|=== + diff --git a/docs/reference/esql/functions/examples/to_timeduration.asciidoc b/docs/reference/esql/functions/examples/to_timeduration.asciidoc new file mode 100644 index 0000000000000..7e62a39bbe3e2 --- /dev/null +++ b/docs/reference/esql/functions/examples/to_timeduration.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/convert.csv-spec[tag=castToTimeDuration] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/convert.csv-spec[tag=castToTimeDuration-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/to_dateperiod.json b/docs/reference/esql/functions/kibana/definition/to_dateperiod.json new file mode 100644 index 0000000000000..dc9176f4cc0b0 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/to_dateperiod.json @@ -0,0 +1,47 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "to_dateperiod", + "description" : "Converts an input value into a `date_period` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "date_period", + "optional" : false, + "description" : "Input value. The input is a valid constant date period expression." + } + ], + "variadic" : false, + "returnType" : "date_period" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Input value. The input is a valid constant date period expression." + } + ], + "variadic" : false, + "returnType" : "date_period" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Input value. The input is a valid constant date period expression." + } + ], + "variadic" : false, + "returnType" : "date_period" + } + ], + "examples" : [ + "row x = \"2024-01-01\"::datetime | eval y = x + \"3 DAYS\"::date_period, z = x - to_dateperiod(\"3 days\");" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/to_timeduration.json b/docs/reference/esql/functions/kibana/definition/to_timeduration.json new file mode 100644 index 0000000000000..039de323044ed --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/to_timeduration.json @@ -0,0 +1,47 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "to_timeduration", + "description" : "Converts an input value into a `time_duration` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Input value. The input is a valid constant time duration expression." + } + ], + "variadic" : false, + "returnType" : "time_duration" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Input value. The input is a valid constant time duration expression." 
+ } + ], + "variadic" : false, + "returnType" : "time_duration" + }, + { + "params" : [ + { + "name" : "field", + "type" : "time_duration", + "optional" : false, + "description" : "Input value. The input is a valid constant time duration expression." + } + ], + "variadic" : false, + "returnType" : "time_duration" + } + ], + "examples" : [ + "row x = \"2024-01-01\"::datetime | eval y = x + \"3 hours\"::time_duration, z = x - to_timeduration(\"3 hours\");" + ] +} diff --git a/docs/reference/esql/functions/kibana/docs/to_dateperiod.md b/docs/reference/esql/functions/kibana/docs/to_dateperiod.md new file mode 100644 index 0000000000000..adbbe75783051 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/to_dateperiod.md @@ -0,0 +1,10 @@ + + +### TO_DATEPERIOD +Converts an input value into a `date_period` value. + +``` +row x = "2024-01-01"::datetime | eval y = x + "3 DAYS"::date_period, z = x - to_dateperiod("3 days"); +``` diff --git a/docs/reference/esql/functions/kibana/docs/to_timeduration.md b/docs/reference/esql/functions/kibana/docs/to_timeduration.md new file mode 100644 index 0000000000000..52e32ba97d11c --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/to_timeduration.md @@ -0,0 +1,10 @@ + + +### TO_TIMEDURATION +Converts an input value into a `time_duration` value. + +``` +row x = "2024-01-01"::datetime | eval y = x + "3 hours"::time_duration, z = x - to_timeduration("3 hours"); +``` diff --git a/docs/reference/esql/functions/kibana/inline_cast.json b/docs/reference/esql/functions/kibana/inline_cast.json index f71572d3d651c..f1aa283c52e95 100644 --- a/docs/reference/esql/functions/kibana/inline_cast.json +++ b/docs/reference/esql/functions/kibana/inline_cast.json @@ -3,6 +3,7 @@ "boolean" : "to_boolean", "cartesian_point" : "to_cartesianpoint", "cartesian_shape" : "to_cartesianshape", + "date_period" : "to_dateperiod", "datetime" : "to_datetime", "double" : "to_double", "geo_point" : "to_geopoint", @@ -14,6 +15,7 @@ "long" : "to_long", "string" : "to_string", "text" : "to_string", + "time_duration" : "to_timeduration", "unsigned_long" : "to_unsigned_long", "version" : "to_version" } \ No newline at end of file diff --git a/docs/reference/esql/functions/layout/to_dateperiod.asciidoc b/docs/reference/esql/functions/layout/to_dateperiod.asciidoc new file mode 100644 index 0000000000000..0345c1a6680c8 --- /dev/null +++ b/docs/reference/esql/functions/layout/to_dateperiod.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-to_dateperiod]] +=== `TO_DATEPERIOD` + +*Syntax* + +[.text-center] +image::esql/functions/signature/to_dateperiod.svg[Embedded,opts=inline] + +include::../parameters/to_dateperiod.asciidoc[] +include::../description/to_dateperiod.asciidoc[] +include::../types/to_dateperiod.asciidoc[] +include::../examples/to_dateperiod.asciidoc[] diff --git a/docs/reference/esql/functions/layout/to_timeduration.asciidoc b/docs/reference/esql/functions/layout/to_timeduration.asciidoc new file mode 100644 index 0000000000000..bed4743c730a8 --- /dev/null +++ b/docs/reference/esql/functions/layout/to_timeduration.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-to_timeduration]] +=== `TO_TIMEDURATION` + +*Syntax* + +[.text-center] +image::esql/functions/signature/to_timeduration.svg[Embedded,opts=inline] + +include::../parameters/to_timeduration.asciidoc[] +include::../description/to_timeduration.asciidoc[] +include::../types/to_timeduration.asciidoc[] +include::../examples/to_timeduration.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/to_dateperiod.asciidoc b/docs/reference/esql/functions/parameters/to_dateperiod.asciidoc new file mode 100644 index 0000000000000..1e5ed14cf44ae --- /dev/null +++ b/docs/reference/esql/functions/parameters/to_dateperiod.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: +Input value. The input is a valid constant date period expression. diff --git a/docs/reference/esql/functions/parameters/to_timeduration.asciidoc b/docs/reference/esql/functions/parameters/to_timeduration.asciidoc new file mode 100644 index 0000000000000..0289dc37dbfe6 --- /dev/null +++ b/docs/reference/esql/functions/parameters/to_timeduration.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: +Input value. The input is a valid constant time duration expression. diff --git a/docs/reference/esql/functions/signature/to_dateperiod.svg b/docs/reference/esql/functions/signature/to_dateperiod.svg new file mode 100644 index 0000000000000..302a9ee3bfa69 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_dateperiod.svg @@ -0,0 +1 @@ +TO_DATEPERIOD(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_timeduration.svg b/docs/reference/esql/functions/signature/to_timeduration.svg new file mode 100644 index 0000000000000..b237441b3b40d --- /dev/null +++ b/docs/reference/esql/functions/signature/to_timeduration.svg @@ -0,0 +1 @@ +TO_TIMEDURATION(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/type-conversion-functions.asciidoc b/docs/reference/esql/functions/type-conversion-functions.asciidoc index 96c29a776bc2b..9ac9ec290c07b 100644 --- a/docs/reference/esql/functions/type-conversion-functions.asciidoc +++ b/docs/reference/esql/functions/type-conversion-functions.asciidoc @@ -16,6 +16,7 @@ * <> * <> * <> +* experimental:[] <> * <> * <> * <> @@ -26,6 +27,7 @@ * <> * <> * <> +* experimental:[] <> * experimental:[] <> * <> // end::type_list[] @@ -33,6 +35,7 @@ include::layout/to_boolean.asciidoc[] include::layout/to_cartesianpoint.asciidoc[] include::layout/to_cartesianshape.asciidoc[] +include::layout/to_dateperiod.asciidoc[] include::layout/to_datetime.asciidoc[] include::layout/to_degrees.asciidoc[] include::layout/to_double.asciidoc[] @@ -43,5 +46,6 @@ include::layout/to_ip.asciidoc[] include::layout/to_long.asciidoc[] include::layout/to_radians.asciidoc[] include::layout/to_string.asciidoc[] +include::layout/to_timeduration.asciidoc[] include::layout/to_unsigned_long.asciidoc[] include::layout/to_version.asciidoc[] diff --git a/docs/reference/esql/functions/types/to_dateperiod.asciidoc b/docs/reference/esql/functions/types/to_dateperiod.asciidoc new file mode 100644 index 0000000000000..1bbc33fe3ca7d --- /dev/null +++ b/docs/reference/esql/functions/types/to_dateperiod.asciidoc @@ -0,0 +1,11 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | result +date_period | date_period +keyword | date_period +text | date_period +|=== diff --git a/docs/reference/esql/functions/types/to_timeduration.asciidoc b/docs/reference/esql/functions/types/to_timeduration.asciidoc new file mode 100644 index 0000000000000..b82a5bb4f9f87 --- /dev/null +++ b/docs/reference/esql/functions/types/to_timeduration.asciidoc @@ -0,0 +1,11 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | result +keyword | time_duration +text | time_duration +time_duration | time_duration +|=== diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 6f5297bbeef4d..d9d11c3568ab7 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -609,6 +609,41 @@ public void testErrorMessageForInvalidParams() throws IOException { assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Unknown query parameter [n0], did you mean [n1]")); } + public void testErrorMessageForInvalidIntervalParams() throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsqlSync( + requestObjectBuilder().query("row x = ?n1::datetime | eval y = x + ?n2::time_duration") + .params("[{\"n1\": \"2024-01-01\"}, {\"n2\": \"3 days\"}]") + ) + ); + + String error = re.getMessage().replaceAll("\\\\\n\s+\\\\", ""); + assertThat( + error, + containsString( + "Invalid interval value in [?n2::time_duration], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [3 days]" + ) + ); + + re = expectThrows( + ResponseException.class, + () -> runEsqlSync( + requestObjectBuilder().query("row x = ?n1::datetime | eval y = x - ?n2::date_period") + .params("[{\"n1\": \"2024-01-01\"}, {\"n2\": \"3 hours\"}]") + ) + ); + error = re.getMessage().replaceAll("\\\\\n\s+\\\\", ""); + assertThat( + error, + containsString( + "Invalid interval value in [?n2::date_period], expected integer followed by one of " + + "[DAY, DAYS, D, WEEK, WEEKS, W, MONTH, MONTHS, MO, QUARTER, QUARTERS, Q, YEAR, YEARS, YR, Y] but got [3 hours]" + ) + ); + } + public void testErrorMessageForLiteralDateMathOverflow() throws IOException { List dateMathOverflowExpressions = List.of( "2147483647 day + 1 day", diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec index 42b5c0344b559..1397965145a1a 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec @@ -214,3 +214,140 @@ from employees emp_no:integer | languages:integer | height:double 10037 | 2 | 2.0 ; + +convertToDatePeriod +required_capability: cast_string_literal_to_temporal_amount +//tag::castToDatePeriod[] +row x = "2024-01-01"::datetime | eval y = x + "3 DAYS"::date_period, z = x - to_dateperiod("3 days"); +//end::castToDatePeriod[] + 
+//tag::castToDatePeriod-result[] +x:datetime |y:datetime |z:datetime +2024-01-01 |2024-01-04 |2023-12-29 +//end::castToDatePeriod-result[] +; + +convertToTimeDuration +required_capability: cast_string_literal_to_temporal_amount +//tag::castToTimeDuration[] +row x = "2024-01-01"::datetime | eval y = x + "3 hours"::time_duration, z = x - to_timeduration("3 hours"); +//end::castToTimeDuration[] + +//tag::castToTimeDuration-result[] +x:datetime |y:datetime |z:datetime +2024-01-01 |2024-01-01T03:00:00.000Z |2023-12-31T21:00:00.000Z +//end::castToTimeDuration-result[] +; + +convertToDatePeriodTimeDuration +required_capability: cast_string_literal_to_temporal_amount +row x = "2024-01-01"::datetime + "3 hours"::time_duration, y = "2024-01-01"::datetime - to_timeduration("3 hours"), +z = "2024-01-01"::datetime + "3 DAYS"::date_period, w = "2024-01-01"::datetime - to_dateperiod("3 days"), u = "3 days", +v = "3 hours" +| eval a = "2024-01-01" + u::date_period, b = "2024-01-01" - v::time_duration +| keep x, y, z, w, a, b; + +x:datetime |y:datetime |z:datetime |w:datetime |a:datetime |b:datetime +2024-01-01T03:00:00.000Z |2023-12-31T21:00:00.000Z |2024-01-04T00:00:00.000Z |2023-12-29T00:00:00.000Z |2024-01-04T00:00:00.000Z |2023-12-31T21:00:00.000Z +; + +convertToDatePeriodNested +required_capability: cast_string_literal_to_temporal_amount +row x = "2024-01-01"::datetime +| eval y = x + to_dateperiod("3 days"::date_period) +; + +x:datetime |y:datetime +2024-01-01 |2024-01-04 +; + +convertToTimeDurationNested +required_capability: cast_string_literal_to_temporal_amount +row x = "2024-01-01"::datetime +| eval y = x + to_timeduration("3 hours"::time_duration) +; + +x:datetime |y:datetime +2024-01-01 |2024-01-01T03:00:00.000Z +; + +convertToDatePeriodFromIndex +required_capability: cast_string_literal_to_temporal_amount +FROM employees +| WHERE emp_no == 10001 +| EVAL x = birth_date + "3 days"::date_period, y = birth_date - to_dateperiod("3 days") +| KEEP birth_date, x, y; + +birth_date:datetime |x:datetime |y:datetime +1953-09-02T00:00:00Z |1953-09-05T00:00:00Z |1953-08-30T00:00:00Z +; + +convertToTimeDurationFromIndex +required_capability: cast_string_literal_to_temporal_amount +FROM employees +| WHERE emp_no == 10001 +| EVAL x = birth_date + "3 hours"::time_duration, y = birth_date - to_timeduration("3 hours") +| KEEP birth_date, x, y; + +birth_date:datetime |x:datetime |y:datetime +1953-09-02T00:00:00Z |1953-09-02T03:00:00Z |1953-09-01T21:00:00Z +; + +convertToDatePeriodTimeDurationRef +required_capability: cast_string_literal_to_temporal_amount +FROM employees +| WHERE emp_no == 10001 +| EVAL interval_timeduration = "3 hours", x = birth_date + interval_timeduration::time_duration, y = birth_date - concat("3 ", "hours")::time_duration +| EVAL interval_dateperiod = "3 months", z = birth_date + interval_dateperiod::date_period, w = birth_date - concat("3 ", "months")::date_period +| EVAL a = "3", b = "hours", c = birth_date + concat(concat(a, " "), b)::time_duration +| KEEP birth_date, x, y, z, w, c; + +birth_date:datetime |x:datetime |y:datetime |z:datetime |w:datetime |c:datetime +1953-09-02T00:00:00Z |1953-09-02T03:00:00Z |1953-09-01T21:00:00Z |1953-12-02T00:00:00Z |1953-06-02T00:00:00Z |1953-09-02T03:00:00Z +; + +convertToDatePeriodNull +required_capability: cast_string_literal_to_temporal_amount +FROM employees +| WHERE emp_no == 10001 +| EVAL x = birth_date + null::date_period, y = birth_date - to_dateperiod(null), z = birth_date + to_string(null)::date_period +| KEEP birth_date, x, y, z; + 
+birth_date:datetime |x:datetime |y:datetime |z:datetime +1953-09-02T00:00:00Z |null |null |null +; + +convertToTimeDurationNull +required_capability: cast_string_literal_to_temporal_amount +FROM employees +| WHERE emp_no == 10001 +| EVAL x = birth_date + null::time_duration, y = birth_date - to_timeduration(null), z = birth_date + to_string(null)::time_duration +| KEEP birth_date, x, y, z; + +birth_date:datetime |x:datetime |y:datetime |z:datetime +1953-09-02T00:00:00Z |null |null |null +; + +convertToDatePeriodIntegerLiteral +required_capability: cast_string_literal_to_temporal_amount +FROM employees +| WHERE emp_no == 10001 +| EVAL x = birth_date + 3 days::date_period, y = birth_date - to_dateperiod(3 days), +z = birth_date + 3 months + 3 days::date_period, w = birth_date + (3 months + 3 days)::date_period +| KEEP birth_date, x, y, z, w; + +birth_date:datetime |x:datetime |y:datetime |z:datetime |w:datetime +1953-09-02T00:00:00Z |1953-09-05T00:00:00Z |1953-08-30T00:00:00Z |1953-12-05T00:00:00Z |1953-12-05T00:00:00Z +; + +convertToTimeDurationIntegerLiteral +required_capability: cast_string_literal_to_temporal_amount +FROM employees +| WHERE emp_no == 10001 +| EVAL x = birth_date + 3 hours::time_duration, y = birth_date - to_timeduration(3 hours), +z = birth_date + 3 hours + 3 minutes::time_duration, w = birth_date + (3 hours + 3 minutes)::time_duration +| KEEP birth_date, x, y, z, w; + +birth_date:datetime |x:datetime |y:datetime |z:datetime |w:datetime +1953-09-02T00:00:00Z |1953-09-02T03:00:00Z |1953-09-01T21:00:00Z |1953-09-02T03:03:00Z |1953-09-02T03:03:00Z +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec index bc90f7f616631..6909f0aeb42f5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/meta.csv-spec @@ -95,6 +95,7 @@ double tau() "boolean to_boolean(field:boolean|keyword|text|double|long|unsigned_long|integer)" "cartesian_point to_cartesianpoint(field:cartesian_point|keyword|text)" "cartesian_shape to_cartesianshape(field:cartesian_point|cartesian_shape|keyword|text)" +"date_period to_dateperiod(field:date_period|keyword|text)" "date to_datetime(field:date|date_nanos|keyword|text|double|long|unsigned_long|integer)" "double to_dbl(field:boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long)" "double to_degrees(number:double|integer|long|unsigned_long)" @@ -110,6 +111,7 @@ double tau() "double to_radians(number:double|integer|long|unsigned_long)" "keyword to_str(field:boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "keyword to_string(field:boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" +"time_duration to_timeduration(field:time_duration|keyword|text)" "unsigned_long to_ul(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_ulong(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_unsigned_long(field:boolean|date|keyword|text|double|long|unsigned_long|integer)" @@ -221,6 +223,7 @@ to_bool |field |"boolean|keyword|text|double to_boolean |field |"boolean|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. 
to_cartesianpo|field |"cartesian_point|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. to_cartesiansh|field |"cartesian_point|cartesian_shape|keyword|text" |Input value. The input can be a single- or multi-valued column or an expression. +to_dateperiod |field |"date_period|keyword|text" |Input value. The input is a valid constant date period expression. to_datetime |field |"date|date_nanos|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_dbl |field |"boolean|date|keyword|text|double|long|unsigned_long|integer|counter_double|counter_integer|counter_long" |Input value. The input can be a single- or multi-valued column or an expression. to_degrees |number |"double|integer|long|unsigned_long" |Input value. The input can be a single- or multi-valued column or an expression. @@ -236,6 +239,7 @@ to_lower |str |"keyword|text" to_radians |number |"double|integer|long|unsigned_long" |Input value. The input can be a single- or multi-valued column or an expression. to_str |field |"boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" |Input value. The input can be a single- or multi-valued column or an expression. to_string |field |"boolean|cartesian_point|cartesian_shape|date|date_nanos|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" |Input value. The input can be a single- or multi-valued column or an expression. +to_timeduratio|field |"time_duration|keyword|text" |Input value. The input is a valid constant time duration expression. to_ul |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_ulong |field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. to_unsigned_lo|field |"boolean|date|keyword|text|double|long|unsigned_long|integer" |Input value. The input can be a single- or multi-valued column or an expression. @@ -347,6 +351,7 @@ to_bool |Converts an input value to a boolean value. A string value of *tr to_boolean |Converts an input value to a boolean value. A string value of *true* will be case-insensitive converted to the Boolean *true*. For anything else, including the empty string, the function will return *false*. The numerical value of *0* will be converted to *false*, anything else will be converted to *true*. to_cartesianpo|Converts an input value to a `cartesian_point` value. A string will only be successfully converted if it respects the {wikipedia}/Well-known_text_representation_of_geometry[WKT Point] format. to_cartesiansh|Converts an input value to a `cartesian_shape` value. A string will only be successfully converted if it respects the {wikipedia}/Well-known_text_representation_of_geometry[WKT] format. +to_dateperiod |Converts an input value into a `date_period` value. to_datetime |Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use <>. to_dbl |Converts an input value to a double value. If the input parameter is of a date type, its value will be interpreted as milliseconds since the {wikipedia}/Unix_time[Unix epoch], converted to double. Boolean *true* will be converted to double *1.0*, *false* to *0.0*. 
to_degrees |Converts a number in {wikipedia}/Radian[radians] to {wikipedia}/Degree_(angle)[degrees]. @@ -362,6 +367,7 @@ to_lower |Returns a new string representing the input string converted to l to_radians |Converts a number in {wikipedia}/Degree_(angle)[degrees] to {wikipedia}/Radian[radians]. to_str |Converts an input value into a string. to_string |Converts an input value into a string. +to_timeduratio|Converts an input value into a `time_duration` value. to_ul |Converts an input value to an unsigned long value. If the input parameter is of a date type, its value will be interpreted as milliseconds since the {wikipedia}/Unix_time[Unix epoch], converted to unsigned long. Boolean *true* will be converted to unsigned long *1*, *false* to *0*. to_ulong |Converts an input value to an unsigned long value. If the input parameter is of a date type, its value will be interpreted as milliseconds since the {wikipedia}/Unix_time[Unix epoch], converted to unsigned long. Boolean *true* will be converted to unsigned long *1*, *false* to *0*. to_unsigned_lo|Converts an input value to an unsigned long value. If the input parameter is of a date type, its value will be interpreted as milliseconds since the {wikipedia}/Unix_time[Unix epoch], converted to unsigned long. Boolean *true* will be converted to unsigned long *1*, *false* to *0*. @@ -475,6 +481,7 @@ to_bool |boolean to_boolean |boolean |false |false |false to_cartesianpo|cartesian_point |false |false |false to_cartesiansh|cartesian_shape |false |false |false +to_dateperiod |date_period |false |false |false to_datetime |date |false |false |false to_dbl |double |false |false |false to_degrees |double |false |false |false @@ -490,6 +497,7 @@ to_lower |"keyword|text" to_radians |double |false |false |false to_str |keyword |false |false |false to_string |keyword |false |false |false +to_timeduratio|time_duration |false |false |false to_ul |unsigned_long |false |false |false to_ulong |unsigned_long |false |false |false to_unsigned_lo|unsigned_long |false |false |false @@ -516,5 +524,5 @@ countFunctions#[skip:-8.15.99] meta functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -117 | 117 | 117 +119 | 119 | 119 ; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 858e2a3332bf8..c0c5ebf010ffd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -289,7 +289,12 @@ public enum Cap { /** * Support for requesting the "SPACE" function. */ - SPACE; + SPACE, + + /** + * Support explicit casting from string literal to DATE_PERIOD or TIME_DURATION. 
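+     * For example {@code "3 hours"::time_duration} or {@code TO_DATEPERIOD("3 days")}.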
+ */ + CAST_STRING_LITERAL_TO_TEMPORAL_AMOUNT; private final boolean snapshotOnly; private final FeatureFlag featureFlag; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index f295c4b64bd8d..9714d3fce6d9f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -394,7 +394,12 @@ private static void checkEvalFields(LogicalPlan p, Set failures) { DataType dataType = field.dataType(); if (DataType.isRepresentable(dataType) == false) { failures.add( - fail(field, "EVAL does not support type [{}] in expression [{}]", dataType.typeName(), field.child().sourceText()) + fail( + field, + "EVAL does not support type [{}] as the return data type of expression [{}]", + dataType.typeName(), + field.child().sourceText() + ) ); } // check no aggregate functions are used diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index ea7d601369bc7..49076a1d65e72 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatePeriod; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDegrees; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; @@ -51,6 +52,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToRadians; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToTimeDuration; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToUnsignedLong; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToVersion; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateDiff; @@ -343,6 +345,7 @@ private FunctionDefinition[][] functions() { def(ToBoolean.class, ToBoolean::new, "to_boolean", "to_bool"), def(ToCartesianPoint.class, ToCartesianPoint::new, "to_cartesianpoint"), def(ToCartesianShape.class, ToCartesianShape::new, "to_cartesianshape"), + def(ToDatePeriod.class, ToDatePeriod::new, "to_dateperiod"), def(ToDatetime.class, ToDatetime::new, "to_datetime", "to_dt"), def(ToDegrees.class, ToDegrees::new, "to_degrees"), def(ToDouble.class, ToDouble::new, "to_double", "to_dbl"), @@ -353,6 +356,7 @@ private FunctionDefinition[][] functions() { def(ToLong.class, ToLong::new, "to_long"), def(ToRadians.class, ToRadians::new, "to_radians"), def(ToString.class, ToString::new, "to_string", "to_str"), + def(ToTimeDuration.class, ToTimeDuration::new, "to_timeduration"), 
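+            // to_dateperiod and to_timeduration accept only constant inputs and are folded on the coordinator (see FoldablesConvertFunction)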
def(ToUnsignedLong.class, ToUnsignedLong::new, "to_unsigned_long", "to_ulong", "to_ul"), def(ToVersion.class, ToVersion::new, "to_version", "to_ver"), }, // multivalue functions diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java index cf97558cd2676..2795ac857983c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java @@ -72,7 +72,7 @@ protected final ExpressionEvaluator.Factory evaluator(ExpressionEvaluator.Factor } @Override - protected final TypeResolution resolveType() { + protected TypeResolution resolveType() { if (childrenResolved() == false) { return new TypeResolution("Unresolved children"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java new file mode 100644 index 0000000000000..6e2b5bb63532d --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.capabilities.Validatable; +import org.elasticsearch.xpack.esql.common.Failures; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; + +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; +import static org.elasticsearch.xpack.esql.core.type.DataType.isString; +import static org.elasticsearch.xpack.esql.expression.Validations.isFoldable; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.foldToTemporalAmount; + +/** + * Base class for functions that converts a constant into an interval type - DATE_PERIOD or TIME_DURATION. + * The functions will be folded at the end of LogicalPlanOptimizer by the coordinator, it does not reach data node. 
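+ * <p>For example {@code TO_DATEPERIOD("3 days")} folds to the equivalent of {@code Period.ofDays(3)} at plan time;
+ * a non-constant input is rejected during validation.</p>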
+ */ +public abstract class FoldablesConvertFunction extends AbstractConvertFunction implements Validatable { + + protected FoldablesConvertFunction(Source source, Expression field) { + super(source, field); + } + + @Override + public final void writeTo(StreamOutput out) { + throw new UnsupportedOperationException("not serialized"); + } + + @Override + public final String getWriteableName() { + throw new UnsupportedOperationException("not serialized"); + } + + @Override + protected final TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + return isType( + field(), + dt -> isString(dt) || dt == dataType(), + sourceText(), + null, + false, + dataType().typeName().toLowerCase(Locale.ROOT) + " or string" + ); + } + + @Override + protected final Map factories() { + // TODO if a union type field is provided as an input, the correct error message is not shown, #112668 is a follow up + return Map.of(); + } + + @Override + public final Object fold() { + return foldToTemporalAmount(field(), sourceText(), dataType()); + } + + @Override + public final void validate(Failures failures) { + failures.add(isFoldable(field(), sourceText(), null)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriod.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriod.java new file mode 100644 index 0000000000000..86b46c792c85b --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriod.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; + +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; + +public class ToDatePeriod extends FoldablesConvertFunction { + + @FunctionInfo( + returnType = "date_period", + description = "Converts an input value into a `date_period` value.", + examples = @Example(file = "convert", tag = "castToDatePeriod") + ) + public ToDatePeriod( + Source source, + @Param( + name = "field", + type = { "date_period", "keyword", "text" }, + description = "Input value. The input is a valid constant date period expression." 
+ ) Expression v + ) { + super(source, v); + } + + @Override + public DataType dataType() { + return DATE_PERIOD; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new ToDatePeriod(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ToDatePeriod::new, field()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToTimeDuration.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToTimeDuration.java new file mode 100644 index 0000000000000..95425deccdda1 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToTimeDuration.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; + +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; + +public class ToTimeDuration extends FoldablesConvertFunction { + + @FunctionInfo( + returnType = "time_duration", + description = "Converts an input value into a `time_duration` value.", + examples = @Example(file = "convert", tag = "castToTimeDuration") + ) + public ToTimeDuration( + Source source, + @Param( + name = "field", + type = { "time_duration", "keyword", "text" }, + description = "Input value. The input is a valid constant time duration expression." 
+ ) Expression v + ) { + super(source, v); + } + + @Override + public DataType dataType() { + return TIME_DURATION; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new ToTimeDuration(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ToTimeDuration::new, field()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index 2621c76805591..cae0f3e084d54 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -83,7 +83,7 @@ import static org.elasticsearch.xpack.esql.parser.ParserUtils.typedParsing; import static org.elasticsearch.xpack.esql.parser.ParserUtils.visitList; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.bigIntegerToUnsignedLong; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.parseTemporalAmout; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.parseTemporalAmount; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.stringToIntegral; public abstract class ExpressionBuilder extends IdentifierBuilder { @@ -458,7 +458,7 @@ public Object visitQualifiedIntegerLiteral(EsqlBaseParser.QualifiedIntegerLitera String qualifier = ctx.UNQUOTED_IDENTIFIER().getText().toLowerCase(Locale.ROOT); try { - TemporalAmount quantity = parseTemporalAmout(value, qualifier, source); + TemporalAmount quantity = parseTemporalAmount(value, qualifier, source); return new Literal(source, quantity, quantity instanceof Duration ? 
TIME_DURATION : DATE_PERIOD); } catch (InvalidArgumentException | ArithmeticException e) { // the range varies by unit: Duration#ofMinutes(), #ofHours() will Math#multiplyExact() to reduce the unit to seconds; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index b090708a64ad3..0c530bd0eb273 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -10,12 +10,14 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.Converter; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -26,6 +28,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatePeriod; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoPoint; @@ -34,6 +37,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToTimeDuration; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToUnsignedLong; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToVersion; import org.elasticsearch.xpack.esql.parser.ParsingException; @@ -48,6 +52,7 @@ import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalAmount; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.BiFunction; @@ -110,9 +115,83 @@ public class EsqlDataTypeConverter { entry(KEYWORD, ToString::new), entry(TEXT, ToString::new), entry(UNSIGNED_LONG, ToUnsignedLong::new), - entry(VERSION, ToVersion::new) + entry(VERSION, ToVersion::new), + entry(DATE_PERIOD, ToDatePeriod::new), + entry(TIME_DURATION, ToTimeDuration::new) ); + public enum INTERVALS { + // TIME_DURATION, + MILLISECOND, + MILLISECONDS, + MS, + SECOND, + SECONDS, + SEC, + S, + MINUTE, + MINUTES, + MIN, + HOUR, + HOURS, + H, + // DATE_PERIOD + DAY, + DAYS, + D, + WEEK, + WEEKS, + W, + MONTH, + MONTHS, + MO, + QUARTER, + QUARTERS, + Q, + YEAR, + YEARS, + YR, + Y; + } + + public static 
List TIME_DURATIONS = List.of( + INTERVALS.MILLISECOND, + INTERVALS.MILLISECONDS, + INTERVALS.MS, + INTERVALS.SECOND, + INTERVALS.SECONDS, + INTERVALS.SEC, + INTERVALS.S, + INTERVALS.MINUTE, + INTERVALS.MINUTES, + INTERVALS.MIN, + INTERVALS.HOUR, + INTERVALS.HOURS, + INTERVALS.H + ); + + public static List DATE_PERIODS = List.of( + INTERVALS.DAY, + INTERVALS.DAYS, + INTERVALS.D, + INTERVALS.WEEK, + INTERVALS.WEEKS, + INTERVALS.W, + INTERVALS.MONTH, + INTERVALS.MONTHS, + INTERVALS.MO, + INTERVALS.QUARTER, + INTERVALS.QUARTERS, + INTERVALS.Q, + INTERVALS.YEAR, + INTERVALS.YEARS, + INTERVALS.YR, + INTERVALS.Y + ); + + public static final String INVALID_INTERVAL_ERROR = + "Invalid interval value in [{}], expected integer followed by one of {} but got [{}]"; + public static Converter converterFor(DataType from, DataType to) { // TODO move EXPRESSION_TO_LONG here if there is no regression if (isString(from)) { @@ -154,6 +233,38 @@ public static Converter converterFor(DataType from, DataType to) { return null; } + public static TemporalAmount foldToTemporalAmount(Expression field, String sourceText, DataType expectedType) { + if (field.foldable()) { + Object v = field.fold(); + if (v instanceof BytesRef b) { + try { + return EsqlDataTypeConverter.parseTemporalAmount(b.utf8ToString(), expectedType); + } catch (ParsingException e) { + throw new IllegalArgumentException( + LoggerMessageFormat.format( + null, + INVALID_INTERVAL_ERROR, + sourceText, + expectedType == DATE_PERIOD ? DATE_PERIODS : TIME_DURATIONS, + b.utf8ToString() + ) + ); + } + } else if (v instanceof TemporalAmount t) { + return t; + } + } + + throw new IllegalArgumentException( + LoggerMessageFormat.format( + null, + "argument of [{}] must be a constant, received [{}]", + field.sourceText(), + Expressions.name(field) + ) + ); + } + public static TemporalAmount parseTemporalAmount(Object val, DataType expectedType) { String errorMessage = "Cannot parse [{}] to {}"; String str = String.valueOf(val); @@ -181,7 +292,7 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy if ((value.isEmpty() || qualifier.isEmpty()) == false) { try { - TemporalAmount result = parseTemporalAmout(Integer.parseInt(value.toString()), qualifier.toString(), Source.EMPTY); + TemporalAmount result = parseTemporalAmount(Integer.parseInt(value.toString()), qualifier.toString(), Source.EMPTY); if (DataType.DATE_PERIOD == expectedType && result instanceof Period || DataType.TIME_DURATION == expectedType && result instanceof Duration) { return result; @@ -196,7 +307,6 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy // wrong pattern } } - throw new ParsingException(Source.EMPTY, errorMessage, val, expectedType); } @@ -284,22 +394,24 @@ public static DataType commonType(DataType left, DataType right) { } // generally supporting abbreviations from https://en.wikipedia.org/wiki/Unit_of_time - public static TemporalAmount parseTemporalAmout(Number value, String qualifier, Source source) throws InvalidArgumentException, + public static TemporalAmount parseTemporalAmount(Number value, String qualifier, Source source) throws InvalidArgumentException, ArithmeticException, ParsingException { - return switch (qualifier) { - case "millisecond", "milliseconds", "ms" -> Duration.ofMillis(safeToLong(value)); - case "second", "seconds", "sec", "s" -> Duration.ofSeconds(safeToLong(value)); - case "minute", "minutes", "min" -> Duration.ofMinutes(safeToLong(value)); - case "hour", "hours", "h" -> 
Duration.ofHours(safeToLong(value)); - - case "day", "days", "d" -> Period.ofDays(safeToInt(safeToLong(value))); - case "week", "weeks", "w" -> Period.ofWeeks(safeToInt(safeToLong(value))); - case "month", "months", "mo" -> Period.ofMonths(safeToInt(safeToLong(value))); - case "quarter", "quarters", "q" -> Period.ofMonths(safeToInt(Math.multiplyExact(3L, safeToLong(value)))); - case "year", "years", "yr", "y" -> Period.ofYears(safeToInt(safeToLong(value))); - - default -> throw new ParsingException(source, "Unexpected time interval qualifier: '{}'", qualifier); - }; + try { + return switch (INTERVALS.valueOf(qualifier.toUpperCase(Locale.ROOT))) { + case MILLISECOND, MILLISECONDS, MS -> Duration.ofMillis(safeToLong(value)); + case SECOND, SECONDS, SEC, S -> Duration.ofSeconds(safeToLong(value)); + case MINUTE, MINUTES, MIN -> Duration.ofMinutes(safeToLong(value)); + case HOUR, HOURS, H -> Duration.ofHours(safeToLong(value)); + + case DAY, DAYS, D -> Period.ofDays(safeToInt(safeToLong(value))); + case WEEK, WEEKS, W -> Period.ofWeeks(safeToInt(safeToLong(value))); + case MONTH, MONTHS, MO -> Period.ofMonths(safeToInt(safeToLong(value))); + case QUARTER, QUARTERS, Q -> Period.ofMonths(safeToInt(Math.multiplyExact(3L, safeToLong(value)))); + case YEAR, YEARS, YR, Y -> Period.ofYears(safeToInt(safeToLong(value))); + }; + } catch (IllegalArgumentException e) { + throw new ParsingException(source, "Unexpected time interval qualifier: '{}'", qualifier); + } } /** diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index b50b801785a9f..4cf6ae2c3986b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -44,6 +44,9 @@ public class VerifierTests extends ESTestCase { private final Analyzer defaultAnalyzer = AnalyzerTestUtils.expandedDefaultAnalyzer(); private final Analyzer tsdb = AnalyzerTestUtils.analyzer(AnalyzerTestUtils.tsdbIndexResolution()); + private final List TIME_DURATIONS = List.of("millisecond", "second", "minute", "hour"); + private final List DATE_PERIODS = List.of("day", "week", "month", "year"); + public void testIncompatibleTypesInMathOperation() { assertEquals( "1:40: second argument of [a + c] must be [datetime or numeric], found value [c] type [keyword]", @@ -677,42 +680,171 @@ public void testWrongInputParam() { } public void testPeriodAndDurationInRowAssignment() { - for (var unit : List.of("millisecond", "second", "minute", "hour", "day", "week", "month", "year")) { + for (var unit : TIME_DURATIONS) { assertEquals("1:5: cannot use [1 " + unit + "] directly in a row assignment", error("row a = 1 " + unit)); + assertEquals( + "1:5: cannot use [1 " + unit + "::time_duration] directly in a row assignment", + error("row a = 1 " + unit + "::time_duration") + ); + assertEquals( + "1:5: cannot use [\"1 " + unit + "\"::time_duration] directly in a row assignment", + error("row a = \"1 " + unit + "\"::time_duration") + ); + assertEquals( + "1:5: cannot use [to_timeduration(1 " + unit + ")] directly in a row assignment", + error("row a = to_timeduration(1 " + unit + ")") + ); + assertEquals( + "1:5: cannot use [to_timeduration(\"1 " + unit + "\")] directly in a row assignment", + error("row a = to_timeduration(\"1 " + unit + "\")") + ); + } + for (var unit : DATE_PERIODS) { + assertEquals("1:5: cannot 
use [1 " + unit + "] directly in a row assignment", error("row a = 1 " + unit)); + assertEquals( + "1:5: cannot use [1 " + unit + "::date_period] directly in a row assignment", + error("row a = 1 " + unit + "::date_period") + ); + assertEquals( + "1:5: cannot use [\"1 " + unit + "\"::date_period] directly in a row assignment", + error("row a = \"1 " + unit + "\"::date_period") + ); + assertEquals( + "1:5: cannot use [to_dateperiod(1 " + unit + ")] directly in a row assignment", + error("row a = to_dateperiod(1 " + unit + ")") + ); + assertEquals( + "1:5: cannot use [to_dateperiod(\"1 " + unit + "\")] directly in a row assignment", + error("row a = to_dateperiod(\"1 " + unit + "\")") + ); } } public void testSubtractDateTimeFromTemporal() { - for (var unit : List.of("millisecond", "second", "minute", "hour")) { + for (var unit : TIME_DURATIONS) { assertEquals( - "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] from a [TIME_DURATION] amount [1 " + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [TIME_DURATION] amount [1 " + unit + "]", error("row 1 " + unit + " - now() ") ); + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [TIME_DURATION] amount [1 " + + unit + + "::time_duration]", + error("row 1 " + unit + "::time_duration" + " - now() ") + ); + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [TIME_DURATION] amount [\"1 " + + unit + + "\"::time_duration]", + error("row \"1 " + unit + "\"::time_duration" + " - now() ") + ); + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [TIME_DURATION] amount [to_timeduration(1 " + + unit + + ")]", + error("row to_timeduration(1 " + unit + ") - now() ") + ); + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [TIME_DURATION] amount [to_timeduration(\"1 " + + unit + + "\")]", + error("row to_timeduration(\"1 " + unit + "\") - now() ") + ); } - for (var unit : List.of("day", "week", "month", "year")) { + for (var unit : DATE_PERIODS) { assertEquals( - "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] from a [DATE_PERIOD] amount [1 " + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [DATE_PERIOD] amount [1 " + unit + "]", error("row 1 " + unit + " - now() ") ); + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [DATE_PERIOD] amount [1 " + + unit + + "::date_period]", + error("row 1 " + unit + "::date_period" + " - now() ") + ); + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [DATE_PERIOD] amount [\"1 " + + unit + + "\"::date_period]", + error("row \"1 " + unit + "\"::date_period" + " - now() ") + ); + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [DATE_PERIOD] amount [to_dateperiod(1 " + + unit + + ")]", + error("row to_dateperiod(1 " + unit + ") - now() ") + ); + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] " + + "from a [DATE_PERIOD] amount [to_dateperiod(\"1 " + + unit + + "\")]", + error("row to_dateperiod(\"1 " + unit 
+ "\") - now() ") + ); } } public void testPeriodAndDurationInEval() { - for (var unit : List.of("millisecond", "second", "minute", "hour")) { + for (var unit : TIME_DURATIONS) { assertEquals( - "1:18: EVAL does not support type [time_duration] in expression [1 " + unit + "]", + "1:18: EVAL does not support type [time_duration] as the return data type of expression [1 " + unit + "]", error("row x = 1 | eval y = 1 " + unit) ); + assertEquals( + "1:18: EVAL does not support type [time_duration] as the return data type of expression [1 " + unit + "::time_duration]", + error("row x = 1 | eval y = 1 " + unit + "::time_duration") + ); + assertEquals( + "1:18: EVAL does not support type [time_duration] as the return data type of expression [\"1 " + + unit + + "\"::time_duration]", + error("row x = 1 | eval y = \"1 " + unit + "\"::time_duration") + ); + assertEquals( + "1:18: EVAL does not support type [time_duration] as the return data type of expression [to_timeduration(1 " + unit + ")]", + error("row x = 1 | eval y = to_timeduration(1 " + unit + ")") + ); + assertEquals( + "1:18: EVAL does not support type [time_duration] as the return data type of expression [to_timeduration(\"1 " + + unit + + "\")]", + error("row x = 1 | eval y = to_timeduration(\"1 " + unit + "\")") + ); } - for (var unit : List.of("day", "week", "month", "year")) { + for (var unit : DATE_PERIODS) { assertEquals( - "1:18: EVAL does not support type [date_period] in expression [1 " + unit + "]", + "1:18: EVAL does not support type [date_period] as the return data type of expression [1 " + unit + "]", error("row x = 1 | eval y = 1 " + unit) ); + assertEquals( + "1:18: EVAL does not support type [date_period] as the return data type of expression [1 " + unit + "::date_period]", + error("row x = 1 | eval y = 1 " + unit + "::date_period") + ); + assertEquals( + "1:18: EVAL does not support type [date_period] as the return data type of expression [\"1 " + unit + "\"::date_period]", + error("row x = 1 | eval y = \"1 " + unit + "\"::date_period") + ); + assertEquals( + "1:18: EVAL does not support type [date_period] as the return data type of expression [to_dateperiod(1 " + unit + ")]", + error("row x = 1 | eval y = to_dateperiod(1 " + unit + ")") + ); + assertEquals( + "1:18: EVAL does not support type [date_period] as the return data type of expression [to_dateperiod(\"1 " + unit + "\")]", + error("row x = 1 | eval y = to_dateperiod(\"1 " + unit + "\")") + ); } } @@ -722,6 +854,14 @@ public void testFilterNonBoolField() { public void testFilterDateConstant() { assertEquals("1:19: Condition expression needs to be boolean, found [DATE_PERIOD]", error("from test | where 1 year")); + assertEquals( + "1:19: Condition expression needs to be boolean, found [DATE_PERIOD]", + error("from test | where \"1 year\"::date_period") + ); + assertEquals( + "1:19: Condition expression needs to be boolean, found [DATE_PERIOD]", + error("from test | where to_dateperiod(\"1 year\")") + ); } public void testNestedAggField() { @@ -1055,10 +1195,91 @@ public void testCoalesceWithMixedNumericTypes() { ); } - public void test() { + public void testToDatePeriodTimeDurationInInvalidPosition() { + // arithmetic operations in eval assertEquals( - "1:23: second argument of [coalesce(network.bytes_in, 0)] must be [counter_long], found value [0] type [integer]", - error("FROM tests | eval x = coalesce(network.bytes_in, 0)", tsdb) + "1:39: EVAL does not support type [date_period] as the return data type of expression [3 months + 5 days]", + error("row x = 
\"2024-01-01\"::datetime | eval y = 3 months + 5 days") + ); + + assertEquals( + "1:39: EVAL does not support type [date_period] as the return data type of expression " + + "[\"3 months\"::date_period + \"5 days\"::date_period]", + error("row x = \"2024-01-01\"::datetime | eval y = \"3 months\"::date_period + \"5 days\"::date_period") + ); + + assertEquals( + "1:39: EVAL does not support type [time_duration] as the return data type of expression [3 hours + 5 minutes]", + error("row x = \"2024-01-01\"::datetime | eval y = 3 hours + 5 minutes") + ); + + assertEquals( + "1:39: EVAL does not support type [time_duration] as the return data type of expression " + + "[\"3 hours\"::time_duration + \"5 minutes\"::time_duration]", + error("row x = \"2024-01-01\"::datetime | eval y = \"3 hours\"::time_duration + \"5 minutes\"::time_duration") + ); + + // where + assertEquals( + "1:26: first argument of [\"3 days\"::date_period == to_dateperiod(\"3 days\")] must be " + + "[boolean, cartesian_point, cartesian_shape, date_nanos, datetime, double, geo_point, geo_shape, integer, ip, keyword, " + + "long, text, unsigned_long or version], found value [\"3 days\"::date_period] type [date_period]", + error("row x = \"3 days\" | where \"3 days\"::date_period == to_dateperiod(\"3 days\")") + ); + + assertEquals( + "1:26: first argument of [\"3 hours\"::time_duration <= to_timeduration(\"3 hours\")] must be " + + "[date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version], " + + "found value [\"3 hours\"::time_duration] type [time_duration]", + error("row x = \"3 days\" | where \"3 hours\"::time_duration <= to_timeduration(\"3 hours\")") + ); + + assertEquals( + "1:19: second argument of [first_name <= to_timeduration(\"3 hours\")] must be " + + "[date_nanos, datetime, double, integer, ip, keyword, long, text, unsigned_long or version], " + + "found value [to_timeduration(\"3 hours\")] type [time_duration]", + error("from test | where first_name <= to_timeduration(\"3 hours\")") + ); + + assertEquals( + "1:19: 1st argument of [first_name IN ( to_timeduration(\"3 hours\"), \"3 days\"::date_period)] must be [keyword], " + + "found value [to_timeduration(\"3 hours\")] type [time_duration]", + error("from test | where first_name IN ( to_timeduration(\"3 hours\"), \"3 days\"::date_period)") + ); + } + + public void testToDatePeriodToTimeDurationWithInvalidType() { + assertEquals( + "1:36: argument of [1.5::date_period] must be [date_period or string], found value [1.5] type [double]", + error("from types | EVAL x = birth_date + 1.5::date_period") + ); + assertEquals( + "1:37: argument of [to_timeduration(1)] must be [time_duration or string], found value [1] type [integer]", + error("from types | EVAL x = birth_date - to_timeduration(1)") + ); + assertEquals( + "1:45: argument of [x::date_period] must be [date_period or string], found value [x] type [double]", + error("from types | EVAL x = 1.5, y = birth_date + x::date_period") + ); + assertEquals( + "1:44: argument of [to_timeduration(x)] must be [time_duration or string], found value [x] type [integer]", + error("from types | EVAL x = 1, y = birth_date - to_timeduration(x)") + ); + assertEquals( + "1:64: argument of [x::date_period] must be [date_period or string], found value [x] type [datetime]", + error("from types | EVAL x = \"2024-09-08\"::datetime, y = birth_date + x::date_period") + ); + assertEquals( + "1:65: argument of [to_timeduration(x)] must be [time_duration or string], found value [x] type [datetime]", + error("from 
types | EVAL x = \"2024-09-08\"::datetime, y = birth_date - to_timeduration(x)") + ); + assertEquals( + "1:58: argument of [x::date_period] must be [date_period or string], found value [x] type [ip]", + error("from types | EVAL x = \"2024-09-08\"::ip, y = birth_date + x::date_period") + ); + assertEquals( + "1:59: argument of [to_timeduration(x)] must be [time_duration or string], found value [x] type [ip]", + error("from types | EVAL x = \"2024-09-08\"::ip, y = birth_date - to_timeduration(x)") ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriodTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriodTests.java new file mode 100644 index 0000000000000..c7e18e453df0c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatePeriodTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; + +import java.time.Period; +import java.time.temporal.TemporalAmount; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.esql.EsqlTestUtils.randomLiteral; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; +import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.DATE_PERIODS; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.matchesPattern; + +@FunctionName("to_dateperiod") +public class ToDatePeriodTests extends AbstractScalarFunctionTestCase { + public ToDatePeriodTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + + suppliers.add(new TestCaseSupplier(List.of(DATE_PERIOD), () -> { + Period field = (Period) randomLiteral(DATE_PERIOD).value(); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(field, DATE_PERIOD, "field").forceLiteral()), + matchesPattern("LiteralsEvaluator.*"), + DATE_PERIOD, + equalTo(field) + ); + })); + + for (EsqlDataTypeConverter.INTERVALS interval : DATE_PERIODS) { + for (DataType inputType : List.of(KEYWORD, TEXT)) { + suppliers.add(new TestCaseSupplier(List.of(inputType), () -> { + BytesRef field = new BytesRef( + " ".repeat(randomIntBetween(0, 10)) + (randomBoolean() ? 
"" : "-") + randomIntBetween(0, 36500000) + " ".repeat( + randomIntBetween(1, 10) + ) + interval.toString() + " ".repeat(randomIntBetween(0, 10)) + ); + TemporalAmount result = EsqlDataTypeConverter.parseTemporalAmount(field.utf8ToString(), DATE_PERIOD); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(field, inputType, "field").forceLiteral()), + matchesPattern("LiteralsEvaluator.*"), + DATE_PERIOD, + equalTo(result) + ); + })); + } + } + return parameterSuppliersFromTypedData( + errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), (v, p) -> "date_period or string") + ); + } + + @Override + protected Expression build(Source source, List args) { + return new ToDatePeriod(source, args.get(0)); + } + + @Override + public void testSerializationOfSimple() { + assertTrue("Serialization test does not apply", true); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToTimeDurationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToTimeDurationTests.java new file mode 100644 index 0000000000000..b3f666ec6c5c2 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToTimeDurationTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.FunctionName; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; + +import java.time.Duration; +import java.time.temporal.TemporalAmount; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.esql.EsqlTestUtils.randomLiteral; +import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; +import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.TIME_DURATIONS; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.matchesPattern; + +@FunctionName("to_timeduration") +public class ToTimeDurationTests extends AbstractScalarFunctionTestCase { + public ToTimeDurationTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + + suppliers.add(new TestCaseSupplier(List.of(TIME_DURATION), () -> { + Duration field = (Duration) randomLiteral(TIME_DURATION).value(); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(field, TIME_DURATION, 
"field").forceLiteral()), + matchesPattern("LiteralsEvaluator.*"), + TIME_DURATION, + equalTo(field) + ); + })); + + for (EsqlDataTypeConverter.INTERVALS interval : TIME_DURATIONS) { + for (DataType inputType : List.of(KEYWORD, TEXT)) { + suppliers.add(new TestCaseSupplier(List.of(inputType), () -> { + BytesRef field = new BytesRef( + " ".repeat(randomIntBetween(0, 10)) + (randomBoolean() ? "" : "-") + randomIntBetween(0, Integer.MAX_VALUE) + " " + .repeat(randomIntBetween(1, 10)) + interval.toString() + " ".repeat(randomIntBetween(0, 10)) + ); + TemporalAmount result = EsqlDataTypeConverter.parseTemporalAmount(field.utf8ToString(), TIME_DURATION); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(field, inputType, "field").forceLiteral()), + matchesPattern("LiteralsEvaluator.*"), + TIME_DURATION, + equalTo(result) + ); + })); + } + } + return parameterSuppliersFromTypedData( + errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), (v, p) -> "time_duration or string") + ); + } + + @Override + protected Expression build(Source source, List args) { + return new ToTimeDuration(source, args.get(0)); + } + + @Override + public void testSerializationOfSimple() { + assertTrue("Serialization test does not apply", true); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 3cd9221d90c81..22a4b410a6d7a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -5419,6 +5419,147 @@ public void testMvSortInvalidOrder() { assertEquals("Invalid order value in [mv_sort(v, o)], expected one of [ASC, DESC] but got [dsc]", iae.getMessage()); } + public void testToDatePeriodTimeDurationInvalidIntervals() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "3 dys", x = date + interval::date_period""")); + assertEquals( + "Invalid interval value in [interval::date_period], expected integer followed by one of " + + "[DAY, DAYS, D, WEEK, WEEKS, W, MONTH, MONTHS, MO, QUARTER, QUARTERS, Q, YEAR, YEARS, YR, Y] but got [3 dys]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "- 3 days", x = date + interval::date_period""")); + assertEquals( + "Invalid interval value in [interval::date_period], expected integer followed by one of " + + "[DAY, DAYS, D, WEEK, WEEKS, W, MONTH, MONTHS, MO, QUARTER, QUARTERS, Q, YEAR, YEARS, YR, Y] but got [- 3 days]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "3 dys", x = date - to_dateperiod(interval)""")); + assertEquals( + "Invalid interval value in [to_dateperiod(interval)], expected integer followed by one of " + + "[DAY, DAYS, D, WEEK, WEEKS, W, MONTH, MONTHS, MO, QUARTER, QUARTERS, Q, YEAR, YEARS, YR, Y] but got [3 dys]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "- 3 days", x = date - to_dateperiod(interval)""")); + assertEquals( + "Invalid interval value in [to_dateperiod(interval)], expected integer followed by one of " + + "[DAY, DAYS, D, WEEK, WEEKS, W, MONTH, MONTHS, MO, 
QUARTER, QUARTERS, Q, YEAR, YEARS, YR, Y] but got [- 3 days]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "3 ours", x = date + interval::time_duration""")); + assertEquals( + "Invalid interval value in [interval::time_duration], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [3 ours]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "- 3 hours", x = date + interval::time_duration""")); + assertEquals( + "Invalid interval value in [interval::time_duration], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [- 3 hours]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "3 ours", x = date - to_timeduration(interval)""")); + assertEquals( + "Invalid interval value in [to_timeduration(interval)], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [3 ours]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "- 3 hours", x = date - to_timeduration(interval)""")); + assertEquals( + "Invalid interval value in [to_timeduration(interval)], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [- 3 hours]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + from types | EVAL interval = "3.5 hours", x = date - to_timeduration(interval)""")); + assertEquals( + "Invalid interval value in [to_timeduration(interval)], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [3.5 hours]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + row x = "2024-01-01"::datetime | eval y = x + "3 dys"::date_period""")); + assertEquals( + "Invalid interval value in [\"3 dys\"::date_period], expected integer followed by one of " + + "[DAY, DAYS, D, WEEK, WEEKS, W, MONTH, MONTHS, MO, QUARTER, QUARTERS, Q, YEAR, YEARS, YR, Y] but got [3 dys]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + row x = "2024-01-01"::datetime | eval y = x - to_dateperiod("3 dys")""")); + assertEquals( + "Invalid interval value in [to_dateperiod(\"3 dys\")], expected integer followed by one of " + + "[DAY, DAYS, D, WEEK, WEEKS, W, MONTH, MONTHS, MO, QUARTER, QUARTERS, Q, YEAR, YEARS, YR, Y] but got [3 dys]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + row x = "2024-01-01"::datetime | eval y = x + "3 ours"::time_duration""")); + assertEquals( + "Invalid interval value in [\"3 ours\"::time_duration], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [3 ours]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + row x = "2024-01-01"::datetime | eval y = x - to_timeduration("3 ours")""")); + assertEquals( + "Invalid interval value in 
[to_timeduration(\"3 ours\")], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [3 ours]", + e.getMessage() + ); + + e = expectThrows(IllegalArgumentException.class, () -> planTypes(""" + row x = "2024-01-01"::datetime | eval y = x - to_timeduration("3.5 hours")""")); + assertEquals( + "Invalid interval value in [to_timeduration(\"3.5 hours\")], expected integer followed by one of " + + "[MILLISECOND, MILLISECONDS, MS, SECOND, SECONDS, SEC, S, MINUTE, MINUTES, MIN, HOUR, HOURS, H] but got [3.5 hours]", + e.getMessage() + ); + } + + public void testToDatePeriodToTimeDurationWithField() { + final String header = "Found 1 problem\nline "; + VerificationException e = expectThrows(VerificationException.class, () -> planTypes(""" + from types | EVAL x = date + keyword::date_period""")); + assertTrue(e.getMessage().startsWith("Found ")); + assertEquals( + "1:30: argument of [keyword::date_period] must be a constant, received [keyword]", + e.getMessage().substring(header.length()) + ); + + e = expectThrows(VerificationException.class, () -> planTypes(""" + from types | EVAL x = date - to_timeduration(keyword)""")); + assertEquals( + "1:47: argument of [to_timeduration(keyword)] must be a constant, received [keyword]", + e.getMessage().substring(header.length()) + ); + + e = expectThrows(VerificationException.class, () -> planTypes(""" + from types | EVAL x = keyword, y = date + x::date_period""")); + assertTrue(e.getMessage().startsWith("Found ")); + assertEquals("1:43: argument of [x::date_period] must be a constant, received [x]", e.getMessage().substring(header.length())); + + e = expectThrows(VerificationException.class, () -> planTypes(""" + from types | EVAL x = keyword, y = date - to_timeduration(x)""")); + assertEquals("1:60: argument of [to_timeduration(x)] must be a constant, received [x]", e.getMessage().substring(header.length())); + } + private Literal nullOf(DataType dataType) { return new Literal(Source.EMPTY, null, dataType); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 35a22fd542a1e..9af152116e1e1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -1413,6 +1413,22 @@ public void testParamMixed() { ); } + public void testIntervalParam() { + LogicalPlan stm = statement( + "row x = ?1::datetime | eval y = ?1::datetime + ?2::date_period", + new QueryParams(List.of(new QueryParam("datetime", "2024-01-01", KEYWORD), new QueryParam("date_period", "3 days", KEYWORD))) + ); + assertThat(stm, instanceOf(Eval.class)); + Eval eval = (Eval) stm; + assertThat(eval.fields().size(), is(1)); + + NamedExpression field = eval.fields().get(0); + assertThat(field.name(), is("y")); + assertThat(field, instanceOf(Alias.class)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left().children().get(0)).value(), equalTo("2024-01-01")); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right().children().get(0)).value(), equalTo("3 days")); + } + public void testFieldContainingDotsAndNumbers() { LogicalPlan where = processingCommand("where `a.b.1m.4321`"); assertThat(where, instanceOf(Filter.class)); diff --git 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml index e168fc589d11f..132d54fa9ac1e 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml @@ -393,6 +393,35 @@ setup: - match: {values.1: ["1",2.0,null,true,123,1674835275193]} - match: {values.2: ["1",2.0,null,true,123,1674835275193]} +--- +"Test Interval in Input Params": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: POST + path: /_query + parameters: [ ] + capabilities: [ cast_string_literal_to_temporal_amount ] + reason: "interval in parameters" + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'row x = ?n1::datetime | eval y = x - ?n2::date_period, z = x + ?n3::time_duration' + params: [{"n1" : "2024-08-06"}, {"n2" : "3 days"}, {"n3" : "3 hours"}] + + - length: {columns: 3} + - match: {columns.0.name: "x"} + - match: {columns.0.type: "date"} + - match: {columns.1.name: "y"} + - match: {columns.1.type: "date"} + - match: {columns.2.name: "z"} + - match: {columns.2.type: "date"} + - length: {values: 1} + - match: {values.0: ["2024-08-06T00:00:00.000Z","2024-08-03T00:00:00.000Z","2024-08-06T03:00:00.000Z"]} + --- version is not allowed: - requires: From 18b187cfb0874810c49727bdfe5456c33a3d1a7a Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Mon, 9 Sep 2024 16:20:38 -0400 Subject: [PATCH 12/31] Unmute test for issue 109315 (#112671) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 0391504eeac6e..72ca5e4d95660 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -5,9 +5,6 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-async-query-api/line_17} issue: https://github.com/elastic/elasticsearch/issues/109260 -- class: "org.elasticsearch.index.engine.frozen.FrozenIndexIT" - issue: "https://github.com/elastic/elasticsearch/issues/109315" - method: "testTimestampFieldTypeExposedByAllIndicesServices" - class: "org.elasticsearch.analysis.common.CommonAnalysisClientYamlTestSuiteIT" issue: "https://github.com/elastic/elasticsearch/issues/109318" method: "test {yaml=analysis-common/50_char_filters/pattern_replace error handling (too complex pattern)}" From f367d2799fa8f02e6d8ca904aad189770d4d7cc3 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 9 Sep 2024 17:01:57 -0500 Subject: [PATCH 13/31] Unmuting simulate ingest yaml rest test (#112684) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 72ca5e4d95660..23ad0588aa561 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -178,9 +178,6 @@ tests: - class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT method: testIndexPatternErrorMessageComparison_ESQL_SearchDSL issue: https://github.com/elastic/elasticsearch/issues/112630 -- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT - method: test {yaml=simulate.ingest/10_basic/Test mapping validation from templates} - issue: https://github.com/elastic/elasticsearch/issues/112633 - class: org.elasticsearch.compute.aggregation.blockhash.BlockHashTests method: testBytesRefLongHashHugeCombinatorialExplosion {forcePackedHash=false} issue: 
https://github.com/elastic/elasticsearch/issues/112442 From b633fe1ccb67f7dbf460cdc087eb60ae212a472a Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 10 Sep 2024 07:46:55 +0200 Subject: [PATCH 14/31] Replace TopBucketBuilder with a BucketPriorityQueue (#112602) BucketPriorityQueue is a much better option nowadays --- .../search/aggregations/BucketOrder.java | 2 +- .../search/aggregations/DelayedBucket.java | 2 +- .../search/aggregations/InternalOrder.java | 6 +- .../search/aggregations/TopBucketBuilder.java | 210 ------------------ .../bucket/terms/AbstractInternalTerms.java | 35 ++- .../aggregations/TopBucketBuilderTests.java | 164 -------------- 6 files changed, 30 insertions(+), 389 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java delete mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/TopBucketBuilderTests.java diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java index 79516de4184ba..924ea5093bafe 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java @@ -133,7 +133,7 @@ public final void validate(Aggregator aggregator) throws AggregationExecutionExc * The comparator might need to reduce the {@link DelayedBucket} and therefore we need to provide the * reducer and the reduce context.The context must be on the final reduce phase. */ - abstract Comparator> delayedBucketComparator( + public abstract Comparator> delayedBucketComparator( BiFunction, AggregationReduceContext, B> reduce, AggregationReduceContext reduceContext ); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/DelayedBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/DelayedBucket.java index 19aef60b913af..2a45f174390eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/DelayedBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/DelayedBucket.java @@ -92,7 +92,7 @@ public String toString() { * Called to mark a bucket as non-competitive so it can release it can release * any sub-buckets from the breaker. */ - void nonCompetitive(AggregationReduceContext reduceContext) { + public void nonCompetitive(AggregationReduceContext reduceContext) { if (reduced != null) { // -1 for itself, -countInnerBucket for all the sub-buckets. 
reduceContext.consumeBucketsAndMaybeBreak(-1 - InternalMultiBucketAggregation.countInnerBucket(reduced)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java index 08e64f687569a..482d915560d04 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -81,7 +81,7 @@ public Comparator comparator() { } @Override - Comparator> delayedBucketComparator( + public Comparator> delayedBucketComparator( BiFunction, AggregationReduceContext, B> reduce, AggregationReduceContext reduceContext ) { @@ -216,7 +216,7 @@ public Comparator comparator() { } @Override - Comparator> delayedBucketComparator( + public Comparator> delayedBucketComparator( BiFunction, AggregationReduceContext, B> reduce, AggregationReduceContext reduceContext ) { @@ -284,7 +284,7 @@ public Comparator comparator() { } @Override - Comparator> delayedBucketComparator( + public Comparator> delayedBucketComparator( BiFunction, AggregationReduceContext, B> reduce, AggregationReduceContext reduceContext ) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java deleted file mode 100644 index 8b94f5e37949e..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/TopBucketBuilder.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.search.aggregations; - -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.PriorityQueue; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.function.BiFunction; -import java.util.function.Consumer; - -/** - * Merges many buckets into the "top" buckets as sorted by {@link BucketOrder}. - */ -public abstract class TopBucketBuilder { - /** - * The number of buckets required before we switch to the - * {@link BufferingTopBucketBuilder}. If we need fewer buckets we use - * {@link PriorityQueueTopBucketBuilder}. - *
<p>
    - * The value we picked for this boundary is fairly arbitrary, but it - * is important that its bigger than the default size of the terms - * aggregation. It's basically the amount of memory you are willing to - * waste when reduce small terms aggregations so it shouldn't be too - * large either. The value we have, {@code 1024}, preallocates about - * 32k for the priority queue. - */ - static final int USE_BUFFERING_BUILDER = 1024; - - /** - * Create a {@link TopBucketBuilder} to build a list of the top buckets. - *
<p>
    - * If there are few required results we use a {@link PriorityQueueTopBucketBuilder} - * which is simpler and when the priority queue is full but allocates {@code size + 1} - * slots in an array. If there are many required results we prefer a - * {@link BufferingTopBucketBuilder} which doesn't preallocate and is faster for the - * first {@code size} results. But it's a little slower when the priority queue is full. - *
<p>
    - * It's important for this not to preallocate a bunch of memory when - * {@code size} is very very large because this backs the reduction of the {@code terms} - * aggregation and folks often set the {@code size} of that to something quite large. - * The choice in the paragraph above handles this case. - * - * @param size the requested size of the list - * @param order the sort order of the buckets - * @param nonCompetitive called with non-competitive buckets - * @param reduce function to reduce a list of buckets - * @param reduceContext the reduce context - */ - public static TopBucketBuilder build( - int size, - BucketOrder order, - Consumer> nonCompetitive, - BiFunction, AggregationReduceContext, B> reduce, - AggregationReduceContext reduceContext - ) { - if (size < USE_BUFFERING_BUILDER) { - return new PriorityQueueTopBucketBuilder<>(size, order, nonCompetitive, reduce, reduceContext); - } - return new BufferingTopBucketBuilder<>(size, order, nonCompetitive, reduce, reduceContext); - } - - protected final Consumer> nonCompetitive; - - private TopBucketBuilder(Consumer> nonCompetitive) { - this.nonCompetitive = nonCompetitive; - } - - /** - * Add a bucket if it is competitive. If there isn't space but the - * bucket is competitive then this will drop the least competitive bucket - * to make room for the new bucket. - *
<p>
    - * Instead of operating on complete buckets we this operates on a - * wrapper containing what we need to merge the buckets called - * {@link DelayedBucket}. We can evaluate some common sort criteria - * directly on the {@linkplain DelayedBucket}s so we only need to - * merge exactly the sub-buckets we need. - */ - public abstract void add(DelayedBucket bucket); - - /** - * Return the most competitive buckets sorted by the comparator. - */ - public abstract List build(); - - /** - * Collects the "top" buckets by adding them directly to a {@link PriorityQueue}. - * This is always going to be faster than {@link BufferingTopBucketBuilder} - * but it requires allocating an array of {@code size + 1}. - */ - static class PriorityQueueTopBucketBuilder extends TopBucketBuilder { - private final PriorityQueue> queue; - private final BiFunction, AggregationReduceContext, B> reduce; - private final AggregationReduceContext reduceContext; - - PriorityQueueTopBucketBuilder( - int size, - BucketOrder order, - Consumer> nonCompetitive, - BiFunction, AggregationReduceContext, B> reduce, - AggregationReduceContext reduceContext - ) { - super(nonCompetitive); - if (size >= ArrayUtil.MAX_ARRAY_LENGTH) { - throw new IllegalArgumentException("can't reduce more than [" + ArrayUtil.MAX_ARRAY_LENGTH + "] buckets"); - } - this.reduce = reduce; - this.reduceContext = reduceContext; - queue = new PriorityQueue<>(size) { - private final Comparator> comparator = order.delayedBucketComparator(reduce, reduceContext); - - @Override - protected boolean lessThan(DelayedBucket a, DelayedBucket b) { - return comparator.compare(a, b) > 0; - } - }; - } - - @Override - public void add(DelayedBucket bucket) { - DelayedBucket removed = queue.insertWithOverflow(bucket); - if (removed != null) { - nonCompetitive.accept(removed); - removed.nonCompetitive(reduceContext); - } - } - - @Override - public List build() { - List result = new ArrayList<>(queue.size()); - for (int i = queue.size() - 1; i >= 0; i--) { - result.add(queue.pop().reduced(reduce, reduceContext)); - } - Collections.reverse(result); - return result; - } - } - - /** - * Collects the "top" buckets by adding them to a {@link List} that grows - * as more buckets arrive and is converting into a - * {@link PriorityQueueTopBucketBuilder} when {@code size} buckets arrive. 
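/*
 * Illustrative sketch (not from this patch): the bounded top-N idea behind both the removed
 * PriorityQueueTopBucketBuilder and the insertWithOverflow-style queue this commit switches to,
 * reduced to a self-contained example. The TopNSketch class and its names are hypothetical;
 * only java.util.PriorityQueue is assumed.
 */
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

class TopNSketch {
    // Keep the n most competitive values. The queue is ordered so its head is the least
    // competitive retained value; once the queue is full, a new value only enters if it beats
    // that head, and the displaced head is the "non-competitive" element.
    static List<Integer> topN(Iterable<Integer> values, int n) {
        PriorityQueue<Integer> queue = new PriorityQueue<>(Comparator.naturalOrder());
        for (Integer value : values) {
            if (queue.size() < n) {
                queue.add(value);
            } else if (queue.peek() < value) {
                queue.poll(); // evict the least competitive entry, as insertWithOverflow does
                queue.add(value);
            }
        }
        List<Integer> result = new ArrayList<>(queue);
        result.sort(Comparator.reverseOrder()); // most competitive first
        return result;
    }

    public static void main(String[] args) {
        System.out.println(topN(List.of(5, 1, 9, 3, 7, 2), 3)); // prints [9, 7, 5]
    }
}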
- */ - private static class BufferingTopBucketBuilder extends TopBucketBuilder { - private final int size; - private final BucketOrder order; - private final BiFunction, AggregationReduceContext, B> reduce; - private final AggregationReduceContext reduceContext; - - private List> buffer; - private PriorityQueueTopBucketBuilder next; - - BufferingTopBucketBuilder( - int size, - BucketOrder order, - Consumer> nonCompetitive, - BiFunction, AggregationReduceContext, B> reduce, - AggregationReduceContext reduceContext - ) { - super(nonCompetitive); - this.reduce = reduce; - this.reduceContext = reduceContext; - this.size = size; - this.order = order; - buffer = new ArrayList<>(); - } - - @Override - public void add(DelayedBucket bucket) { - if (next != null) { - assert buffer == null; - next.add(bucket); - return; - } - buffer.add(bucket); - if (buffer.size() < size) { - return; - } - next = new PriorityQueueTopBucketBuilder<>(size, order, nonCompetitive, reduce, reduceContext); - for (DelayedBucket b : buffer) { - next.queue.add(b); - } - buffer = null; - } - - @Override - public List build() { - if (next != null) { - assert buffer == null; - return next.build(); - } - List result = new ArrayList<>(buffer.size()); - for (DelayedBucket b : buffer) { - result.add(b.reduced(reduce, reduceContext)); - } - result.sort(order.comparator()); - return result; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java index 4091a16d8ad4c..e4b4dbbac48ca 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java @@ -20,13 +20,13 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.KeyComparable; -import org.elasticsearch.search.aggregations.TopBucketBuilder; import org.elasticsearch.search.aggregations.bucket.IteratorAndCurrent; import org.elasticsearch.search.aggregations.support.SamplingContext; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -295,19 +295,34 @@ public InternalAggregation get() { } }); } else if (reduceContext.isFinalReduce()) { - TopBucketBuilder top = TopBucketBuilder.build( - getRequiredSize(), - getOrder(), - removed -> otherDocCount[0] += removed.getDocCount(), + final Comparator> comparator = getOrder().delayedBucketComparator( AbstractInternalTerms.this::reduceBucket, reduceContext ); - thisReduceOrder = reduceBuckets(bucketsList, getThisReduceOrder(), bucket -> { - if (bucket.getDocCount() >= getMinDocCount()) { - top.add(bucket); + try ( + BucketPriorityQueue> top = new BucketPriorityQueue<>( + getRequiredSize(), + reduceContext.bigArrays(), + comparator + ) + ) { + thisReduceOrder = reduceBuckets(bucketsList, getThisReduceOrder(), bucket -> { + if (bucket.getDocCount() >= getMinDocCount()) { + final DelayedBucket removed = top.insertWithOverflow(bucket); + if (removed != null) { + otherDocCount[0] += removed.getDocCount(); + removed.nonCompetitive(reduceContext); + } + } + }); + // size is an integer as it should be <= getRequiredSize() + final int size = (int) 
top.size(); + result = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + result.add(top.pop().reduced(AbstractInternalTerms.this::reduceBucket, reduceContext)); } - }); - result = top.build(); + Collections.reverse(result); + } } else { result = new ArrayList<>(); thisReduceOrder = reduceBuckets( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/TopBucketBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/TopBucketBuilderTests.java deleted file mode 100644 index 58d97b488d667..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/TopBucketBuilderTests.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.search.aggregations; - -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.Strings; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation.InternalBucket; -import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.InternalAggregationTestCase; - -import java.util.ArrayList; -import java.util.List; -import java.util.function.BiFunction; - -import static org.elasticsearch.search.aggregations.DelayedBucketTests.mockReduce; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; - -public class TopBucketBuilderTests extends ESTestCase { - public void testSizeOne() { - int count = between(1, 1000); - AggregationReduceContext context = InternalAggregationTestCase.emptyReduceContextBuilder().forFinalReduction(); - BiFunction, AggregationReduceContext, InternalBucket> reduce = mockReduce(context); - List nonCompetitive = new ArrayList<>(); - TopBucketBuilder builder = TopBucketBuilder.build( - 1, - BucketOrder.key(true), - b -> nonCompetitive.add(b.toString()), - reduce, - context - ); - - for (int i = 0; i < count; i++) { - builder.add(new DelayedBucket<>(List.of(bucket(i)))); - } - - List top = builder.build(); - assertThat(top, hasSize(1)); - assertThat(top.get(0).getKeyAsString(), equalTo("000000")); - assertThat(top.get(0).getDocCount(), equalTo(1L)); - for (int i = 1; i < count; i++) { - assertThat(nonCompetitive.get(i - 1), equalTo("Delayed[" + bucketKey(i) + "]")); - } - } - - public void testAllCompetitive() { - int size = between(3, 1000); - int count = between(1, size); - AggregationReduceContext context = InternalAggregationTestCase.emptyReduceContextBuilder().forFinalReduction(); - BiFunction, AggregationReduceContext, InternalBucket> reduce = mockReduce(context); - TopBucketBuilder builder = TopBucketBuilder.build( - size, - BucketOrder.key(true), - b -> fail("unexpected uncompetitive bucket " + b), - reduce, - context - ); - - for (int i = 0; i < count; i++) { - builder.add(new DelayedBucket<>(List.of(bucket(i)))); - } - - List top = builder.build(); - assertThat(top, hasSize(count)); - for (int i = 0; i < count; i++) { - assertThat(top.get(i).getKeyAsString(), equalTo(bucketKey(i))); - assertThat(top.get(i).getDocCount(), equalTo(1L)); - } - } - - public void 
someNonCompetitiveTestCase(int size) { - int count = between(size + 1, size * 30); - AggregationReduceContext context = InternalAggregationTestCase.emptyReduceContextBuilder().forFinalReduction(); - BiFunction, AggregationReduceContext, InternalBucket> reduce = mockReduce(context); - List nonCompetitive = new ArrayList<>(); - TopBucketBuilder builder = TopBucketBuilder.build( - size, - BucketOrder.key(true), - b -> nonCompetitive.add(b.toString()), - reduce, - context - ); - - for (int i = 0; i < count; i++) { - builder.add(new DelayedBucket<>(List.of(bucket(i)))); - } - - List top = builder.build(); - assertThat(top, hasSize(size)); - for (int i = 0; i < count; i++) { - if (i < size) { - assertThat(top.get(i).getKeyAsString(), equalTo(bucketKey(i))); - assertThat(top.get(i).getDocCount(), equalTo(1L)); - } else { - assertThat(nonCompetitive.get(i - size), equalTo("Delayed[" + bucketKey(i) + "]")); - } - } - } - - public void testSomeNonCompetitiveSmall() { - someNonCompetitiveTestCase(between(2, TopBucketBuilder.USE_BUFFERING_BUILDER - 1)); - } - - public void testSomeNonCompetitiveLarge() { - someNonCompetitiveTestCase(between(TopBucketBuilder.USE_BUFFERING_BUILDER, TopBucketBuilder.USE_BUFFERING_BUILDER * 5)); - } - - public void testHuge() { - int count = between(1, 1000); - AggregationReduceContext context = InternalAggregationTestCase.emptyReduceContextBuilder().forFinalReduction(); - BiFunction, AggregationReduceContext, InternalBucket> reduce = mockReduce(context); - TopBucketBuilder builder = TopBucketBuilder.build( - Integer.MAX_VALUE, - BucketOrder.key(true), - b -> fail("unexpected uncompetitive bucket " + b), - reduce, - context - ); - - for (int i = 0; i < count; i++) { - builder.add(new DelayedBucket<>(List.of(bucket(i)))); - } - - List top = builder.build(); - assertThat(top, hasSize(count)); - assertThat(top.get(0).getKeyAsString(), equalTo("000000")); - assertThat(top.get(0).getDocCount(), equalTo(1L)); - for (int i = 0; i < count; i++) { - assertThat(top.get(i).getKeyAsString(), equalTo(bucketKey(i))); - assertThat(top.get(i).getDocCount(), equalTo(1L)); - } - } - - public void testHugeQueueError() { - Exception e = expectThrows( - IllegalArgumentException.class, - () -> new TopBucketBuilder.PriorityQueueTopBucketBuilder<>( - ArrayUtil.MAX_ARRAY_LENGTH, - BucketOrder.key(true), - b -> fail("unexpected uncompetitive bucket " + b), - null, - null - ) - ); - assertThat(e.getMessage(), equalTo("can't reduce more than [" + ArrayUtil.MAX_ARRAY_LENGTH + "] buckets")); - } - - private String bucketKey(int index) { - return Strings.format("%06d", index); - } - - private InternalBucket bucket(int index) { - return new StringTerms.Bucket(new BytesRef(bucketKey(index)), 1, InternalAggregations.EMPTY, false, 0, DocValueFormat.RAW); - } -} From 8f07d60c2cf0cc7ece801faee46acba2f4e0344f Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Sep 2024 08:17:09 +0100 Subject: [PATCH 15/31] Fix trappy timeouts in `o.e.a.a.cluster.*` (#112674) Removes all usages of `TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT` in cluster-related APIs in `:server`. 
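The idea, in a self-contained sketch (hypothetical names, not the actual Elasticsearch request classes): instead of a request type silently falling back to an implicit default wait-for-master timeout, the caller has to state one explicitly.
```java
// Minimal sketch of the change in spirit; MasterNodeRequestSketch is hypothetical.
import java.time.Duration;

final class MasterNodeRequestSketch {
    private final Duration masterNodeTimeout;

    // No implicit default: callers must decide how long they are willing to wait for the master.
    MasterNodeRequestSketch(Duration masterNodeTimeout) {
        if (masterNodeTimeout == null) {
            throw new IllegalArgumentException("master node timeout must be given explicitly");
        }
        this.masterNodeTimeout = masterNodeTimeout;
    }

    Duration masterNodeTimeout() {
        return masterNodeTimeout;
    }

    public static void main(String[] args) {
        MasterNodeRequestSketch request = new MasterNodeRequestSketch(Duration.ofSeconds(30));
        System.out.println("will wait for the master for " + request.masterNodeTimeout());
    }
}
```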
Relates #107984 --- .../datastreams/DataStreamIT.java | 10 +- .../ResolveClusterDataStreamIT.java | 2 +- .../DataStreamLifecycleServiceIT.java | 4 +- .../datastreams/DataStreamsStatsTests.java | 5 +- .../datastreams/LookAHeadTimeTests.java | 4 +- .../action/CrossClusterPainlessExecuteIT.java | 2 +- .../migration/FeatureMigrationIT.java | 38 ++-- .../migration/MultiFeatureMigrationIT.java | 6 +- .../migration/SystemIndexMigrationIT.java | 6 +- .../azure/classic/AzureSimpleTests.java | 4 +- .../classic/AzureTwoStartedNodesTests.java | 4 +- .../ec2/Ec2DiscoveryUpdateSettingsTests.java | 2 +- .../discovery/gce/GceDiscoverTests.java | 8 +- .../repositories/hdfs/HdfsTests.java | 2 +- .../common/logging/LoggersTests.java | 2 +- .../action/IndicesRequestIT.java | 4 +- .../ClusterAllocationExplainIT.java | 8 +- .../TransportGetDesiredBalanceActionIT.java | 12 +- .../TransportDesiredNodesActionsIT.java | 16 +- .../admin/cluster/node/tasks/TasksIT.java | 4 +- ...ansportClusterStateActionDisruptionIT.java | 23 ++- .../admin/cluster/stats/ClusterStatsIT.java | 3 +- .../admin/indices/create/CreateIndexIT.java | 4 +- .../admin/indices/create/ShrinkIndexIT.java | 31 ++-- .../admin/indices/create/SplitIndexIT.java | 7 +- .../admin/indices/rollover/RolloverIT.java | 50 ++++-- .../shards/IndicesShardStoreRequestIT.java | 8 +- .../bulk/BulkProcessorClusterSettingsIT.java | 2 +- .../bulk/TransportSimulateBulkActionIT.java | 2 +- .../support/ActiveShardsObserverIT.java | 4 +- .../support/WaitActiveShardCountIT.java | 4 +- .../master/TransportMasterNodeActionIT.java | 8 +- .../elasticsearch/aliases/IndexAliasesIT.java | 22 ++- .../elasticsearch/blocks/SimpleBlocksIT.java | 6 +- .../cluster/ClusterHealthIT.java | 56 +++--- .../cluster/ClusterInfoServiceIT.java | 9 +- .../cluster/DesiredNodesSnapshotsIT.java | 6 +- .../cluster/DesiredNodesStatusIT.java | 28 ++- .../cluster/MinimumMasterNodesIT.java | 65 ++++--- .../elasticsearch/cluster/NoMasterNodeIT.java | 28 ++- .../cluster/PrevalidateNodeRemovalIT.java | 13 +- .../cluster/SimpleClusterStateIT.java | 67 ++++--- .../cluster/SimpleDataNodesIT.java | 24 ++- .../cluster/SpecificMasterNodesIT.java | 131 ++++++++++++-- .../cluster/UpdateSettingsValidationIT.java | 7 +- .../allocation/AwarenessAllocationIT.java | 39 +++-- .../cluster/allocation/ClusterRerouteIT.java | 38 ++-- .../allocation/FilteringAllocationIT.java | 25 +-- .../allocation/SimpleAllocationIT.java | 6 +- .../coordination/InitialClusterStateIT.java | 8 +- .../coordination/RareClusterStateIT.java | 2 +- .../coordination/RemoveCustomsCommandIT.java | 15 +- .../coordination/RemoveSettingsCommandIT.java | 6 +- .../UnsafeBootstrapAndDetachCommandIT.java | 30 +++- .../coordination/VotingConfigurationIT.java | 15 +- .../cluster/coordination/ZenDiscoveryIT.java | 2 +- .../cluster/routing/AllocationIdIT.java | 17 +- .../cluster/routing/DelayedAllocationIT.java | 29 ++-- .../cluster/routing/PrimaryAllocationIT.java | 100 ++++++++--- .../routing/RemoveReplicaPriorityIT.java | 12 +- .../cluster/routing/ShardRoutingRoleIT.java | 6 +- .../allocation/AllocationFailuresResetIT.java | 4 +- .../allocation/DiskThresholdMonitorIT.java | 8 +- .../routing/allocation/ShardStateIT.java | 11 +- .../decider/DiskThresholdDeciderIT.java | 4 +- .../allocation/decider/MockDiskUsagesIT.java | 14 +- .../UpdateShardAllocationSettingsIT.java | 4 +- .../cluster/settings/ClusterSettingsIT.java | 114 +++++++----- ...usterSettingsUpdateWithFaultyMasterIT.java | 2 +- .../cluster/shards/ClusterSearchShardsIT.java | 2 +- 
.../cluster/shards/ClusterShardLimitIT.java | 45 ++--- .../discovery/ClusterDisruptionIT.java | 13 +- .../discovery/DiscoveryDisruptionIT.java | 2 +- .../discovery/StableMasterDisruptionIT.java | 8 +- .../elasticsearch/document/ShardInfoIT.java | 6 +- .../elasticsearch/env/NodeEnvironmentIT.java | 4 +- .../features/ClusterFeaturesIT.java | 2 +- .../gateway/GatewayIndexStateIT.java | 58 ++++--- .../gateway/MetadataNodesIT.java | 4 +- .../gateway/QuorumGatewayIT.java | 2 +- .../gateway/RecoverAfterNodesIT.java | 56 +++++- .../gateway/RecoveryFromGatewayIT.java | 19 +- .../gateway/ReplicaShardAllocatorIT.java | 2 +- .../health/HealthMetadataServiceIT.java | 2 +- .../elasticsearch/health/HealthServiceIT.java | 2 +- .../health/UpdateHealthInfoCacheIT.java | 13 +- .../index/IndexingPressureIT.java | 12 +- .../index/seqno/GlobalCheckpointSyncIT.java | 4 +- .../index/seqno/RetentionLeaseIT.java | 2 +- .../RemoveCorruptedShardDataCommandIT.java | 8 +- .../index/store/CorruptedFileIT.java | 30 ++-- .../index/suggest/stats/SuggestStatsIT.java | 4 +- .../IndexLifecycleActionIT.java | 24 +-- ...DateMathIndexExpressionsIntegrationIT.java | 2 +- .../indices/IndicesLifecycleListenerIT.java | 10 +- .../indices/cluster/ResolveClusterIT.java | 4 +- .../indices/cluster/ShardLockFailureIT.java | 4 +- .../indices/mapping/SimpleGetMappingsIT.java | 2 +- .../mapping/UpdateMappingIntegrationIT.java | 10 +- .../breaker/CircuitBreakerServiceIT.java | 2 +- .../HierarchyCircuitBreakerTelemetryIT.java | 7 +- .../indices/recovery/DanglingIndicesIT.java | 2 +- .../recovery/IndexPrimaryRelocationIT.java | 6 +- .../indices/recovery/IndexRecoveryIT.java | 16 +- .../recovery/ReplicaToPrimaryPromotionIT.java | 8 +- .../plan/ShardSnapshotsServiceIT.java | 2 +- .../settings/UpdateNumberOfReplicasIT.java | 87 ++++++---- .../indices/settings/UpdateSettingsIT.java | 128 ++++++++++---- .../state/CloseIndexDisableCloseAllIT.java | 2 +- .../indices/state/CloseIndexIT.java | 14 +- .../indices/state/OpenCloseIndexIT.java | 22 +-- .../indices/state/ReopenWhileClosingIT.java | 2 +- .../indices/state/SimpleIndexStateIT.java | 10 +- .../indices/stats/IndexStatsIT.java | 2 +- .../store/IndicesStoreIntegrationIT.java | 34 ++-- .../template/SimpleIndexTemplateIT.java | 11 +- .../ingest/IngestFileSettingsIT.java | 2 +- .../SimpleNodesCapabilitiesIT.java | 5 +- .../nodesinfo/SimpleNodesInfoIT.java | 15 +- .../DestructiveOperationsIT.java | 6 +- .../decider/EnableAssignmentDeciderIT.java | 2 +- .../IndexFoldersDeletionListenerIT.java | 6 +- .../readiness/ReadinessClusterIT.java | 7 +- .../recovery/FullRollingRestartIT.java | 16 +- .../recovery/RecoveryWhileUnderLoadIT.java | 14 +- .../elasticsearch/recovery/RelocationIT.java | 26 +-- ...rtInactiveAutoExpandReplicaNotStaleIT.java | 2 +- .../repositories/IndexSnapshotsServiceIT.java | 2 +- .../BlobStoreRepositoryCleanupIT.java | 5 +- .../ComponentTemplatesFileSettingsIT.java | 7 +- .../service/FileSettingsServiceIT.java | 13 +- .../service/RepositoriesFileSettingsIT.java | 5 +- .../service/SnapshotsAndFileSettingsIT.java | 11 +- .../rest/discovery/Zen2RestApiIT.java | 9 +- .../routing/AliasResolveRoutingIT.java | 4 +- .../routing/PartitionedRoutingIT.java | 9 +- .../routing/SimpleRoutingIT.java | 2 +- .../SearchServiceCleanupOnLostMasterIT.java | 2 +- .../aggregations/bucket/GeoDistanceIT.java | 2 +- .../search/aggregations/bucket/RangeIT.java | 2 +- .../search/basic/SearchRedStateIndexIT.java | 8 +- .../basic/SearchWhileCreatingIndexIT.java | 4 +- .../search/basic/SearchWhileRelocatingIT.java | 2 
+- .../basic/SearchWithRandomIOExceptionsIT.java | 2 +- .../basic/TransportSearchFailuresIT.java | 4 +- .../search/ccs/CCSUsageTelemetryIT.java | 2 +- .../search/ccs/CrossClusterIT.java | 2 +- .../search/ccs/CrossClusterSearchIT.java | 2 +- .../search/ccs/CrossClusterSearchLeakIT.java | 8 +- .../functionscore/FunctionScorePluginIT.java | 2 +- .../retriever/MinimalCompoundRetrieverIT.java | 2 +- .../search/retriever/RetrieverRewriteIT.java | 2 +- .../search/routing/SearchPreferenceIT.java | 2 +- .../routing/SearchReplicaSelectionIT.java | 2 +- .../search/scroll/SearchScrollIT.java | 26 +-- .../search/stats/SearchStatsIT.java | 4 +- .../snapshots/ConcurrentSnapshotsIT.java | 2 +- .../snapshots/CustomMetadataContextIT.java | 6 +- .../DedicatedClusterSnapshotRestoreIT.java | 10 +- .../snapshots/MultiClusterRepoAccessIT.java | 2 +- .../snapshots/RepositoriesIT.java | 11 +- .../snapshots/RestoreSnapshotIT.java | 21 ++- .../SharedClusterSnapshotRestoreIT.java | 21 ++- .../snapshots/SnapshotBrokenSettingsIT.java | 4 +- .../snapshots/SnapshotShutdownIT.java | 11 +- .../snapshots/SnapshotStatusApisIT.java | 2 +- .../snapshots/SnapshotStressTestsIT.java | 21 ++- .../snapshots/SystemIndicesSnapshotIT.java | 6 +- .../AddVotingConfigExclusionsRequest.java | 22 ++- .../ClearVotingConfigExclusionsRequest.java | 4 +- .../desirednodes/GetDesiredNodesAction.java | 5 +- .../TransportDeleteDesiredNodesAction.java | 11 +- .../UpdateDesiredNodesRequest.java | 24 ++- .../cluster/health/ClusterHealthRequest.java | 8 +- .../health/ClusterHealthRequestBuilder.java | 4 +- .../GetFeatureUpgradeStatusRequest.java | 5 +- .../migration/PostFeatureUpgradeRequest.java | 5 +- .../PrevalidateNodeRemovalRequest.java | 8 +- .../settings/ClusterGetSettingsAction.java | 5 +- .../ClusterUpdateSettingsRequest.java | 18 +- .../ClusterUpdateSettingsRequestBuilder.java | 5 +- .../cluster/state/ClusterStateRequest.java | 4 +- .../state/ClusterStateRequestBuilder.java | 4 +- .../tasks/PendingClusterTasksRequest.java | 5 +- .../client/internal/ClusterAdminClient.java | 12 +- .../action/ReservedClusterSettingsAction.java | 7 +- .../RestAddVotingConfigExclusionAction.java | 5 +- ...RestClearVotingConfigExclusionsAction.java | 3 +- .../cluster/RestClusterGetSettingsAction.java | 5 +- .../cluster/RestClusterHealthAction.java | 14 +- .../admin/cluster/RestClusterStateAction.java | 3 +- .../RestClusterUpdateSettingsAction.java | 7 +- .../cluster/RestGetDesiredNodesAction.java | 3 +- .../RestGetFeatureUpgradeStatusAction.java | 7 +- .../RestPendingClusterTasksAction.java | 3 +- .../cluster/RestPostFeatureUpgradeAction.java | 7 +- .../RestPrevalidateNodeRemovalAction.java | 12 +- .../cluster/RestUpdateDesiredNodesAction.java | 11 +- .../rest/action/cat/RestAllocationAction.java | 3 +- .../cat/RestCatComponentTemplateAction.java | 3 +- .../rest/action/cat/RestHealthAction.java | 5 +- .../rest/action/cat/RestIndicesAction.java | 3 +- .../rest/action/cat/RestMasterAction.java | 3 +- .../rest/action/cat/RestNodeAttrsAction.java | 3 +- .../rest/action/cat/RestNodesAction.java | 3 +- .../cat/RestPendingClusterTasksAction.java | 3 +- .../rest/action/cat/RestPluginsAction.java | 3 +- .../rest/action/cat/RestSegmentsAction.java | 3 +- .../rest/action/cat/RestShardsAction.java | 3 +- .../rest/action/cat/RestThreadPoolAction.java | 3 +- .../transport/RemoteClusterConnection.java | 5 +- .../transport/SniffConnectionStrategy.java | 5 +- ...AddVotingConfigExclusionsRequestTests.java | 73 ++++++-- ...earVotingConfigExclusionsRequestTests.java | 2 +- 
...tAddVotingConfigExclusionsActionTests.java | 58 +++++-- ...learVotingConfigExclusionsActionTests.java | 12 +- ...ransportUpdateDesiredNodesActionTests.java | 15 +- ...DesiredNodesRequestSerializationTests.java | 9 +- .../UpdateDesiredNodesRequestTests.java | 2 + .../health/ClusterHealthRequestTests.java | 4 +- .../TransportClusterHealthActionTests.java | 4 +- ...eNodeRemovalRequestSerializationTests.java | 8 +- .../PrevalidateNodeRemovalRequestTests.java | 16 +- ...portPrevalidateNodeRemovalActionTests.java | 18 +- .../ClusterUpdateSettingsRequestTests.java | 12 +- .../cluster/state/ClusterStateApiTests.java | 8 +- .../state/ClusterStateRequestTests.java | 30 ++-- ...StateAwareHandledTransportActionTests.java | 12 +- .../TransportMasterNodeActionTests.java | 12 +- .../ParentTaskAssigningClientTests.java | 6 +- .../health/ClusterStateHealthTests.java | 2 +- .../metadata/DesiredNodesTestCase.java | 2 + .../MetadataIndexTemplateServiceTests.java | 8 +- .../discovery/AbstractDisruptionTestCase.java | 2 +- .../index/IndexServiceTests.java | 4 +- .../search/SearchServiceTests.java | 8 +- .../snapshots/SnapshotResiliencyTests.java | 7 +- .../transport/RemoteClusterClientTests.java | 12 +- .../test/disruption/NetworkDisruptionIT.java | 2 +- .../AbstractIndexRecoveryIntegTestCase.java | 10 +- .../search/geo/BaseShapeIntegTestCase.java | 4 +- .../AbstractSnapshotIntegTestCase.java | 2 +- .../test/AbstractMultiClustersTestCase.java | 8 +- .../elasticsearch/test/ESIntegTestCase.java | 163 ++++++++++-------- .../test/ESSingleNodeTestCase.java | 17 +- .../test/InternalTestCluster.java | 14 +- .../org/elasticsearch/test/TestCluster.java | 7 +- .../test/disruption/SingleNodeDisruption.java | 3 +- .../search/AsyncSearchIntegTestCase.java | 2 +- .../CCSUsageTelemetryAsyncSearchIT.java | 2 +- .../search/CrossClusterAsyncSearchIT.java | 2 +- .../AutoscalingFileSettingsIT.java | 2 +- ...nsportDeleteAutoscalingPolicyActionIT.java | 2 +- ...TransportPutAutoscalingPolicyActionIT.java | 4 +- .../existence/FrozenExistenceDeciderIT.java | 2 +- .../storage/ReactiveStorageIT.java | 8 +- .../elasticsearch/xpack/ccr/AutoFollowIT.java | 11 +- .../elasticsearch/xpack/ccr/CcrAliasesIT.java | 2 +- .../xpack/ccr/CcrRepositoryIT.java | 10 +- .../xpack/ccr/CcrRetentionLeaseIT.java | 60 +++++-- .../xpack/ccr/CloseFollowerIndexIT.java | 4 +- .../xpack/ccr/IndexFollowingIT.java | 28 +-- .../ccr/PrimaryFollowerAllocationIT.java | 6 +- .../xpack/ccr/RestartIndexFollowingIT.java | 6 +- .../ccr/action/AutoFollowCoordinator.java | 2 +- .../xpack/ccr/action/CcrRequests.java | 2 +- .../TransportPutAutoFollowPatternAction.java | 2 +- .../xpack/ccr/repository/CcrRepository.java | 2 +- .../elasticsearch/xpack/CcrIntegTestCase.java | 10 +- .../xpack/CcrSingleNodeTestCase.java | 4 +- .../ComponentVersionsNodesInfoIT.java | 5 +- .../sourceonly/SourceOnlySnapshotIT.java | 16 +- .../DataTierAllocationDeciderIT.java | 16 +- ...ierShardAvailabilityHealthIndicatorIT.java | 2 +- .../core/ml/annotations/AnnotationIndex.java | 3 +- .../persistence/AnomalyDetectorsIndex.java | 7 +- .../xpack/core/ml/utils/MlIndexAndAlias.java | 5 +- .../xpack/core/ClientHelperTests.java | 10 +- .../async/AsyncSearchIndexServiceTests.java | 10 +- .../DownsampleActionSingleNodeTests.java | 6 +- .../xpack/enrich/EnrichMultiNodeIT.java | 4 +- .../xpack/enrich/EnrichPolicyRunner.java | 6 +- ...ransportDeleteEnrichPolicyActionTests.java | 4 +- .../action/AbstractEsqlIntegTestCase.java | 4 +- .../esql/action/CrossClustersQueryIT.java | 2 +- 
.../xpack/esql/action/EsqlActionIT.java | 10 +- .../index/engine/frozen/FrozenIndexIT.java | 16 +- .../index/engine/frozen/FrozenIndexTests.java | 40 +++-- .../IndexLifecycleInitialisationTests.java | 8 +- .../xpack/ml/integration/DatafeedJobsIT.java | 6 +- ...NativeDataFrameAnalyticsIntegTestCase.java | 2 +- .../ml/integration/MlNativeIntegTestCase.java | 2 +- .../ml/integration/SetUpgradeModeIT.java | 6 +- .../ml/integration/TestFeatureResetIT.java | 6 +- .../license/MachineLearningLicensingIT.java | 8 +- .../integration/BasicDistributedJobsIT.java | 10 +- .../xpack/ml/integration/DatafeedCcsIT.java | 4 +- .../integration/MlDistributedFailureIT.java | 7 +- .../xpack/ml/integration/TooManyJobsIT.java | 9 +- .../ml/job/persistence/MockClientBuilder.java | 2 +- .../xpack/ml/support/BaseMlIntegTestCase.java | 2 +- .../exporter/http/HttpExporterSslIT.java | 2 +- .../monitoring/integration/MonitoringIT.java | 6 +- ...ransportMonitoringMigrateAlertsAction.java | 6 +- .../monitoring/MultiNodesStatsTests.java | 2 +- .../local/LocalExporterIntegTests.java | 2 +- .../lucene/bwc/ArchiveLicenseIntegTests.java | 2 +- .../profiling/action/ProfilingTestCase.java | 4 +- .../BaseSearchableSnapshotsIntegTestCase.java | 2 +- .../FrozenSearchableSnapshotsIntegTests.java | 10 +- ...movalWithSearchableSnapshotIntegTests.java | 5 +- ...pshotsCanMatchOnCoordinatorIntegTests.java | 9 +- .../SearchableSnapshotsIntegTests.java | 16 +- .../SearchableSnapshotsLicenseIntegTests.java | 2 +- ...earchableSnapshotAllocationIntegTests.java | 4 +- ...chableSnapshotDiskThresholdIntegTests.java | 12 +- ...shotEnableAllocationDeciderIntegTests.java | 3 +- .../SearchableSnapshotShutdownIntegTests.java | 12 +- ...archableSnapshotsRelocationIntegTests.java | 11 +- ...bleSnapshotsPersistentCacheIntegTests.java | 15 +- .../shared/NodesCachesStatsIntegTests.java | 2 +- ...tiallyCachedShardAllocationIntegTests.java | 25 ++- ...SnapshotRecoveryStateIntegrationTests.java | 4 +- .../ClusterPrivilegeIntegrationTests.java | 2 +- .../integration/IndexPrivilegeIntegTests.java | 4 +- .../RoleMappingFileSettingsIT.java | 13 +- .../SecurityFeatureStateIntegTests.java | 2 +- .../ShrinkIndexWithSecurityTests.java | 2 +- .../elasticsearch/license/LicensingTests.java | 4 +- .../FileSettingsRoleMappingsRestartIT.java | 8 +- .../AuditTrailSettingsUpdateTests.java | 4 +- .../security/authc/ApiKeyIntegTests.java | 2 +- .../security/authc/TokenAuthIntegTests.java | 2 +- .../authc/apikey/ApiKeySingleNodeTests.java | 2 +- .../authc/esnative/NativeRealmIntegTests.java | 70 ++++++-- ...ervedRealmElasticAutoconfigIntegTests.java | 22 ++- .../esnative/ReservedRealmIntegTests.java | 16 +- .../store/NativePrivilegeStoreCacheTests.java | 9 +- .../NativePrivilegeStoreSingleNodeTests.java | 4 +- .../OperatorPrivilegesSingleNodeTests.java | 23 ++- .../profile/SecurityDomainIntegTests.java | 6 +- .../filter/IpFilteringUpdateTests.java | 8 +- .../test/SecurityIntegTestCase.java | 2 +- .../authz/AuthorizationServiceTests.java | 18 +- .../NodeShutdownDelayedAllocationIT.java | 16 +- .../xpack/shutdown/NodeShutdownShardsIT.java | 6 +- .../xpack/slm/SnapshotLifecycleRestIT.java | 6 +- .../xpack/slm/SLMFileSettingsIT.java | 11 +- .../slm/SLMSnapshotBlockingIntegTests.java | 2 +- .../xpack/slm/SLMStatDisruptionIT.java | 10 +- .../SnapshotBasedIndexRecoveryIT.java | 14 +- .../persistence/TransformInternalIndex.java | 6 +- .../votingonly/VotingOnlyNodePluginTests.java | 42 +++-- .../AbstractWatcherIntegrationTestCase.java | 6 +- .../test/integration/SingleNodeTests.java | 2 
+- .../WatcherExecutorServiceBenchmark.java | 3 +- .../bench/WatcherScheduleEngineBenchmark.java | 7 +- 357 files changed, 2640 insertions(+), 1479 deletions(-) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index e3da69b7b2f0b..ebe5546c0907f 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -597,8 +597,8 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { false ); verifyResolvability(dataStreamName, indicesAdmin().prepareGetSettings(dataStreamName), false); - verifyResolvability(dataStreamName, clusterAdmin().prepareHealth(dataStreamName), false); - verifyResolvability(dataStreamName, clusterAdmin().prepareState().setIndices(dataStreamName), false); + verifyResolvability(dataStreamName, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, dataStreamName), false); + verifyResolvability(dataStreamName, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(dataStreamName), false); verifyResolvability(dataStreamName, client().prepareFieldCaps(dataStreamName).setFields("*"), false); verifyResolvability(dataStreamName, indicesAdmin().prepareGetIndex().addIndices(dataStreamName), false); verifyResolvability(dataStreamName, indicesAdmin().prepareOpen(dataStreamName), false); @@ -644,8 +644,8 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { indicesAdmin().prepareUpdateSettings(wildcardExpression).setSettings(Settings.builder().put("index.number_of_replicas", 0)), false ); - verifyResolvability(wildcardExpression, clusterAdmin().prepareHealth(wildcardExpression), false); - verifyResolvability(wildcardExpression, clusterAdmin().prepareState().setIndices(wildcardExpression), false); + verifyResolvability(wildcardExpression, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, wildcardExpression), false); + verifyResolvability(wildcardExpression, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(wildcardExpression), false); verifyResolvability(wildcardExpression, client().prepareFieldCaps(wildcardExpression).setFields("*"), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex().addIndices(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false); @@ -1594,7 +1594,7 @@ public void testClusterStateIncludeDataStream() throws Exception { client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); // when querying a backing index then the data stream should be included as well. 
- ClusterStateRequest request = new ClusterStateRequest().indices(".ds-metrics-foo-*000001"); + ClusterStateRequest request = new ClusterStateRequest(TEST_REQUEST_TIMEOUT).indices(".ds-metrics-foo-*000001"); ClusterState state = clusterAdmin().state(request).get().getState(); assertThat(state.metadata().dataStreams().size(), equalTo(1)); assertThat(state.metadata().dataStreams().get("metrics-foo").getName(), equalTo("metrics-foo")); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java index ef785086a0ef4..7fdc3b660433d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java @@ -405,7 +405,7 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept assertFalse( client(REMOTE_CLUSTER_2).admin() .cluster() - .prepareHealth(remoteIndex2) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex2) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index ee17521ad757d..65f911d27bf65 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -917,7 +917,7 @@ public void testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception String firstGenerationIndex = getBackingIndices(dataStreamName).get(0); ClusterGetSettingsAction.Response response = client().execute( ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() + new ClusterGetSettingsAction.Request(TEST_REQUEST_TIMEOUT) ).get(); Settings clusterSettings = response.persistentSettings(); @@ -1093,7 +1093,7 @@ public void testLifecycleAppliedToFailureStore() throws Exception { // Let's verify the merge settings ClusterGetSettingsAction.Response response = client().execute( ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() + new ClusterGetSettingsAction.Request(TEST_REQUEST_TIMEOUT) ).get(); Settings clusterSettings = response.persistentSettings(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index bc313d145c17e..b2ddab164b31b 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -131,7 +131,10 @@ public void testStatsClosedBackingIndexDataStream() throws Exception { assertTrue(indicesAdmin().close(new CloseIndexRequest(".ds-" + dataStreamName + "-*-000001")).actionGet().isAcknowledged()); assertBusy( - () -> assertNotEquals(ClusterHealthStatus.RED, clusterAdmin().health(new ClusterHealthRequest()).actionGet().getStatus()) + () -> assertNotEquals( + ClusterHealthStatus.RED, + clusterAdmin().health(new 
ClusterHealthRequest(TEST_REQUEST_TIMEOUT)).actionGet().getStatus() + ) ); DataStreamsStatsAction.Response stats = getDataStreamsStats(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java index a612587262463..e3d5ad0d63e84 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java @@ -118,7 +118,9 @@ public void testLookAheadTimeSettingHigherThanTimeSeriesPollIntervalSetting() { } private void updateClusterSettings(Settings settings) { - clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(settings)).actionGet(); + clusterAdmin().updateSettings( + new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings(settings) + ).actionGet(); } private void updateIndexSettings(Settings settings) { diff --git a/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java index 1bd6468c562f8..99fb0edd4334f 100644 --- a/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java +++ b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java @@ -175,7 +175,7 @@ private void setupTwoClusters() throws Exception { assertFalse( client(REMOTE_CLUSTER).admin() .cluster() - .prepareHealth(REMOTE_INDEX) + .prepareHealth(TEST_REQUEST_TIMEOUT, REMOTE_INDEX) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index ac850e991296c..bf34c322c1a95 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -88,8 +88,8 @@ public void testStartMigrationAndImmediatelyCheckStatus() throws Exception { ensureGreen(); - PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); - GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT); // Start the migration and *immediately* request the status. We're trying to detect a race condition with this test, so we need to // do this as fast as possible, but not before the request to start the migration completes. 
@@ -170,7 +170,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { postUpgradeHookCalled.set(true); }); - PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); assertThat(migrationResponse.getReason(), nullValue()); assertThat(migrationResponse.getElasticsearchException(), nullValue()); @@ -180,7 +180,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { .collect(Collectors.toSet()); assertThat(migratingFeatures, hasItem(FEATURE_NAME)); - GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT); // The feature upgrade may take longer than ten seconds when tests are running // in parallel, so we give assertBusy a sixty-second timeout. assertBusy(() -> { @@ -196,7 +196,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { assertTrue("the pre-migration hook wasn't actually called", preUpgradeHookCalled.get()); assertTrue("the post-migration hook wasn't actually called", postUpgradeHookCalled.get()); - Metadata finalMetadata = clusterAdmin().prepareState().get().getState().metadata(); + Metadata finalMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); // Check that the results metadata is what we expect. FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); assertThat(currentResults, notNullValue()); @@ -246,12 +246,12 @@ public void testMigrateIndexWithWriteBlock() throws Exception { updateIndexSettings(Settings.builder().put("index.blocks.write", true), indexName); ensureGreen(); - client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest()).get(); + client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT)).get(); assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); @@ -299,7 +299,7 @@ public void onFailure(Exception e) { fail("cluster state update failed, see log for details"); } - PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); // Make sure we actually started the migration assertTrue( @@ -309,7 +309,7 @@ public void onFailure(Exception e) { // Now wait for the migration to finish (otherwise the test infra explodes) assertBusy(() -> { - GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT); GetFeatureUpgradeStatusResponse statusResp = client().execute(GetFeatureUpgradeStatusAction.INSTANCE, getStatusRequest).get(); logger.info(Strings.toString(statusResp)); 
assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); @@ -337,8 +337,10 @@ private void migrateWithTemplatesV1(String templatePrefix, SystemIndexDescriptor ensureGreen(); - PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest()) - .get(); + PostFeatureUpgradeResponse migrationResponse = client().execute( + PostFeatureUpgradeAction.INSTANCE, + new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT) + ).get(); assertTrue(migrationResponse.isAccepted()); } @@ -349,7 +351,7 @@ public void testBailOnMigrateWithTemplatesV1() throws Exception { assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.ERROR)); @@ -364,7 +366,7 @@ public void testMigrateWithTemplatesV1() throws Exception { assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); @@ -426,8 +428,10 @@ private void migrateWithTemplatesV2(String prefix, SystemIndexDescriptor... desc ensureGreen(); - PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest()) - .get(); + PostFeatureUpgradeResponse migrationResponse = client().execute( + PostFeatureUpgradeAction.INSTANCE, + new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT) + ).get(); assertTrue(migrationResponse.isAccepted()); } @@ -437,7 +441,7 @@ public void testBailOnMigrateWithTemplatesV2() throws Exception { assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.ERROR)); @@ -452,7 +456,7 @@ public void testMigrateWithTemplatesV2() throws Exception { assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java index 8f9c2b7f34105..ebe4b1835b103 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java @@ -176,7 +176,7 @@ public void testMultipleFeatureMigration() throws Exception { hooksCalled.countDown(); }); - PostFeatureUpgradeRequest 
migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); assertThat(migrationResponse.getReason(), nullValue()); assertThat(migrationResponse.getElasticsearchException(), nullValue()); @@ -189,7 +189,7 @@ public void testMultipleFeatureMigration() throws Exception { // wait for all the plugin methods to have been called before assertBusy since that will exponentially backoff assertThat(hooksCalled.await(30, TimeUnit.SECONDS), is(true)); - GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT); assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResponse = client().execute(GetFeatureUpgradeStatusAction.INSTANCE, getStatusRequest) .get(); @@ -203,7 +203,7 @@ public void testMultipleFeatureMigration() throws Exception { assertTrue("the second plugin's pre-migration hook wasn't actually called", secondPluginPreMigrationHookCalled.get()); assertTrue("the second plugin's post-migration hook wasn't actually called", secondPluginPostMigrationHookCalled.get()); - Metadata finalMetadata = clusterAdmin().prepareState().get().getState().metadata(); + Metadata finalMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); // Check that the results metadata is what we expect FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); assertThat(currentResults, notNullValue()); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java index 47c6e8faf15bf..6484d483bbcd8 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java @@ -85,7 +85,7 @@ public void testSystemIndexMigrationCanBeInterruptedWithShutdown() throws Except clusterService.addListener(clusterStateListener); // create task by calling API - final PostFeatureUpgradeRequest req = new PostFeatureUpgradeRequest(); + final PostFeatureUpgradeRequest req = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); client().execute(PostFeatureUpgradeAction.INSTANCE, req); logger.info("migrate feature api called"); @@ -101,12 +101,12 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertBusy(() -> { // Wait for the node we restarted to completely rejoin the cluster - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat("expected restarted node to rejoin cluster", clusterState.getNodes().size(), equalTo(2)); GetFeatureUpgradeStatusResponse statusResponse = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); assertThat( "expected migration to fail due to restarting only data node", diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java 
b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java index 9a55bfde38b3c..a3e8a3a02f93b 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java @@ -30,7 +30,7 @@ public void testOneNodeShouldRunUsingPrivateIp() { assertNotNull( client().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .get() .getState() @@ -52,7 +52,7 @@ public void testOneNodeShouldRunUsingPublicIp() { assertNotNull( client().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .get() .getState() diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java index b8d0a1ef7bdd5..6c07670266278 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java @@ -33,7 +33,7 @@ public void testTwoNodesShouldRunUsingPrivateOrPublicIp() { assertNotNull( client().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .get() .getState() @@ -47,7 +47,7 @@ public void testTwoNodesShouldRunUsingPrivateOrPublicIp() { assertNotNull( client().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .get() .getState() diff --git a/plugins/discovery-ec2/src/internalClusterTest/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java b/plugins/discovery-ec2/src/internalClusterTest/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java index 033e0e3823536..0ed530c9ee3de 100644 --- a/plugins/discovery-ec2/src/internalClusterTest/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java +++ b/plugins/discovery-ec2/src/internalClusterTest/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java @@ -31,7 +31,7 @@ public void testMinimumMasterNodesStart() { // We try to update a setting now final String expectedValue = UUIDs.randomBase64UUID(random()); final String settingName = "cluster.routing.allocation.exclude.any_attribute"; - final ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings() + final ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(settingName, expectedValue)) .get(); diff --git a/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java index 32be38ac7f813..ca8a4449c4d6d 100644 --- a/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java +++ 
b/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java @@ -67,7 +67,7 @@ public void testJoin() { ClusterStateResponse clusterStateResponse = client(masterNode).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .clear() .setNodes(true) @@ -79,7 +79,7 @@ public void testJoin() { registerGceNode(secondNode); clusterStateResponse = client(secondNode).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .clear() .setNodes(true) @@ -88,13 +88,13 @@ public void testJoin() { assertNotNull(clusterStateResponse.getState().nodes().getMasterNodeId()); // wait for the cluster to form - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get()); + assertNoTimeout(client().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes(Integer.toString(2)).get()); assertNumberOfNodes(2); // add one more node and wait for it to join final String thirdNode = internalCluster().startDataOnlyNode(); registerGceNode(thirdNode); - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get()); + assertNoTimeout(client().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes(Integer.toString(3)).get()); assertNumberOfNodes(3); } diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index 081c6c26319ab..39bc59012ed09 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -141,7 +141,7 @@ public void testSimpleWorkflow() { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); assertThat(count(client, "test-idx-1"), equalTo(100L)); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.getMetadata().hasIndex("test-idx-1"), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex("test-idx-2"), equalTo(false)); final BlobStoreRepository repo = (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo"); diff --git a/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java index bd7e086d01f0d..5af036a9a0391 100644 --- a/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java +++ b/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -29,7 +29,7 @@ public class LoggersTests extends ESTestCase { public void testClusterUpdateSettingsRequestValidationForLoggers() { assertThat(Loggers.RESTRICTED_LOGGERS, hasSize(greaterThan(0))); - ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); for (String logger : Loggers.RESTRICTED_LOGGERS) { var validation = request.persistentSettings(Map.of("logger." 
+ logger, org.elasticsearch.logging.Level.DEBUG)).validate(); assertNotNull(validation); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index 920677e8c4b4a..27f8fc915cdd7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -395,7 +395,7 @@ public void testFlush() { clearInterceptedActions(); String[] concreteIndexNames = TestIndexNameExpressionResolver.newInstance() - .concreteIndexNames(clusterAdmin().prepareState().get().getState(), flushRequest); + .concreteIndexNames(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), flushRequest); assertIndicesSubset(Arrays.asList(concreteIndexNames), indexShardActions); } @@ -422,7 +422,7 @@ public void testRefresh() { clearInterceptedActions(); String[] concreteIndexNames = TestIndexNameExpressionResolver.newInstance() - .concreteIndexNames(clusterAdmin().prepareState().get().getState(), refreshRequest); + .concreteIndexNames(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), refreshRequest); assertIndicesSubset(Arrays.asList(concreteIndexNames), indexShardActions); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 897f10b031dcb..245e7cede4cf5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -1053,7 +1053,7 @@ public void testCannotAllocateStaleReplicaExplanation() throws Exception { logger.info("--> close the index, now the replica is stale"); assertAcked(indicesAdmin().prepareClose("idx")); - final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth("idx") + final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx") .setTimeout(TimeValue.timeValueSeconds(30)) .setWaitForActiveShards(ActiveShardCount.ONE) .setWaitForNoInitializingShards(true) @@ -1254,7 +1254,7 @@ private void prepareIndex( if (state == IndexMetadata.State.CLOSE) { assertAcked(indicesAdmin().prepareClose("idx")); - final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth("idx") + final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx") .setTimeout(TimeValue.timeValueSeconds(30)) .setWaitForActiveShards(activeShardCount) .setWaitForEvents(Priority.LANGUID) @@ -1275,13 +1275,13 @@ private void indexData() { } private String primaryNodeName() { - ClusterState clusterState = admin().cluster().prepareState().get().getState(); + ClusterState clusterState = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String nodeId = clusterState.getRoutingTable().index("idx").shard(0).primaryShard().currentNodeId(); return clusterState.getRoutingNodes().node(nodeId).node().getName(); } private DiscoveryNode replicaNode() { - ClusterState clusterState = admin().cluster().prepareState().get().getState(); + ClusterState clusterState = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String nodeId = 
clusterState.getRoutingTable().index("idx").shard(0).replicaShards().get(0).currentNodeId(); return clusterState.getRoutingNodes().node(nodeId).node(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java index d0e0543bcca03..54b1b08806a93 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java @@ -37,7 +37,9 @@ public void testDesiredBalanceOnMultiNodeCluster() throws Exception { indexData(index); - var clusterHealthResponse = clusterAdmin().health(new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN)).get(); + var clusterHealthResponse = clusterAdmin().health( + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT).waitForStatus(ClusterHealthStatus.GREEN) + ).get(); assertEquals(RestStatus.OK, clusterHealthResponse.status()); final var desiredBalanceResponse = safeGet( @@ -50,7 +52,7 @@ public void testDesiredBalanceOnMultiNodeCluster() throws Exception { for (var entry : shardsMap.entrySet()) { Integer shardId = entry.getKey(); DesiredBalanceResponse.DesiredShards desiredShards = entry.getValue(); - IndexShardRoutingTable shardRoutingTable = clusterAdmin().prepareState() + IndexShardRoutingTable shardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .routingTable() @@ -73,7 +75,9 @@ public void testDesiredBalanceWithUnassignedShards() throws Exception { int numberOfReplicas = 1; createIndex(index, numberOfShards, numberOfReplicas); indexData(index); - var clusterHealthResponse = clusterAdmin().health(new ClusterHealthRequest(index).waitForStatus(ClusterHealthStatus.YELLOW)).get(); + var clusterHealthResponse = clusterAdmin().health( + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, index).waitForStatus(ClusterHealthStatus.YELLOW) + ).get(); assertEquals(RestStatus.OK, clusterHealthResponse.status()); final var desiredBalanceResponse = safeGet( @@ -86,7 +90,7 @@ public void testDesiredBalanceWithUnassignedShards() throws Exception { for (var entry : shardsMap.entrySet()) { Integer shardId = entry.getKey(); DesiredBalanceResponse.DesiredShards desiredShards = entry.getValue(); - IndexShardRoutingTable shardRoutingTable = clusterAdmin().prepareState() + IndexShardRoutingTable shardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .routingTable() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java index 63801f8c1e511..0fb8b450ffaff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java @@ -87,6 +87,8 @@ public void testUpdateDesiredNodesIsIdempotent() { } final var equivalentUpdateRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion(), desiredNodesList, @@ -105,6 
+107,8 @@ public void testGoingBackwardsWithinTheSameHistoryIsForbidden() { updateDesiredNodes(updateDesiredNodesRequest); final var backwardsUpdateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion() - 1, updateDesiredNodesRequest.getNodes(), @@ -123,6 +127,8 @@ public void testSameVersionWithDifferentContentIsForbidden() { updateDesiredNodes(updateDesiredNodesRequest); final var updateDesiredNodesRequestWithSameHistoryIdAndVersionAndDifferentSpecs = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion(), randomList(1, 10, DesiredNodesTestCase::randomDesiredNode), @@ -192,6 +198,8 @@ public void testNodeProcessorsGetValidatedWithDesiredNodeProcessors() { // This test verifies that the validation doesn't throw on desired nodes // with a higher number of available processors than the node running the tests. final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, UUIDs.randomBase64UUID(), randomIntBetween(1, 20), randomList( @@ -267,7 +275,7 @@ public void testDeleteDesiredNodesTasksAreBatchedCorrectly() throws Exception { future.actionGet(); } - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final DesiredNodes latestDesiredNodes = DesiredNodes.latestFromClusterState(state); assertThat(latestDesiredNodes, is(nullValue())); } @@ -309,6 +317,8 @@ private UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest() { private UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest(Settings settings) { return new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, UUIDs.randomBase64UUID(), randomIntBetween(2, 20), randomList(2, 10, () -> randomDesiredNode(settings)), @@ -318,6 +328,8 @@ private UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest(Settings setti private UpdateDesiredNodesRequest randomDryRunUpdateDesiredNodesRequest(Settings settings) { return new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, UUIDs.randomBase64UUID(), randomIntBetween(2, 20), randomList(2, 10, () -> randomDesiredNode(settings)), @@ -331,7 +343,7 @@ private void deleteDesiredNodes() { } private DesiredNodes getLatestDesiredNodes() { - final GetDesiredNodesAction.Request request = new GetDesiredNodesAction.Request(); + final GetDesiredNodesAction.Request request = new GetDesiredNodesAction.Request(TEST_REQUEST_TIMEOUT); final GetDesiredNodesAction.Response response = client().execute(GetDesiredNodesAction.INSTANCE, request).actionGet(); return response.getDesiredNodes(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 32d8be475dbbe..180bef7ea4098 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -141,7 +141,7 @@ public void testMasterNodeOperationTasks() throws Exception { registerTaskManagerListeners(TransportClusterHealthAction.NAME); // First run the health on the master node - should produce only one task 
on the master node - internalCluster().masterClient().admin().cluster().prepareHealth().get(); + internalCluster().masterClient().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertEquals(1, numberOfEvents(TransportClusterHealthAction.NAME, Tuple::v1)); // counting only registration events // counting only unregistration events // When checking unregistration events there might be some delay since receiving the response from the cluster doesn't @@ -151,7 +151,7 @@ public void testMasterNodeOperationTasks() throws Exception { resetTaskManagerListeners(TransportClusterHealthAction.NAME); // Now run the health on a non-master node - should produce one task on master and one task on another node - internalCluster().nonMasterClient().admin().cluster().prepareHealth().get(); + internalCluster().nonMasterClient().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertEquals(2, numberOfEvents(TransportClusterHealthAction.NAME, Tuple::v1)); // counting only registration events // counting only unregistration events assertBusy(() -> assertEquals(2, numberOfEvents(TransportClusterHealthAction.NAME, event -> event.v1() == false))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java index bb2c97ec9aa69..85dd1337204b2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -49,7 +49,7 @@ protected Collection> nodePlugins() { public void testNonLocalRequestAlwaysFindsMaster() throws Exception { runRepeatedlyWhileChangingMaster(() -> { - final ClusterStateRequestBuilder clusterStateRequestBuilder = clusterAdmin().prepareState() + final ClusterStateRequestBuilder clusterStateRequestBuilder = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .setBlocks(true) @@ -69,7 +69,7 @@ public void testLocalRequestAlwaysSucceeds() throws Exception { final String node = randomFrom(internalCluster().getNodeNames()); final DiscoveryNodes discoveryNodes = client(node).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setLocal(true) .setNodes(true) @@ -98,7 +98,7 @@ public void testNonLocalRequestAlwaysFindsMasterAndWaitsForMetadata() throws Exc final long waitForMetadataVersion = randomLongBetween(Math.max(1, metadataVersion - 3), metadataVersion + 5); final ClusterStateRequestBuilder clusterStateRequestBuilder = client(node).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .setMetadata(true) @@ -131,7 +131,7 @@ public void testLocalRequestWaitsForMetadata() throws Exception { final long waitForMetadataVersion = randomLongBetween(Math.max(1, metadataVersion - 3), metadataVersion + 5); final ClusterStateResponse clusterStateResponse = client(node).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setLocal(true) .setMetadata(true) @@ -156,7 +156,7 @@ public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception assertBusy( () -> assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setBlocks(true) @@ -188,7 +188,7 @@ public void 
runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception assertAcked( client(nonMasterNode).admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), value)) ); } @@ -225,17 +225,22 @@ public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception public void testFailsWithBlockExceptionIfBlockedAndBlocksNotRequested() { internalCluster().startMasterOnlyNode(Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 1).build()); - final var state = safeGet(clusterAdmin().prepareState().clear().setBlocks(true).execute()).getState(); + final var state = safeGet(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setBlocks(true).execute()).getState(); assertTrue(state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat( - safeAwaitFailure(SubscribableListener.newForked(l -> clusterAdmin().prepareState().clear().execute(l))), + safeAwaitFailure( + SubscribableListener.newForked( + l -> clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().execute(l) + ) + ), instanceOf(ClusterBlockException.class) ); internalCluster().startDataOnlyNode(); - final var recoveredState = safeGet(clusterAdmin().prepareState().clear().setBlocks(randomBoolean()).execute()).getState(); + final var recoveredState = safeGet(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setBlocks(randomBoolean()).execute()) + .getState(); assertFalse(recoveredState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 0c3dac0f99b6c..2385c42526d40 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -64,7 +64,8 @@ private void assertCounts(ClusterStatsNodes.Counts counts, int total, Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); String mergeNode = discoveryNodes[0].getName(); @@ -158,7 +158,11 @@ public void testShrinkIndexPrimaryTerm() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); - final Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + final Map dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getDataNodes(); assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); final String mergeNode = discoveryNodes[0].getName(); @@ -222,7 +226,10 @@ public void testShrinkIndexPrimaryTerm() throws Exception { } private static IndexMetadata indexMetadata(final Client client, final String index) { - final ClusterStateResponse clusterStateResponse = 
client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = client.admin() + .cluster() + .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)) + .actionGet(); return clusterStateResponse.getState().metadata().index(index); } @@ -236,7 +243,7 @@ public void testCreateShrinkIndex() { for (int i = 0; i < docs; i++) { prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } - Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node @@ -272,7 +279,7 @@ public void testCreateShrinkIndex() { assertNoResizeSourceIndexSettings("target"); // resolve true merge node - this is not always the node we required as all shards may be on another node - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); logger.info("merge node {}", mergeNode); @@ -342,7 +349,7 @@ public void testCreateShrinkIndexFails() throws Exception { for (int i = 0; i < 20; i++) { prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } - Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); String spareNode = discoveryNodes[0].getName(); @@ -369,7 +376,7 @@ public void testCreateShrinkIndexFails() throws Exception { .build() ) .get(); - clusterAdmin().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "target").setWaitForEvents(Priority.LANGUID).get(); // now we move all shards away from the merge node updateIndexSettings( @@ -382,7 +389,7 @@ public void testCreateShrinkIndexFails() throws Exception { updateIndexSettings(Settings.builder().putNull("index.routing.allocation.exclude._name"), "target"); // wait until it fails assertBusy(() -> { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); RoutingTable routingTables = clusterStateResponse.getState().routingTable(); assertTrue(routingTables.index("target").shard(0).shard(0).unassigned()); assertEquals( @@ -427,7 +434,7 @@ public void testCreateShrinkWithIndexSort() throws Exception { for (int i = 0; i < 20; i++) { prepareIndex("source").setId(Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); } - Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but 
was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); String mergeNode = discoveryNodes[0].getName(); @@ -482,7 +489,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } indicesAdmin().prepareFlush("source").get(); - Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due @@ -508,7 +515,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { ensureGreen(); assertNoResizeSourceIndexSettings("target"); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); IndexMetadata target = clusterStateResponse.getState().getMetadata().index("target"); indicesAdmin().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get(); IndicesSegmentResponse targetSegStats = indicesAdmin().prepareSegments("target").get(); @@ -601,7 +608,7 @@ public void testShrinkThenSplitWithFailedNode() throws Exception { } static void assertNoResizeSourceIndexSettings(final String index) { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState() + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .clear() .setMetadata(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 22549a1562dcd..41646496c59c4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -333,7 +333,10 @@ public void testSplitIndexPrimaryTerm() throws Exception { } private static IndexMetadata indexMetadata(final Client client, final String index) { - final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = client.admin() + .cluster() + .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)) + .actionGet(); return clusterStateResponse.getState().metadata().index(index); } @@ -371,7 +374,7 @@ public void testCreateSplitIndex() throws Exception { ensureGreen(); assertNoResizeSourceIndexSettings("target"); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); logger.info("split node {}", mergeNode); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java 
index 16f8f51cb8aae..becea454b7d58 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -83,7 +83,7 @@ public void testRolloverOnEmptyIndex() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-1"); if (explicitWriteIndex) { assertTrue(oldIndex.getAliases().containsKey("test_alias")); @@ -106,7 +106,7 @@ public void testRollover() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-2"); assertFalse(oldIndex.getAliases().containsKey("test_alias")); final IndexMetadata newIndex = state.metadata().index("test_index-000003"); @@ -139,7 +139,7 @@ public void testRolloverWithExplicitWriteIndex() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-2"); assertTrue(oldIndex.getAliases().containsKey("test_alias")); assertFalse(oldIndex.getAliases().get("test_alias").writeIndex()); @@ -187,7 +187,7 @@ public void testRolloverWithIndexSettings() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-2"); final IndexMetadata newIndex = state.metadata().index("test_index-000003"); assertThat(newIndex.getNumberOfShards(), equalTo(1)); @@ -220,7 +220,7 @@ public void testRolloverWithIndexSettingsWithoutPrefix() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-2"); final IndexMetadata newIndex = state.metadata().index("test_index-000003"); assertThat(newIndex.getNumberOfShards(), equalTo(1)); @@ -268,7 +268,7 @@ public void testRolloverDryRun() throws Exception { assertThat(response.isDryRun(), equalTo(true)); assertThat(response.isRolledOver(), equalTo(false)); assertThat(response.getConditionStatus().size(), equalTo(0)); - 
final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-1"); assertTrue(oldIndex.getAliases().containsKey("test_alias")); final IndexMetadata newIndex = state.metadata().index("test_index-000002"); @@ -334,7 +334,7 @@ public void testRolloverConditionsNotMet() throws Exception { ) ); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-0"); assertTrue(oldIndex.getAliases().containsKey("test_alias")); if (explicitWriteIndex) { @@ -361,7 +361,7 @@ public void testRolloverWithNewIndexName() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index"); final IndexMetadata newIndex = state.metadata().index("test_new_index"); assertTrue(newIndex.getAliases().containsKey("test_alias")); @@ -452,7 +452,7 @@ public void testRolloverMaxSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("No rollover with a large max_size condition", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } @@ -466,7 +466,7 @@ public void testRolloverMaxSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("Should rollover with a small max_size condition", response.isRolledOver(), equalTo(true)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); List> metConditions = oldIndex.getRolloverInfos().get("test_alias").getMetConditions(); assertThat(metConditions.size(), equalTo(1)); assertThat(metConditions.get(0).toString(), equalTo(new MaxSizeCondition(maxSizeValue).toString())); @@ -488,7 +488,11 @@ public void testRolloverMaxSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-000002")); assertThat(response.getNewIndex(), equalTo("test-000003")); assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-000002"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test-000002"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } } @@ -513,7 +517,7 @@ public void testRolloverMaxPrimaryShardSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), 
equalTo("test-000002")); assertThat("No rollover with a large max_primary_shard_size condition", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } @@ -527,7 +531,7 @@ public void testRolloverMaxPrimaryShardSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("Should rollover with a small max_primary_shard_size condition", response.isRolledOver(), equalTo(true)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); List> metConditions = oldIndex.getRolloverInfos().get("test_alias").getMetConditions(); assertThat(metConditions.size(), equalTo(1)); assertThat(metConditions.get(0).toString(), equalTo(new MaxPrimaryShardSizeCondition(maxPrimaryShardSizeCondition).toString())); @@ -549,7 +553,11 @@ public void testRolloverMaxPrimaryShardSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-000002")); assertThat(response.getNewIndex(), equalTo("test-000003")); assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-000002"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test-000002"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } } @@ -573,7 +581,7 @@ public void testRolloverMaxPrimaryShardDocs() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("No rollover with a large max_primary_shard_docs condition", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } @@ -587,7 +595,7 @@ public void testRolloverMaxPrimaryShardDocs() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("Should rollover with a small max_primary_shard_docs condition", response.isRolledOver(), equalTo(true)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); List> metConditions = oldIndex.getRolloverInfos().get("test_alias").getMetConditions(); assertThat(metConditions.size(), equalTo(1)); assertThat( @@ -610,7 +618,11 @@ public void testRolloverMaxPrimaryShardDocs() throws Exception { assertThat(response.getOldIndex(), equalTo("test-000002")); assertThat(response.getNewIndex(), equalTo("test-000003")); assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = 
clusterAdmin().prepareState().get().getState().metadata().index("test-000002"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test-000002"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } } @@ -698,7 +710,7 @@ public void testRolloverWithHiddenAliasesAndExplicitWriteIndex() { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index(firstIndexName); assertTrue(oldIndex.getAliases().containsKey(aliasName)); assertTrue(oldIndex.getAliases().get(aliasName).isHidden()); @@ -732,7 +744,7 @@ public void testRolloverWithHiddenAliasesAndImplicitWriteIndex() { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index(firstIndexName); assertFalse(oldIndex.getAliases().containsKey(aliasName)); final IndexMetadata newIndex = state.metadata().index(secondIndexName); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 1a070c8bd0de3..e6b042c059f41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -88,10 +88,10 @@ public void testBasic() throws Exception { logger.info("--> disable allocation"); disableAllocation(index); logger.info("--> stop random node"); - int num = clusterAdmin().prepareState().get().getState().nodes().getSize(); + int num = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getSize(); internalCluster().stopNode(internalCluster().getNodeNameThat(new IndexNodePredicate(index))); - assertNoTimeout(clusterAdmin().prepareHealth().setWaitForNodes("" + (num - 1))); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("" + (num - 1))); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List unassignedShards = clusterState.routingTable().index(index).shardsWithState(ShardRoutingState.UNASSIGNED); response = execute(new IndicesShardStoresRequest(index)); assertThat(response.getStoreStatuses().containsKey(index), equalTo(true)); @@ -227,7 +227,7 @@ public boolean test(Settings settings) { } private Set findNodesWithShard(String index) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndexRoutingTable indexRoutingTable = state.routingTable().index(index); List startedShards = indexRoutingTable.shardsWithState(ShardRoutingState.STARTED); Set nodesNamesWithShard = new 
HashSet<>(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java index 85b720a03478e..d8797f3c64575 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java @@ -28,7 +28,7 @@ public void testBulkProcessorAutoCreateRestrictions() { internalCluster().startNode(settings); createIndex("willwork"); - clusterAdmin().prepareHealth("willwork").setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "willwork").setWaitForGreenStatus().get(); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); bulkRequestBuilder.add(prepareIndex("willwork").setId("1").setSource("{\"foo\":1}", XContentType.JSON)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java index 4a56a6ce8ddb6..573d929ee30a9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java @@ -80,7 +80,7 @@ public void testMappingValidationIndexExists() { SearchResponse searchResponse = client().search(new SearchRequest(indexName)).actionGet(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); searchResponse.decRef(); - ClusterStateResponse clusterStateResponse = admin().cluster().state(new ClusterStateRequest()).actionGet(); + ClusterStateResponse clusterStateResponse = admin().cluster().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); Map indexMapping = clusterStateResponse.getState().metadata().index(indexName).mapping().sourceAsMap(); Map fields = (Map) indexMapping.get("properties"); assertThat(fields.size(), equalTo(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java index 39273e9d1712b..023fa54fef9ec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java @@ -134,7 +134,7 @@ public void testCreateIndexStopsWaitingWhenIndexDeleted() throws Exception { .execute(); logger.info("--> wait until the cluster state contains the new index"); - assertBusy(() -> assertTrue(clusterAdmin().prepareState().get().getState().metadata().hasIndex(indexName))); + assertBusy(() -> assertTrue(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().hasIndex(indexName))); logger.info("--> delete the index"); assertAcked(indicesAdmin().prepareDelete(indexName)); @@ -148,7 +148,7 @@ public void testCreateIndexStopsWaitingWhenIndexDeleted() throws Exception { // only after the test cleanup does the index creation manifest in the cluster state. 
To take care of this problem // and its potential ramifications, we wait here for the index creation cluster state update task to finish private void waitForIndexCreationToComplete(final String indexName) { - clusterAdmin().prepareHealth(indexName).setWaitForEvents(Priority.URGENT).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).setWaitForEvents(Priority.URGENT).get(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java index 6737d02434c0f..bb970f69ead18 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java @@ -53,7 +53,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { allowNodes("test", 2); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForActiveShards(2) .setWaitForYellowStatus() @@ -90,7 +90,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { } allowNodes("test", 3); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForActiveShards(3) .setWaitForGreenStatus() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java index e568b51e43b2e..321c1c84d5cb1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java @@ -79,7 +79,13 @@ public void testRoutingLoopProtection() { try { final var newMaster = ensureSufficientMasterEligibleNodes(); - final long originalTerm = internalCluster().masterClient().admin().cluster().prepareState().get().getState().term(); + final long originalTerm = internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .term(); final var previousMasterKnowsNewMasterIsElectedLatch = configureElectionLatch(newMaster, cleanupTasks); final var newMasterReceivedReroutedMessageFuture = new PlainActionFuture<>(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java index 2f10711db7371..91903fd700034 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -202,7 +202,7 @@ public void testFilteringAliases() throws Exception { // For now just making sure that filter was stored with the alias logger.info("--> making sure that filter was stored with alias [alias1] and filter [user:kimchy]"); - ClusterState clusterState = admin().cluster().prepareState().get().getState(); + ClusterState clusterState = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndexMetadata indexMd = clusterState.metadata().index("test"); 
assertThat(indexMd.getAliases().get("alias1").filter().string(), equalTo(""" {"term":{"user":{"value":"kimchy"}}}""")); @@ -1416,21 +1416,33 @@ private void assertAliasesVersionIncreases(final String index, final Runnable ru private void assertAliasesVersionIncreases(final String[] indices, final Runnable runnable) { final var beforeAliasesVersions = new HashMap(indices.length); - final var beforeMetadata = admin().cluster().prepareState().get().getState().metadata(); + final var beforeMetadata = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); for (final var index : indices) { beforeAliasesVersions.put(index, beforeMetadata.index(index).getAliasesVersion()); } runnable.run(); - final var afterMetadata = admin().cluster().prepareState().get().getState().metadata(); + final var afterMetadata = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); for (final String index : indices) { assertThat(afterMetadata.index(index).getAliasesVersion(), equalTo(1 + beforeAliasesVersions.get(index))); } } private void assertAliasesVersionUnchanged(final String index, final Runnable runnable) { - final long beforeAliasesVersion = admin().cluster().prepareState().get().getState().metadata().index(index).getAliasesVersion(); + final long beforeAliasesVersion = admin().cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index(index) + .getAliasesVersion(); runnable.run(); - final long afterAliasesVersion = admin().cluster().prepareState().get().getState().metadata().index(index).getAliasesVersion(); + final long afterAliasesVersion = admin().cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index(index) + .getAliasesVersion(); assertThat(afterAliasesVersion, equalTo(beforeAliasesVersion)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java index c5c3e441363da..5173577827154 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java @@ -298,7 +298,7 @@ public void testAddBlockToUnassignedIndex() throws Exception { .setSettings(Settings.builder().put("index.routing.allocation.include._name", "nothing").build()) ); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().indices().get(indexName).getState(), is(IndexMetadata.State.OPEN)); assertThat(clusterState.routingTable().allShards().allMatch(ShardRouting::unassigned), is(true)); @@ -393,7 +393,7 @@ public void testAddBlockWhileDeletingIndices() throws Exception { } indices[i] = indexName; } - assertThat(clusterAdmin().prepareState().get().getState().metadata().indices().size(), equalTo(indices.length)); + assertThat(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indices().size(), equalTo(indices.length)); final List threads = new ArrayList<>(); final CountDownLatch latch = new CountDownLatch(1); @@ -434,7 +434,7 @@ public void testAddBlockWhileDeletingIndices() throws Exception { } static void assertIndexHasBlock(APIBlock block, final String... 
indices) { - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (String index : indices) { final IndexMetadata indexMetadata = clusterState.metadata().indices().get(index); final Settings indexSettings = indexMetadata.getSettings(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java index 65c0aa6548182..a190ac61bbe18 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -43,7 +43,7 @@ public void testSimpleLocalHealth() { logger.info("--> getting cluster health on [{}]", node); final ClusterHealthResponse health = client(node).admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setLocal(true) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueSeconds(30)) @@ -56,7 +56,7 @@ public void testSimpleLocalHealth() { public void testHealth() { logger.info("--> running cluster health on an index that does not exists"); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("test1") + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test1") .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(1)) .get(); @@ -65,7 +65,10 @@ public void testHealth() { assertThat(healthResponse.getIndices().isEmpty(), equalTo(true)); logger.info("--> running cluster wide health"); - healthResponse = clusterAdmin().prepareHealth().setWaitForGreenStatus().setTimeout(TimeValue.timeValueSeconds(10)).get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setTimeout(TimeValue.timeValueSeconds(10)) + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(healthResponse.getIndices().isEmpty(), equalTo(true)); @@ -74,13 +77,16 @@ public void testHealth() { createIndex("test1"); logger.info("--> running cluster health on an index that does exists"); - healthResponse = clusterAdmin().prepareHealth("test1").setWaitForGreenStatus().setTimeout(TimeValue.timeValueSeconds(10)).get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test1") + .setWaitForGreenStatus() + .setTimeout(TimeValue.timeValueSeconds(10)) + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); logger.info("--> running cluster health on an index that does exists and an index that doesn't exists"); - healthResponse = clusterAdmin().prepareHealth("test1", "test2") + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test1", "test2") .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(1)) .get(); @@ -93,7 +99,7 @@ public void testHealth() { public void testHealthWithClosedIndices() { createIndex("index-1"); { - ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(response.getStatus(), 
equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -103,7 +109,7 @@ public void testHealthWithClosedIndices() { assertAcked(indicesAdmin().prepareClose("index-2")); { - ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(2)); @@ -111,21 +117,21 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-1").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-1").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-2").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-2").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(2)); @@ -133,7 +139,7 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*") + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*") .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -143,7 +149,7 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-2"), nullValue()); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*") + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*") .setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true)) .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -157,7 +163,7 @@ public void testHealthWithClosedIndices() { assertAcked(indicesAdmin().prepareClose("index-3")); { - ClusterHealthResponse response = clusterAdmin().prepareHealth() + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNoRelocatingShards(true) .setWaitForNoInitializingShards(true) .setWaitForYellowStatus() @@ -170,28 +176,28 @@ public void testHealthWithClosedIndices() { 
assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-1").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-1").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-2").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-2").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-3").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-3").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(3)); @@ -200,7 +206,7 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*") + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*") .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -211,7 +217,7 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-3"), nullValue()); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*") + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*") .setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true)) .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -224,7 +230,7 @@ public void testHealthWithClosedIndices() { setReplicaCount(numberOfReplicas(), "index-3"); { - ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(3)); @@ -240,7 +246,7 @@ public void testHealthOnIndexCreation() throws Exception { @Override public void run() { while (finished.get() == false) { - ClusterHealthResponse health = clusterAdmin().prepareHealth().get(); + ClusterHealthResponse health = 
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(health.getStatus(), not(equalTo(ClusterHealthStatus.RED))); } } @@ -254,7 +260,7 @@ public void run() { } public void testWaitForEventsRetriesIfOtherConditionsNotMet() { - final ActionFuture healthResponseFuture = clusterAdmin().prepareHealth("index") + final ActionFuture healthResponseFuture = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .execute(); @@ -286,7 +292,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) try { createIndex("index"); - assertFalse(clusterAdmin().prepareHealth("index").setWaitForGreenStatus().get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index").setWaitForGreenStatus().get().isTimedOut()); // at this point the original health response should not have returned: there was never a point where the index was green AND // the master had processed all pending tasks above LANGUID priority. @@ -326,7 +332,7 @@ public void testHealthOnMasterFailover() throws Exception { responseFutures.add( client(node).admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setMasterNodeTimeout(TimeValue.timeValueMinutes(timeoutMinutes)) @@ -369,7 +375,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) }); try { - final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueSeconds(1)) .get(TimeValue.timeValueSeconds(30)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index cc930cdad5950..f0801d01a70d9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -206,7 +206,7 @@ public void testClusterInfoServiceInformationClearOnError() { prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get(); ensureGreen("test"); - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -320,7 +320,12 @@ public void testClusterInfoServiceInformationClearOnError() { assertThat("size for shard " + shardRouting + " found", originalInfo.getShardSize(shardRouting), notNullValue()); } - RoutingTable routingTable = clusterAdmin().prepareState().clear().setRoutingTable(true).get().getState().routingTable(); + RoutingTable routingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setRoutingTable(true) + .get() + .getState() + .routingTable(); for (ShardRouting shard : routingTable.allShardsIterator()) { assertTrue( infoAfterRecovery.getReservedSpace(shard.currentNodeId(), infoAfterRecovery.getDataPath(shard)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java index aaf663c8c5b24..382d7aa8eb647 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java @@ -55,11 +55,15 @@ private UpdateDesiredNodesResponse updateDesiredNodes(UpdateDesiredNodesRequest } private DesiredNodes getLatestDesiredNodes() { - return client().execute(GetDesiredNodesAction.INSTANCE, new GetDesiredNodesAction.Request()).actionGet().getDesiredNodes(); + return client().execute(GetDesiredNodesAction.INSTANCE, new GetDesiredNodesAction.Request(TEST_REQUEST_TIMEOUT)) + .actionGet() + .getDesiredNodes(); } private UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest() { return new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), randomIntBetween(1, 10), randomList( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesStatusIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesStatusIT.java index 77fcdb446baa7..08dbd800ede5e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesStatusIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesStatusIT.java @@ -35,6 +35,8 @@ public void testDesiredNodesStatusIsTracked() { final var pendingDesiredNodes = randomList(0, 5, DesiredNodesTestCase::randomDesiredNode); final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, concatLists(actualizedDesiredNodes, pendingDesiredNodes), @@ -43,11 +45,13 @@ public void testDesiredNodesStatusIsTracked() { updateDesiredNodes(updateDesiredNodesRequest); { - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); } final var newVersionUpdateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion() + 1, updateDesiredNodesRequest.getNodes(), @@ -56,7 +60,7 @@ public void testDesiredNodesStatusIsTracked() { updateDesiredNodes(newVersionUpdateDesiredNodesRequest); { - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); } } @@ -70,6 +74,8 @@ public void testIdempotentUpdateWithUpdatedStatus() { final var pendingDesiredNodes = randomList(0, 5, DesiredNodesTestCase::randomDesiredNode); final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, concatLists(actualizedDesiredNodes, pendingDesiredNodes), @@ -78,14 +84,14 @@ public void testIdempotentUpdateWithUpdatedStatus() { updateDesiredNodes(updateDesiredNodesRequest); { - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); } updateDesiredNodes(updateDesiredNodesRequest); { - final var clusterState = clusterAdmin().prepareState().get().getState(); + final 
var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); } } @@ -99,6 +105,8 @@ public void testActualizedDesiredNodesAreKeptAsActualizedEvenIfNodesLeavesTempor final var pendingDesiredNodes = randomList(0, 5, DesiredNodesTestCase::randomDesiredNode); final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, concatLists(actualizedDesiredNodes, pendingDesiredNodes), @@ -106,7 +114,7 @@ public void testActualizedDesiredNodesAreKeptAsActualizedEvenIfNodesLeavesTempor ); updateDesiredNodes(updateDesiredNodesRequest); - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); final var leavingNodeNames = randomSubsetOf(nodeNames); @@ -114,7 +122,7 @@ public void testActualizedDesiredNodesAreKeptAsActualizedEvenIfNodesLeavesTempor internalCluster().stopNode(leavingNodeName); } - final var newClusterState = clusterAdmin().prepareState().get().getState(); + final var newClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final var latestDesiredNodes = DesiredNodes.latestFromClusterState(newClusterState); for (String leavingNodeName : leavingNodeNames) { @@ -132,6 +140,8 @@ public void testStatusInformationIsClearedAfterHistoryIdChanges() throws Excepti final var pendingDesiredNodes = randomList(0, 5, DesiredNodesTestCase::randomDesiredNode); final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, concatLists(actualizedDesiredNodes, pendingDesiredNodes), @@ -139,7 +149,7 @@ public void testStatusInformationIsClearedAfterHistoryIdChanges() throws Excepti ); updateDesiredNodes(updateDesiredNodesRequest); - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); // Stop some nodes, these shouldn't be actualized within the new desired node's history until they join back @@ -149,6 +159,8 @@ public void testStatusInformationIsClearedAfterHistoryIdChanges() throws Excepti } final var updateDesiredNodesWithNewHistoryRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, updateDesiredNodesRequest.getNodes(), @@ -157,7 +169,7 @@ public void testStatusInformationIsClearedAfterHistoryIdChanges() throws Excepti final var response = updateDesiredNodes(updateDesiredNodesWithNewHistoryRequest); assertThat(response.hasReplacedExistingHistoryId(), is(equalTo(true))); - final var updatedClusterState = clusterAdmin().prepareState().get().getState(); + final var updatedClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final var latestDesiredNodes = DesiredNodes.latestFromClusterState(updatedClusterState); for (String clusterNodeName : clusterNodeNames) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index d3cbab2760747..a3c7f8b77a444 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -67,25 +67,25 @@ public void testTwoNodesNoMasterBlock() throws Exception { String node1Name = internalCluster().startNode(settings); logger.info("--> should be blocked, no master..."); - ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); assertThat(state.nodes().getSize(), equalTo(1)); // verify that we still see the local node in the cluster state logger.info("--> start second node, cluster should be formed"); String node2Name = internalCluster().startNode(settings); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(false)); @@ -97,7 +97,10 @@ public void testTwoNodesNoMasterBlock() throws Exception { } // make sure that all shards recovered before trying to flush assertThat( - clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().getActiveShards(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test") + .setWaitForActiveShards(numShards.totalNumShards) + .get() + .getActiveShards(), equalTo(numShards.totalNumShards) ); // flush for simpler debugging @@ -111,17 +114,20 @@ public void testTwoNodesNoMasterBlock() throws Exception { String masterNode = internalCluster().getMasterName(); String otherNode = node1Name.equals(masterNode) ? 
node2Name : node1Name; logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected"); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(otherNode)).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, otherNode) + ).get(); logger.info("--> stop master node, no master block should appear"); Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode); internalCluster().stopNode(masterNode); assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); // verify that both nodes are still in the cluster state but there is no master assertThat(state.nodes().getSize(), equalTo(2)); @@ -130,19 +136,19 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> starting the previous master node again..."); node2Name = internalCluster().startNode(Settings.builder().put(settings).put(masterDataPathSettings).build()); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() .setWaitForNodes("2") .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(true)); @@ -154,20 +160,23 @@ public void testTwoNodesNoMasterBlock() throws Exception { } logger.info("--> clearing voting config exclusions"); - ClearVotingConfigExclusionsRequest clearRequest = new ClearVotingConfigExclusionsRequest(); + ClearVotingConfigExclusionsRequest clearRequest = new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT); clearRequest.setWaitForRemoval(false); client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearRequest).get(); masterNode = internalCluster().getMasterName(); otherNode = node1Name.equals(masterNode) ? 
node2Name : node1Name; logger.info("--> add voting config exclusion for master node, to be sure it's not elected"); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(masterNode)).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, masterNode) + ).get(); logger.info("--> stop non-master node, no master block should appear"); Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode); internalCluster().stopNode(otherNode); assertBusy(() -> { - ClusterState state1 = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState state1 = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); @@ -175,19 +184,19 @@ public void testTwoNodesNoMasterBlock() throws Exception { internalCluster().startNode(Settings.builder().put(settings).put(otherNodeDataPathSettings).build()); ensureGreen(); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .setWaitForGreenStatus() .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(true)); @@ -212,7 +221,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { assertBusy(() -> { for (Client client : clients()) { - ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state1 = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); } }); @@ -221,13 +230,13 @@ public void testThreeNodesNoMasterBlock() throws Exception { internalCluster().startNode(settings); ensureGreen(); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("3") .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(3)); createIndex("test"); @@ -239,7 +248,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { ensureGreen(); // make sure that all shards recovered before trying to flush assertThat( - 
clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").setWaitForActiveShards(numShards.totalNumShards).get().isTimedOut(), equalTo(false) ); // flush for simpler debugging @@ -262,7 +271,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { logger.info("--> verify that there is no master anymore on remaining node"); // spin here to wait till the state is set assertBusy(() -> { - ClusterState st = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState st = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(st.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); @@ -272,7 +281,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { internalCluster().validateClusterFormed(); ensureGreen(); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(3)); logger.info("--> verify we the data back"); @@ -338,7 +347,7 @@ public void onFailure(Exception e) { DiscoveryNode masterNode = internalCluster().client(randomFrom(otherNodes)) .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .nodes() @@ -350,7 +359,7 @@ public void onFailure(Exception e) { partition.stopDisrupting(); logger.debug("--> waiting for cluster to heal"); - assertNoTimeout(clusterAdmin().prepareHealth().setWaitForNodes("3").setWaitForEvents(Priority.LANGUID)); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("3").setWaitForEvents(Priority.LANGUID)); for (String node : internalCluster().getNodeNames()) { Settings nodeSetting = internalCluster().clusterService(node).state().metadata().settings(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index d8c91d770437f..6b104291693e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -72,7 +72,7 @@ public void testNoMasterActions() throws Exception { final List nodes = internalCluster().startNodes(3, settings); createIndex("test"); - clusterAdmin().prepareHealth("test").setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").setWaitForGreenStatus().get(); final NetworkDisruption disruptionScheme = new NetworkDisruption( new IsolateAllNodes(new HashSet<>(nodes)), @@ -84,7 +84,12 @@ public void testNoMasterActions() throws Exception { final Client clientToMasterlessNode = client(); assertBusy(() -> { - ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = clientToMasterlessNode.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -223,14 +228,14 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { prepareCreate("test1").setSettings(indexSettings(1, 2)).get(); prepareCreate("test2").setSettings(indexSettings(3, 0)).get(); - clusterAdmin().prepareHealth("_all").setWaitForGreenStatus().get(); + 
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "_all").setWaitForGreenStatus().get(); prepareIndex("test1").setId("1").setSource("field", "value1").get(); prepareIndex("test2").setId("1").setSource("field", "value1").get(); refresh(); ensureSearchable("test1", "test2"); - ClusterStateResponse clusterState = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); logger.info("Cluster state:\n{}", clusterState.getState()); final NetworkDisruption disruptionScheme = new NetworkDisruption( @@ -243,7 +248,12 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { final Client clientToMasterlessNode = client(); assertBusy(() -> { - ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = clientToMasterlessNode.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -299,13 +309,13 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { final List nodes = internalCluster().startNodes(3, settings); prepareCreate("test1").setSettings(indexSettings(1, 1)).get(); - clusterAdmin().prepareHealth("_all").setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "_all").setWaitForGreenStatus().get(); prepareIndex("test1").setId("1").setSource("field", "value1").get(); refresh(); ensureGreen("test1"); - ClusterStateResponse clusterState = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); logger.info("Cluster state:\n{}", clusterState.getState()); final List nodesWithShards = clusterState.getState() @@ -321,7 +331,7 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { client().execute( TransportAddVotingConfigExclusionsAction.TYPE, - new AddVotingConfigExclusionsRequest(nodesWithShards.toArray(new String[0])) + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, nodesWithShards.toArray(new String[0])) ).get(); ensureGreen("test1"); @@ -336,7 +346,7 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { assertBusy(() -> { for (String node : nodesWithShards) { - ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = client(node).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); } }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java index 3135647adc9ab..6b5bb08f0f247 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java @@ -62,7 +62,8 @@ public void testNodeRemovalFromNonRedCluster() throws Exception { case 2 -> req.setExternalIds(internalCluster().clusterService(nodeName).localNode().getExternalId()); default -> throw new IllegalStateException("Unexpected value"); } - PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req.build()).get(); + PrevalidateNodeRemovalResponse resp = 
client().execute(PrevalidateNodeRemovalAction.INSTANCE, req.build(TEST_REQUEST_TIMEOUT)) + .get(); assertTrue(resp.getPrevalidation().isSafe()); assertThat(resp.getPrevalidation().message(), equalTo("cluster status is not RED")); assertThat(resp.getPrevalidation().nodes().size(), equalTo(1)); @@ -75,7 +76,7 @@ public void testNodeRemovalFromNonRedCluster() throws Exception { // Enforce a replica to get unassigned updateIndexSettings(Settings.builder().put("index.routing.allocation.require._name", node1), indexName); ensureYellow(); - PrevalidateNodeRemovalRequest req2 = PrevalidateNodeRemovalRequest.builder().setNames(node2).build(); + PrevalidateNodeRemovalRequest req2 = PrevalidateNodeRemovalRequest.builder().setNames(node2).build(TEST_REQUEST_TIMEOUT); PrevalidateNodeRemovalResponse resp2 = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req2).get(); assertTrue(resp2.getPrevalidation().isSafe()); assertThat(resp2.getPrevalidation().message(), equalTo("cluster status is not RED")); @@ -107,7 +108,7 @@ public void testNodeRemovalFromRedClusterWithNoLocalShardCopy() throws Exception internalCluster().stopNode(nodeWithIndex); ensureRed(indexName); String[] otherNodeNames = otherNodes.toArray(new String[otherNodes.size()]); - PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(otherNodeNames).build(); + PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(otherNodeNames).build(TEST_REQUEST_TIMEOUT); PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); assertTrue(resp.getPrevalidation().isSafe()); assertThat(resp.getPrevalidation().message(), equalTo("")); @@ -154,7 +155,7 @@ public void testNodeRemovalFromRedClusterWithLocalShardCopy() throws Exception { ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, new ShardId(index, 0), ""); assertNotNull("local index shards not found", shardPath); // Prevalidate removal of node1 - PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(node1).build(); + PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(node1).build(TEST_REQUEST_TIMEOUT); PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); String node1Id = getNodeId(node1); assertFalse(resp.getPrevalidation().isSafe()); @@ -183,7 +184,7 @@ public void testNodeRemovalFromRedClusterWithTimeout() throws Exception { }); PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder() .setNames(node2) - .build() + .build(TEST_REQUEST_TIMEOUT) .masterNodeTimeout(TimeValue.timeValueSeconds(1)) .timeout(TimeValue.timeValueSeconds(1)); PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); @@ -203,7 +204,7 @@ public void testNodeRemovalFromRedClusterWithTimeout() throws Exception { private void ensureRed(String indexName) throws Exception { assertBusy(() -> { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(indexName) + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName) .setWaitForStatus(ClusterHealthStatus.RED) .setWaitForEvents(Priority.LANGUID) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 3dba41adec08b..1259650d37791 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -76,13 +76,16 @@ public void indexData() throws Exception { } public void testRoutingTable() throws Exception { - ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState().clear().setRoutingTable(true).get(); + ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setRoutingTable(true) + .get(); assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("foo"), is(true)); assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("fuu"), is(true)); assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("baz"), is(true)); assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("non-existent"), is(false)); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().get(); assertThat(clusterStateResponse.getState().routingTable().hasIndex("foo"), is(false)); assertThat(clusterStateResponse.getState().routingTable().hasIndex("fuu"), is(false)); assertThat(clusterStateResponse.getState().routingTable().hasIndex("baz"), is(false)); @@ -90,43 +93,49 @@ public void testRoutingTable() throws Exception { } public void testNodes() throws Exception { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().setNodes(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setNodes(true).get(); assertThat(clusterStateResponse.getState().nodes().getNodes().size(), is(cluster().size())); - ClusterStateResponse clusterStateResponseFiltered = clusterAdmin().prepareState().clear().get(); + ClusterStateResponse clusterStateResponseFiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().get(); assertThat(clusterStateResponseFiltered.getState().nodes().getNodes().size(), is(0)); } public void testMetadata() throws Exception { - ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState().clear().setMetadata(true).get(); + ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .get(); assertThat(clusterStateResponseUnfiltered.getState().metadata().indices().size(), is(3)); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().get(); assertThat(clusterStateResponse.getState().metadata().indices().size(), is(0)); } public void testMetadataVersion() { createIndex("index-1"); createIndex("index-2"); - long baselineVersion = clusterAdmin().prepareState().get().getState().metadata().version(); + long baselineVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().version(); assertThat(baselineVersion, greaterThan(0L)); assertThat( - clusterAdmin().prepareState().setIndices("index-1").get().getState().metadata().version(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("index-1").get().getState().metadata().version(), greaterThanOrEqualTo(baselineVersion) ); assertThat( - 
clusterAdmin().prepareState().setIndices("index-2").get().getState().metadata().version(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("index-2").get().getState().metadata().version(), greaterThanOrEqualTo(baselineVersion) ); assertThat( - clusterAdmin().prepareState().setIndices("*").get().getState().metadata().version(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("*").get().getState().metadata().version(), greaterThanOrEqualTo(baselineVersion) ); assertThat( - clusterAdmin().prepareState().setIndices("not-found").get().getState().metadata().version(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("not-found").get().getState().metadata().version(), greaterThanOrEqualTo(baselineVersion) ); - assertThat(clusterAdmin().prepareState().clear().setMetadata(false).get().getState().metadata().version(), equalTo(0L)); + assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(false).get().getState().metadata().version(), + equalTo(0L) + ); } public void testIndexTemplates() throws Exception { @@ -170,7 +179,7 @@ public void testIndexTemplates() throws Exception { ) .get(); - ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(clusterStateResponseUnfiltered.getState().metadata().templates().size(), is(greaterThanOrEqualTo(2))); GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates("foo_template").get(); @@ -198,7 +207,7 @@ public void testThatFilteringByIndexWorksForMetadataAndRoutingTable() throws Exc * that the cluster state returns coherent data for both routing table and metadata. 
*/ private void testFilteringByIndexWorks(String[] indices, String[] expected) { - ClusterStateResponse clusterState = clusterAdmin().prepareState() + ClusterStateResponse clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setRoutingTable(true) @@ -262,19 +271,23 @@ public void testLargeClusterStatePublishing() throws Exception { } public void testIndicesOptions() throws Exception { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().setMetadata(true).setIndices("f*").get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .setIndices("f*") + .get(); assertThat(clusterStateResponse.getState().metadata().indices().size(), is(2)); ensureGreen("fuu"); // close one index assertAcked(indicesAdmin().close(new CloseIndexRequest("fuu")).get()); - clusterStateResponse = clusterAdmin().prepareState().clear().setMetadata(true).setIndices("f*").get(); + clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).setIndices("f*").get(); assertThat(clusterStateResponse.getState().metadata().indices().size(), is(1)); assertThat(clusterStateResponse.getState().metadata().index("foo").getState(), equalTo(IndexMetadata.State.OPEN)); // expand_wildcards_closed should toggle return only closed index fuu IndicesOptions expandCloseOptions = IndicesOptions.fromOptions(false, true, false, true); - clusterStateResponse = clusterAdmin().prepareState() + clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices("f*") @@ -285,7 +298,7 @@ public void testIndicesOptions() throws Exception { // ignore_unavailable set to true should not raise exception on fzzbzz IndicesOptions ignoreUnavailabe = IndicesOptions.fromOptions(true, true, true, false); - clusterStateResponse = clusterAdmin().prepareState() + clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices("fzzbzz") @@ -296,7 +309,7 @@ public void testIndicesOptions() throws Exception { // empty wildcard expansion result should work when allowNoIndices is // turned on IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, true, true, false); - clusterStateResponse = clusterAdmin().prepareState() + clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices("a*") @@ -309,7 +322,12 @@ public void testIndicesOptionsOnAllowNoIndicesFalse() throws Exception { // empty wildcard expansion throws exception when allowNoIndices is turned off IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, false, true, false); try { - clusterAdmin().prepareState().clear().setMetadata(true).setIndices("a*").setIndicesOptions(allowNoIndices).get(); + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .setIndices("a*") + .setIndicesOptions(allowNoIndices) + .get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { assertThat(e.getMessage(), is("no such index [a*]")); @@ -320,7 +338,12 @@ public void testIndicesIgnoreUnavailableFalse() throws Exception { // ignore_unavailable set to false throws exception when allowNoIndices is turned off IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, true, true, false); try { - clusterAdmin().prepareState().clear().setMetadata(true).setIndices("fzzbzz").setIndicesOptions(allowNoIndices).get(); + 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .setIndices("fzzbzz") + .setIndicesOptions(allowNoIndices) + .get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { assertThat(e.getMessage(), is("no such index [fzzbzz]")); @@ -330,7 +353,7 @@ public void testIndicesIgnoreUnavailableFalse() throws Exception { public void testPrivateCustomsAreExcluded() throws Exception { // ensure that the custom is injected into the cluster state assertBusy(() -> assertTrue(clusterService().state().customs().containsKey("test"))); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setCustoms(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setCustoms(true).get(); assertFalse(clusterStateResponse.getState().customs().containsKey("test")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java index 8a239f7293e22..58daca22303cf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java @@ -47,7 +47,12 @@ public void testIndexingBeforeAndAfterDataNodesStart() { internalCluster().startNode(nonDataNode()); assertThat( - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("2") + .setLocal(true) + .get() + .isTimedOut(), equalTo(false) ); @@ -62,7 +67,12 @@ public void testIndexingBeforeAndAfterDataNodesStart() { // now, start a node data, and see that it gets with shards internalCluster().startNode(dataNode()); assertThat( - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("3") + .setLocal(true) + .get() + .isTimedOut(), equalTo(false) ); @@ -76,7 +86,9 @@ public void testShardsAllocatedAfterDataNodesStart() { new CreateIndexRequest("test").settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) .waitForActiveShards(ActiveShardCount.NONE) ).actionGet(); - final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .get(); assertThat(healthResponse1.isTimedOut(), equalTo(false)); assertThat(healthResponse1.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse1.getActiveShards(), equalTo(0)); @@ -84,7 +96,7 @@ public void testShardsAllocatedAfterDataNodesStart() { internalCluster().startNode(dataNode()); assertThat( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .setWaitForGreenStatus() @@ -100,7 +112,9 @@ public void testAutoExpandReplicasAdjustedWhenDataNodeJoins() { new CreateIndexRequest("test").settings(Settings.builder().put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all")) .waitForActiveShards(ActiveShardCount.NONE) ).actionGet(); - final ClusterHealthResponse healthResponse1 = 
clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .get(); assertThat(healthResponse1.isTimedOut(), equalTo(false)); assertThat(healthResponse1.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse1.getActiveShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 538f5e7a1640d..8cdc49d3b12d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -37,7 +37,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); try { assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueMillis(100)) .get() .getState() @@ -52,11 +52,27 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start master node"); final String masterNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); @@ -66,7 +82,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { try { assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueMillis(100)) .get() .getState() @@ -84,11 +100,27 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { Settings.builder().put(nonDataNode(masterNode())).put(masterDataPathSettings) ); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligibleNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligibleNodeName) ); } @@ -99,7 +131,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); try { assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueMillis(100)) .get() .getState() @@ -114,45 
+146,112 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { logger.info("--> start master node (1)"); final String masterNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); logger.info("--> start master node (2)"); final String nextMasterEligableNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); logger.info("--> closing master node (1)"); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(masterNodeName)).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, masterNodeName) + ).get(); // removing the master from the voting configuration immediately triggers the master to step down assertBusy(() -> { assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligableNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligableNodeName) ); }); internalCluster().stopNode(masterNodeName); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligableNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligableNodeName) ); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java index 64ac8318dce23..05b58ea2f8808 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java @@ -27,7 +27,7 @@ public void testUpdateSettingsValidation() throws Exception { createIndex("test"); NumShards test = getNumShards("test"); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("test") + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test") .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("3") .setWaitForGreenStatus() @@ -36,7 +36,10 @@ public void testUpdateSettingsValidation() throws Exception { assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.totalNumShards)); setReplicaCount(0, "test"); - healthResponse = clusterAdmin().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test") + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.numPrimaries)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index 71418cb83debe..36d903205f05c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -71,7 +71,7 @@ public void testSimpleAwareness() throws Exception { // On slow machines the initial relocation might be delayed assertBusy(() -> { logger.info("--> waiting for no relocation"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test1", "test2") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -82,7 +82,7 @@ public void testSimpleAwareness() throws Exception { assertThat("Cluster health request timed out", clusterHealth.isTimedOut(), equalTo(false)); logger.info("--> checking current state"); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); // check that closed indices are effectively closed final List notClosedIndices = indicesToClose.stream() @@ -115,7 +115,7 @@ public void testAwarenessZones() { String A_1 = nodes.get(3); logger.info("--> waiting for nodes to form a cluster"); - ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForNodes("4").get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("4").get(); assertThat(health.isTimedOut(), equalTo(false)); createIndex("test", 5, 1); @@ -125,7 +125,7 @@ public void testAwarenessZones() { } logger.info("--> waiting for shards to be allocated"); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") 
.setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -133,7 +133,7 @@ public void testAwarenessZones() { .get(); assertThat(health.isTimedOut(), equalTo(false)); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Map counts = computeShardCounts(clusterState); assertThat(counts.get(A_1), anyOf(equalTo(2), equalTo(3))); @@ -162,7 +162,7 @@ public void testAwarenessZonesIncrementalNodes() { assertAcked(indicesAdmin().prepareClose("test")); } - ClusterHealthResponse health = clusterAdmin().prepareHealth() + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -170,7 +170,7 @@ public void testAwarenessZonesIncrementalNodes() { .setWaitForNoRelocatingShards(true) .get(); assertThat(health.isTimedOut(), equalTo(false)); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Map counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -178,7 +178,7 @@ public void testAwarenessZonesIncrementalNodes() { logger.info("--> starting another node in zone 'b'"); String B_1 = internalCluster().startNode(Settings.builder().put(commonSettings).put("node.attr.zone", "b").build()); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -186,7 +186,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); ClusterRerouteUtils.reroute(client()); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -196,7 +196,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -204,7 +204,7 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(B_1), equalTo(2)); String noZoneNode = internalCluster().startNode(); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -212,7 +212,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); ClusterRerouteUtils.reroute(client()); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -222,7 +222,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -230,7 +230,7 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(B_1), equalTo(2)); 
assertThat(counts.containsKey(noZoneNode), equalTo(false)); updateClusterSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "")); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -240,7 +240,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(3)); @@ -254,7 +254,8 @@ public void testForceAwarenessSettingValidation() { final IllegalArgumentException illegalArgumentException = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "nonsense", "foo")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(prefix + "nonsense", "foo")) ); assertThat(illegalArgumentException.getMessage(), containsString("[cluster.routing.allocation.awareness.force.]")); assertThat(illegalArgumentException.getCause(), instanceOf(SettingsException.class)); @@ -263,7 +264,8 @@ public void testForceAwarenessSettingValidation() { assertThat( expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "attr.not_values", "foo")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(prefix + "attr.not_values", "foo")) ).getMessage(), containsString("[cluster.routing.allocation.awareness.force.attr.not_values]") ); @@ -271,7 +273,8 @@ public void testForceAwarenessSettingValidation() { assertThat( expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "attr.values.junk", "foo")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(prefix + "attr.values.junk", "foo")) ).getMessage(), containsString("[cluster.routing.allocation.awareness.force.attr.values.junk]") ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index dc93aaa814018..da585d1bb67d4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -96,7 +96,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, *under dry_run*"); @@ -115,7 +115,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { ); logger.info("--> get the state, verify nothing changed because of the dry run"); - state = 
clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, actually allocating, no dry run"); @@ -132,7 +132,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { equalTo(ShardRoutingState.INITIALIZING) ); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() @@ -140,7 +140,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary allocated"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), @@ -165,7 +165,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { equalTo(ShardRoutingState.INITIALIZING) ); - healthResponse = clusterAdmin().prepareHealth() + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() @@ -174,7 +174,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary moved from node1 to node2"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), @@ -209,7 +209,7 @@ public void testDelayWithALargeAmountOfShards() throws Exception { internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(4)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("4").get(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("4").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create indices"); @@ -239,7 +239,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc String node_1 = internalCluster().startNode(commonSettings); internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(2)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate"); @@ -253,7 +253,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); 
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, actually allocating, no dry run"); @@ -270,7 +270,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc equalTo(ShardRoutingState.INITIALIZING) ); - healthResponse = clusterAdmin().prepareHealth() + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() @@ -278,7 +278,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary allocated"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), @@ -306,7 +306,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc // TODO can we get around this? the cluster is RED, so what do we wait for? ClusterRerouteUtils.reroute(client()); assertThat( - clusterAdmin().prepareHealth().setIndices("test").setWaitForNodes("2").get().getStatus(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setIndices("test").setWaitForNodes("2").get().getStatus(), equalTo(ClusterHealthStatus.RED) ); logger.info("--> explicitly allocate primary"); @@ -326,7 +326,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc logger.info("--> get the state, verify shard 1 primary allocated"); final String nodeToCheck = node_1; assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String nodeId = clusterState.nodes().resolveNode(nodeToCheck).getId(); assertThat(clusterState.getRoutingNodes().node(nodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); }); @@ -339,7 +339,7 @@ public void testRerouteExplain() { String node_1 = internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(1)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").get(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("1").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create an index with 1 shard"); @@ -356,7 +356,7 @@ public void testRerouteExplain() { logger.info("--> starting a second node"); String node_2 = internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(2)); - healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> try to move the shard from node1 to node2"); @@ -385,12 +385,12 @@ public void testMessageLogging() { final String nodeName1 = internalCluster().startNode(settings); assertThat(cluster().size(), equalTo(1)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").get(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("1").get(); assertThat(healthResponse.isTimedOut(), 
equalTo(false)); final String nodeName2 = internalCluster().startNode(settings); assertThat(cluster().size(), equalTo(2)); - healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); final String indexName = "test_index"; @@ -474,7 +474,7 @@ public void testClusterRerouteWithBlocks() { ensureGreen("test-blocks"); logger.info("--> check that the index has 1 shard"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List shards = state.routingTable().allShards("test-blocks"); assertThat(shards, hasSize(1)); @@ -504,7 +504,7 @@ public void testClusterRerouteWithBlocks() { new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)) ); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test-blocks") .setWaitForYellowStatus() .setWaitForNoRelocatingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index 5f54b32ab4a14..abce3ce30fbad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -66,7 +66,7 @@ public void testDecommissionNodeNoReplicas() { ensureGreen("test"); logger.info("--> verify all are allocated on node1 now"); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { final IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId); @@ -93,7 +93,7 @@ public void testAutoExpandReplicasToFilteredNodes() { logger.info("--> creating an index with auto-expand replicas"); createIndex("test", Settings.builder().put(AutoExpandReplicas.SETTING.getKey(), "0-all").build()); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(1)); ensureGreen("test"); @@ -106,7 +106,7 @@ public void testAutoExpandReplicasToFilteredNodes() { ensureGreen("test"); logger.info("--> verify all are allocated on node1 now"); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(0)); for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { @@ -142,7 +142,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); } - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); 
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test"); int numShardsOnNode1 = 0; for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { @@ -165,7 +165,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); logger.info("--> verify all shards are allocated on node_1 now"); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); indexRoutingTable = clusterState.routingTable().index("test"); for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { final IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId); @@ -180,7 +180,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); logger.info("--> verify that there are shards allocated on both nodes now"); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.routingTable().index("test").numberOfNodesShardsAreAllocatedOn(), equalTo(2)); } @@ -193,7 +193,7 @@ public void testInvalidIPFilterClusterSettings() { ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1.")) ); assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); @@ -221,12 +221,12 @@ public void testTransientSettingsStillApplied() { .build(); logger.info("--> updating settings"); - clusterAdmin().prepareUpdateSettings().setTransientSettings(exclude).get(); + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(exclude).get(); logger.info("--> waiting for relocation"); waitForRelocation(ClusterHealthStatus.GREEN); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (ShardRouting shard : RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED)) { String node = state.getRoutingNodes().node(shard.currentNodeId()).node().getName(); @@ -243,12 +243,15 @@ public void testTransientSettingsStillApplied() { Settings other = Settings.builder().put("cluster.info.update.interval", "45s").build(); logger.info("--> updating settings with random persistent setting"); - clusterAdmin().prepareUpdateSettings().setPersistentSettings(other).setTransientSettings(exclude).get(); + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(other) + .setTransientSettings(exclude) + .get(); logger.info("--> waiting for relocation"); waitForRelocation(ClusterHealthStatus.GREEN); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); // The transient settings still exist in the state assertThat(state.metadata().transientSettings(), equalTo(exclude)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java index f66430871c9d8..6a3d2f2fe5210 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java @@ -33,7 +33,7 @@ public void testSaneAllocation() { } ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { if (node.isEmpty() == false) { @@ -42,7 +42,7 @@ public void testSaneAllocation() { } setReplicaCount(0, "test"); ensureGreen("test"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { @@ -60,7 +60,7 @@ public void testSaneAllocation() { setReplicaCount(1, "test"); ensureGreen("test"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java index 97112b97cc130..eebd059ed13b5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java @@ -33,7 +33,13 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { private static void assertClusterUuid(boolean expectCommitted, String expectedValue) { for (String nodeName : internalCluster().getNodeNames()) { - final Metadata metadata = client(nodeName).admin().cluster().prepareState().setLocal(true).get().getState().metadata(); + final Metadata metadata = client(nodeName).admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .metadata(); assertEquals(expectCommitted, metadata.clusterUUIDCommitted()); assertEquals(expectedValue, metadata.clusterUUID()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index a208656179339..bd12f570e136f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -135,7 +135,7 @@ public void testDeleteCreateInOneBulk() throws Exception { final var dataNode = internalCluster().startDataOnlyNode(); final var dataNodeClusterService = internalCluster().clusterService(dataNode); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("2").get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get().isTimedOut()); prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java index 65484066ee9b9..a9948367f2780 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java @@ -46,7 +46,10 @@ public void testRemoveCustomsSuccessful() throws Exception { String node = internalCluster().startNode(); createIndex("test"); indicesAdmin().prepareDelete("test").get(); - assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().indexGraveyard().getTombstones().size()); + assertEquals( + 1, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indexGraveyard().getTombstones().size() + ); Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); internalCluster().stopRandomDataNode(); @@ -64,7 +67,10 @@ public void testRemoveCustomsSuccessful() throws Exception { assertThat(terminal.getOutput(), containsString("index-graveyard")); internalCluster().startNode(dataPathSettings); - assertEquals(0, clusterAdmin().prepareState().get().getState().metadata().indexGraveyard().getTombstones().size()); + assertEquals( + 0, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indexGraveyard().getTombstones().size() + ); } public void testCustomDoesNotMatch() throws Exception { @@ -72,7 +78,10 @@ public void testCustomDoesNotMatch() throws Exception { String node = internalCluster().startNode(); createIndex("test"); indicesAdmin().prepareDelete("test").get(); - assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().indexGraveyard().getTombstones().size()); + assertEquals( + 1, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indexGraveyard().getTombstones().size() + ); Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); internalCluster().stopRandomDataNode(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java index 560ca3e8a548d..527d8b0a62fe4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java @@ -58,7 +58,7 @@ public void testRemoveSettingsSuccessful() throws Exception { Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) ); assertThat( - clusterAdmin().prepareState().get().getState().metadata().persistentSettings().keySet(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().persistentSettings().keySet(), contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey()) ); Settings dataPathSettings = internalCluster().dataPathSettings(node); @@ -84,7 +84,7 @@ public void testRemoveSettingsSuccessful() throws Exception { internalCluster().startNode(dataPathSettings); assertThat( - clusterAdmin().prepareState().get().getState().metadata().persistentSettings().keySet(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().persistentSettings().keySet(), 
not(contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey())) ); } @@ -96,7 +96,7 @@ public void testSettingDoesNotMatch() throws Exception { Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) ); assertThat( - clusterAdmin().prepareState().get().getState().metadata().persistentSettings().keySet(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().persistentSettings().keySet(), contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey()) ); Settings dataPathSettings = internalCluster().dataPathSettings(node); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 00e171a7a132a..2c1ca5866fa46 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -137,7 +137,7 @@ public void testBootstrapNotBootstrappedCluster() throws Exception { .build() ); assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -242,7 +242,13 @@ public void test3MasterNodes2Failed() throws Exception { logger.info("--> ensure NO_MASTER_BLOCK on data-only node"); assertBusy(() -> { - ClusterState state = internalCluster().client(dataNode).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = internalCluster().client(dataNode) + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -288,7 +294,13 @@ public void test3MasterNodes2Failed() throws Exception { logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state"); assertBusy(() -> { - ClusterState state = internalCluster().client(dataNode2).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = internalCluster().client(dataNode2) + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertFalse(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); assertTrue(state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)); }); @@ -333,7 +345,13 @@ public void testNoInitialBootstrapAfterDetach() throws Exception { .build() ); - ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = internalCluster().client() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); internalCluster().stopNode(node); @@ -345,7 +363,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata( Settings masterNodeDataPathSettings = 
internalCluster().dataPathSettings(masterNode); updateClusterSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")); - ClusterState state = internalCluster().client().admin().cluster().prepareState().get().getState(); + ClusterState state = internalCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); internalCluster().stopCurrentMasterNode(); @@ -359,7 +377,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata( internalCluster().startMasterOnlyNode(masterNodeDataPathSettings); ensureGreen(); - state = internalCluster().client().admin().cluster().prepareState().get().getState(); + state = internalCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java index b0cc81bf34811..6e21c3622ec45 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java @@ -43,8 +43,11 @@ public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionExcept final String originalMaster = internalCluster().getMasterName(); logger.info("--> excluding master node {}", originalMaster); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(originalMaster)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, originalMaster) + ).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get(); assertNotEquals(originalMaster, internalCluster().getMasterName()); } @@ -60,7 +63,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { internalCluster().client() .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNodes("4") .setWaitForEvents(Priority.LANGUID) .get() @@ -71,7 +74,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { final ClusterState clusterState = internalCluster().client() .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .setMetadata(true) @@ -111,7 +114,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { internalCluster().client() .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNodes("3") .setWaitForEvents(Priority.LANGUID) .get() @@ -121,7 +124,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { final ClusterState newClusterState = internalCluster().client() .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .setMetadata(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java 
index 9b117365777ce..ea127a7352914 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -35,7 +35,7 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep internalCluster().startNodes(2, masterNodeSettings); Settings dateNodeSettings = dataNode(); internalCluster().startNodes(2, dateNodeSettings); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("4") .setWaitForNoRelocatingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java index 8cee57ee34b89..69923c787a054 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java @@ -111,7 +111,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale // allocation fails due to corruption marker assertBusy(() -> { - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final ShardRouting shardRouting = state.routingTable().index(indexName).shard(shardId.id()).primaryShard(); assertThat(shardRouting.state(), equalTo(ShardRoutingState.UNASSIGNED)); assertThat(shardRouting.unassignedInfo().reason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); @@ -143,7 +143,9 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale } public void checkHealthStatus(String indexName, ClusterHealthStatus healthStatus) { - final ClusterHealthStatus indexHealthStatus = clusterAdmin().health(new ClusterHealthRequest(indexName)).actionGet().getStatus(); + final ClusterHealthStatus indexHealthStatus = clusterAdmin().health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, indexName)) + .actionGet() + .getStatus(); assertThat(indexHealthStatus, is(healthStatus)); } @@ -169,7 +171,7 @@ private Path getIndexPath(String nodeName, ShardId shardId) { } private Set getAllocationIds(String indexName) { - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); return state.metadata().index(indexName).inSyncAllocationIds(0); } @@ -181,7 +183,14 @@ private IndexSettings getIndexSettings(String indexName, String nodeName) { private String historyUUID(String node, String indexName) { final ShardStats[] shards = client(node).admin().indices().prepareStats(indexName).clear().get().getShards(); - final String nodeId = client(node).admin().cluster().prepareState().get().getState().nodes().resolveNode(node).getId(); + final String nodeId = client(node).admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .resolveNode(node) + .getId(); assertThat(shards.length, greaterThan(0)); final Set historyUUIDs = Arrays.stream(shards) .filter(shard -> shard.getShardRouting().currentNodeId().equals(nodeId)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index 543b0be8ae48d..8a2f5d749ff21 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -32,7 +32,7 @@ public void testNoDelayedTimeout() throws Exception { ensureGreen("test"); indexRandomData(); internalCluster().stopNode(findNodeWithShard()); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(0)); ensureGreen("test"); } @@ -53,9 +53,12 @@ public void testDelayedAllocationNodeLeavesAndComesBack() throws Exception { Settings nodeWithShardDataPathSettings = internalCluster().dataPathSettings(nodeWithShard); internalCluster().stopNode(nodeWithShard); assertBusy( - () -> assertThat(clusterAdmin().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState().getRoutingNodes().unassigned().size() > 0, + equalTo(true) + ) ); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)); internalCluster().startNode(nodeWithShardDataPathSettings); // this will use the same data location as the stopped node ensureGreen("test"); } @@ -97,16 +100,19 @@ public void testDelayedAllocationChangeWithSettingTo100ms() throws Exception { indexRandomData(); internalCluster().stopNode(findNodeWithShard()); assertBusy( - () -> assertThat(clusterAdmin().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState().getRoutingNodes().unassigned().size() > 0, + equalTo(true) + ) ); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)); logger.info("Setting shorter allocation delay"); updateIndexSettings( Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100)), "test" ); ensureGreen("test"); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(0)); } /** @@ -123,15 +129,18 @@ public void testDelayedAllocationChangeWithSettingTo0() throws Exception { indexRandomData(); internalCluster().stopNode(findNodeWithShard()); assertBusy( - () -> assertThat(clusterAdmin().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState().getRoutingNodes().unassigned().size() > 0, + equalTo(true) + ) ); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)); updateIndexSettings( Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(0)), "test" ); 
ensureGreen("test"); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(0)); } private void indexRandomData() throws Exception { @@ -147,7 +156,7 @@ private void indexRandomData() throws Exception { } private String findNodeWithShard() { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED); return state.nodes().get(randomFrom(startedShards).currentNodeId()).getName(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index d970634549209..9a13470eea255 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -118,7 +118,7 @@ public void testBulkWeirdScenario() throws Exception { private Settings createStaleReplicaScenario(String master) throws Exception { prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); refresh(); - ClusterState state = clusterAdmin().prepareState().all().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); List shards = state.routingTable().allShards("test"); assertThat(shards.size(), equalTo(2)); @@ -164,7 +164,10 @@ private Settings createStaleReplicaScenario(String master) throws Exception { ); // kick reroute a second time and check that all shards are unassigned ClusterRerouteUtils.reroute(client(master)); - assertThat(client(master).admin().cluster().prepareState().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); + assertThat( + client(master).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getRoutingNodes().unassigned().size(), + equalTo(2) + ); return inSyncDataPathSettings; } @@ -197,7 +200,7 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce internalCluster().stopNode(dataNodeWithShardCopy); ensureStableCluster(1); assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingTable() @@ -223,11 +226,11 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce logger.info("--> wait until shard is failed and becomes unassigned again"); assertTrue( - clusterAdmin().prepareState().get().getState().toString(), - clusterAdmin().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().toString(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned() ); assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingTable() @@ -302,7 +305,14 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { // search can throw an "all shards failed" exception. We will wait until the shard initialization has completed before // verifying the search hit count. 
assertBusy( - () -> assertTrue(clusterAdmin().prepareState().get().getState().routingTable().index(idxName).allPrimaryShardsActive()) + () -> assertTrue( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .routingTable() + .index(idxName) + .allPrimaryShardsActive() + ) ); } ShardStats[] shardStats = indicesAdmin().prepareStats("test") @@ -313,7 +323,7 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { assertThat(shardStat.getCommitStats().getNumDocs(), equalTo(useStaleReplica ? 1 : 0)); } // allocation id of old primary was cleaned from the in-sync set - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals( Collections.singleton(state.routingTable().index(idxName).shard(0).primary.allocationId().getId()), @@ -402,7 +412,15 @@ public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() t .setSettings(indexSettings(1, 0).put("index.routing.allocation.exclude._name", node)) .get(); - assertThat(clusterAdmin().prepareState().get().getState().getRoutingTable().shardRoutingTable("test", 0).assignedShards(), empty()); + assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getRoutingTable() + .shardRoutingTable("test", 0) + .assignedShards(), + empty() + ); ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true)); ensureGreen("test"); @@ -419,7 +437,10 @@ public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception { final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopNode(replicaNode); ensureYellow("test"); - assertEquals(2, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); + assertEquals( + 2, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() + ); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { @@ -428,9 +449,19 @@ public boolean clearData(String nodeName) { }); logger.info("--> wait until shard is failed and becomes unassigned again"); assertBusy( - () -> assertTrue(clusterAdmin().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()) + () -> assertTrue( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getRoutingTable() + .index("test") + .allPrimaryShardsUnassigned() + ) + ); + assertEquals( + 2, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() ); - assertEquals(2, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); internalCluster().startDataOnlyNode(inSyncDataPathSettings); @@ -448,10 +479,16 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopNode(replicaNode); ensureYellow("test"); - assertEquals(2, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); + assertEquals( + 2, + 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() + ); logger.info("--> indexing..."); prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); - assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); + assertEquals( + 1, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() + ); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { @@ -460,14 +497,31 @@ public boolean clearData(String nodeName) { }); logger.info("--> wait until shard is failed and becomes unassigned again"); assertBusy( - () -> assertTrue(clusterAdmin().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()) + () -> assertTrue( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getRoutingTable() + .index("test") + .allPrimaryShardsUnassigned() + ) + ); + assertEquals( + 1, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() ); - assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); internalCluster().startDataOnlyNode(inSyncDataPathSettings); assertBusy( - () -> assertTrue(clusterAdmin().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()) + () -> assertTrue( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getRoutingTable() + .index("test") + .allPrimaryShardsUnassigned() + ) ); } @@ -506,7 +560,13 @@ public void testForceAllocatePrimaryOnNoDecision() throws Exception { ensureGreen(indexName); assertEquals( 1, - clusterAdmin().prepareState().get().getState().routingTable().index(indexName).shardsWithState(ShardRoutingState.STARTED).size() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .routingTable() + .index(indexName) + .shardsWithState(ShardRoutingState.STARTED) + .size() ); } @@ -547,7 +607,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { internalCluster().stopNode(oldPrimary); // Checks that we fail replicas on one side but do not mark them as stale.
assertBusy(() -> { - ClusterState state = client(master).admin().cluster().prepareState().get().getState(); + ClusterState state = client(master).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexShardRoutingTable shardRoutingTable = state.routingTable().shardRoutingTable(shardId); final String newPrimaryNode = state.getRoutingNodes().node(shardRoutingTable.primary.currentNodeId()).node().getName(); assertThat(newPrimaryNode, not(equalTo(oldPrimary))); @@ -563,7 +623,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { partition.ensureHealthy(internalCluster()); logger.info("--> stop disrupting network and re-enable allocation"); assertBusy(() -> { - ClusterState state = client(master).admin().cluster().prepareState().get().getState(); + ClusterState state = client(master).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.routingTable().shardRoutingTable(shardId).activeShards(), hasSize(numberOfReplicas)); assertThat(state.metadata().index("test").inSyncAllocationIds(shardId.id()), hasSize(numberOfReplicas + 1)); for (String node : replicaNodes) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/RemoveReplicaPriorityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/RemoveReplicaPriorityIT.java index 57c4c0986a798..de61c6cf566c2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/RemoveReplicaPriorityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/RemoveReplicaPriorityIT.java @@ -52,7 +52,7 @@ public void testReplicaRemovalPriority() throws Exception { }); } - final String dataNodeIdFilter = clusterAdmin().prepareState() + final String dataNodeIdFilter = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .get() @@ -74,7 +74,7 @@ public void testReplicaRemovalPriority() throws Exception { ); assertBusy(() -> { - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -90,7 +90,7 @@ public void testReplicaRemovalPriority() throws Exception { updateIndexSettings(Settings.builder().putNull(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._id"), INDEX_NAME); assertBusy(() -> { - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -107,7 +107,7 @@ public void testReplicaRemovalPriority() throws Exception { setReplicaCount(2, INDEX_NAME); assertBusy(() -> { - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -125,7 +125,7 @@ public void testReplicaRemovalPriority() throws Exception { setReplicaCount(1, INDEX_NAME); assertBusy(() -> { - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -143,7 +143,7 @@ public void testReplicaRemovalPriority() throws Exception { setReplicaCount(0, INDEX_NAME); assertBusy(() -> { - final IndexShardRoutingTable 
indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 85a04ee6f1851..556836736a9f8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -296,7 +296,7 @@ public void testShardCreation() throws Exception { createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); - final var clusterState = clusterAdmin().prepareState().clear().setRoutingTable(true).get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setRoutingTable(true).get().getState(); // verify non-DEFAULT roles reported in cluster state XContent assertRolesInRoutingTableXContent(clusterState); @@ -440,7 +440,7 @@ public void testPromotion() { @Nullable public AllocationCommand getCancelPrimaryCommand() { - final var indexRoutingTable = clusterAdmin().prepareState() + final var indexRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -488,7 +488,7 @@ public void testSearchRouting() throws Exception { assertEngineTypes(); final var searchShardProfileKeys = new HashSet(); - final var indexRoutingTable = clusterAdmin().prepareState() + final var indexRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetIT.java index 671c308f98fbb..6b97a8b6f3ad0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetIT.java @@ -48,7 +48,7 @@ private void removeAllocationFailuresInjection(String node) { private void awaitShardAllocMaxRetries() throws Exception { var maxRetries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(internalCluster().getDefaultSettings()); assertBusy(() -> { - var state = clusterAdmin().prepareState().get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); var index = state.getRoutingTable().index(INDEX); assertNotNull(index); var shard = index.shard(SHARD).primaryShard(); @@ -61,7 +61,7 @@ private void awaitShardAllocMaxRetries() throws Exception { private void awaitShardAllocSucceed() throws Exception { assertBusy(() -> { - var state = clusterAdmin().prepareState().get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); var index = state.getRoutingTable().index(INDEX); assertNotNull(index); var shard = index.shard(SHARD).primaryShard(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java index eb62ad5e6eec1..5509f46786a80 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java @@ -86,7 +86,7 @@ public void testFloodStageExceeded() throws Exception { final String newDataNodeName = internalCluster().startDataOnlyNode(); final String newDataNodeId = clusterAdmin().prepareNodesInfo(newDataNodeName).get().getNodes().get(0).getNode().getId(); assertBusy(() -> { - final ShardRouting primaryShard = clusterAdmin().prepareState() + final ShardRouting primaryShard = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .setNodes(true) @@ -103,7 +103,7 @@ public void testFloodStageExceeded() throws Exception { // Verify that the block is removed once the shard migration is complete refreshClusterInfo(); - assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); assertNull(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE)); } @@ -135,7 +135,7 @@ public void testRemoveExistingIndexBlocksWhenDiskThresholdMonitorIsDisabled() th // Verify that the block is removed refreshClusterInfo(); - assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); assertNull(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE)); // Re-enable and the blocks should be back! @@ -143,7 +143,7 @@ public void testRemoveExistingIndexBlocksWhenDiskThresholdMonitorIsDisabled() th Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) ); refreshClusterInfo(); - assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); assertThat(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE), equalTo("true")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java index e3b6f2ddba4c6..2f4b3588cf566 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java @@ -28,7 +28,7 @@ public void testPrimaryFailureIncreasesTerm() throws Exception { logger.info("--> disabling allocation to capture shard failure"); disableAllocation("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final int shard = randomBoolean() ? 0 : 1; final String nodeId = state.routingTable().index("test").shard(shard).primaryShard().currentNodeId(); final String node = state.nodes().get(nodeId).getName(); @@ -38,7 +38,12 @@ public void testPrimaryFailureIncreasesTerm() throws Exception { logger.info("--> waiting for a yellow index"); // we can't use ensureYellow since that one is just as happy with a GREEN status. 
- assertBusy(() -> assertThat(clusterAdmin().prepareHealth("test").get().getStatus(), equalTo(ClusterHealthStatus.YELLOW))); + assertBusy( + () -> assertThat( + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").get().getStatus(), + equalTo(ClusterHealthStatus.YELLOW) + ) + ); final long term0 = shard == 0 ? 2 : 1; final long term1 = shard == 1 ? 2 : 1; @@ -53,7 +58,7 @@ protected void assertPrimaryTerms(long shard0Term, long shard1Term) { for (String node : internalCluster().getNodeNames()) { logger.debug("--> asserting primary terms on [{}]", node); - ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = client(node).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); IndexMetadata metadata = state.metadata().index("test"); assertThat(metadata.primaryTerm(0), equalTo(shard0Term)); assertThat(metadata.primaryTerm(1), equalTo(shard1Term)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index a1a29468cc5bd..106fd9530c3a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -231,7 +231,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard private Set getShardIds(final String nodeId, final String indexName) { final Set shardIds = new HashSet<>(); - final IndexRoutingTable indexRoutingTable = clusterAdmin().prepareState() + final IndexRoutingTable indexRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -319,7 +319,7 @@ private void refreshDiskUsage() { } assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setWaitForNoInitializingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 7464f83cb2814..fd7e9f8fb3572 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -75,7 +75,7 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); } - final List nodeIds = clusterAdmin().prepareState() + final List nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -153,7 +153,7 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception { internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); } - final List nodeIds = clusterAdmin().prepareState() + final List nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -211,7 +211,7 @@ public void
testAutomaticReleaseOfIndexBlock() throws Exception { () -> assertBlocked(prepareIndex("test").setId("1").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK) ); - assertFalse(clusterAdmin().prepareHealth("test").setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").setWaitForEvents(Priority.LANGUID).get().isTimedOut()); // Cannot add further documents assertBlocked(prepareIndex("test").setId("2").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); @@ -261,7 +261,7 @@ public void testOnlyMovesEnoughShardsToDropBelowHighWatermark() throws Exception .put(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms") ); - final List nodeIds = clusterAdmin().prepareState() + final List nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -318,7 +318,7 @@ public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); - final List nodeIds = clusterAdmin().prepareState() + final List nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -414,7 +414,7 @@ public void testMovesShardsOffSpecificDataPathAboveWatermark() throws Exception .put(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms") ); - final List nodeIds = clusterAdmin().prepareState() + final List nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -483,7 +483,7 @@ public void testMovesShardsOffSpecificDataPathAboveWatermark() throws Exception private Map getShardCountByNodeId() { final Map shardCountByNodeId = new HashMap<>(); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (final RoutingNode node : clusterState.getRoutingNodes()) { logger.info( "----> node {} has {} shards", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java index 921ed3265f1b6..be530f0bd4cb4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java @@ -100,7 +100,7 @@ public void testUpdateSameHostSetting() { updateClusterSettings(Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), true)); final String indexName = "idx"; createIndex(indexName, 1, 1); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse( "replica should be unassigned", clusterState.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).isEmpty() @@ -109,7 +109,7 @@ public void testUpdateSameHostSetting() { // the same host - the replica should get assigned updateClusterSettings(Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), false)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertTrue( "all shards should be assigned", clusterState.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).isEmpty() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index a142d594fe06e..a9767cce318d4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -43,7 +43,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { @After public void cleanup() throws Exception { assertAcked( - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull("*")) .setTransientSettings(Settings.builder().putNull("*")) ); @@ -64,7 +64,7 @@ private void testClusterNonExistingSettingsUpdate( String key1 = "no_idea_what_you_are_talking_about"; int value1 = 10; try { - ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); consumer.accept(Settings.builder().put(key1, value1), builder); builder.get(); @@ -95,7 +95,7 @@ private void testDeleteIsAppliedFirst( final Setting INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; final Setting REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING; - ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 7).put(REROUTE_INTERVAL.getKey(), "42s"), builder); ClusterUpdateSettingsResponse response = builder.get(); @@ -106,7 +106,7 @@ private void testDeleteIsAppliedFirst( assertThat(REROUTE_INTERVAL.get(settingsFunction.apply(response)), equalTo(TimeValue.timeValueSeconds(42))); assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(42))); - ClusterUpdateSettingsRequestBuilder undoBuilder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder undoBuilder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); consumer.accept( Settings.builder().putNull((randomBoolean() ? 
"cluster.routing.*" : "*")).put(REROUTE_INTERVAL.getKey(), "43s"), undoBuilder @@ -124,7 +124,7 @@ public void testResetClusterTransientSetting() { final Setting INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; final Setting REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING; - ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 7).build()) .get(); @@ -132,7 +132,7 @@ public void testResetClusterTransientSetting() { assertThat(INITIAL_RECOVERIES.get(response.getTransientSettings()), equalTo(7)); assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(7)); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().putNull(INITIAL_RECOVERIES.getKey())) .get(); @@ -140,7 +140,7 @@ public void testResetClusterTransientSetting() { assertNull(response.getTransientSettings().get(INITIAL_RECOVERIES.getKey())); assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY))); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 8).put(REROUTE_INTERVAL.getKey(), "43s").build()) .get(); @@ -149,7 +149,7 @@ public void testResetClusterTransientSetting() { assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(8)); assertThat(REROUTE_INTERVAL.get(response.getTransientSettings()), equalTo(TimeValue.timeValueSeconds(43))); assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(43))); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().putNull((randomBoolean() ? 
"cluster.routing.*" : "*"))) .get(); @@ -164,7 +164,7 @@ public void testResetClusterPersistentSetting() { final Setting INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; final Setting REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING; - ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 9).build()) .get(); @@ -172,7 +172,7 @@ public void testResetClusterPersistentSetting() { assertThat(INITIAL_RECOVERIES.get(response.getPersistentSettings()), equalTo(9)); assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(9)); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull(INITIAL_RECOVERIES.getKey())) .get(); @@ -180,7 +180,7 @@ public void testResetClusterPersistentSetting() { assertThat(INITIAL_RECOVERIES.get(response.getPersistentSettings()), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY))); assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY))); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 10).put(REROUTE_INTERVAL.getKey(), "44s").build()) .get(); @@ -189,7 +189,7 @@ public void testResetClusterPersistentSetting() { assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(10)); assertThat(REROUTE_INTERVAL.get(response.getPersistentSettings()), equalTo(TimeValue.timeValueSeconds(44))); assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(44))); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull((randomBoolean() ? 
"cluster.routing.*" : "*"))) .get(); @@ -209,7 +209,7 @@ public void testClusterSettingsUpdateResponse() { Settings transientSettings1 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).build(); Settings persistentSettings1 = Settings.builder().put(key2, value2).build(); - ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(transientSettings1) .setPersistentSettings(persistentSettings1) .get(); @@ -223,7 +223,7 @@ public void testClusterSettingsUpdateResponse() { Settings transientSettings2 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).put(key2, value2).build(); Settings persistentSettings2 = Settings.EMPTY; - ClusterUpdateSettingsResponse response2 = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response2 = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(transientSettings2) .setPersistentSettings(persistentSettings2) .get(); @@ -237,7 +237,7 @@ public void testClusterSettingsUpdateResponse() { Settings transientSettings3 = Settings.EMPTY; Settings persistentSettings3 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).put(key2, value2).build(); - ClusterUpdateSettingsResponse response3 = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response3 = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(transientSettings3) .setPersistentSettings(persistentSettings3) .get(); @@ -267,7 +267,7 @@ private void testCanUpdateTracerSettings( final BiConsumer consumer, final Function settingsFunction ) { - ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); consumer.accept( Settings.builder().putList("transport.tracer.include", "internal:index/shard/recovery/*", "internal:gateway/local*"), builder @@ -300,7 +300,10 @@ private void testUpdateSettings( ) { final Setting INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; - ClusterUpdateSettingsRequestBuilder initialBuilder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder initialBuilder = clusterAdmin().prepareUpdateSettings( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 42), initialBuilder); ClusterUpdateSettingsResponse response = initialBuilder.get(); @@ -310,7 +313,10 @@ private void testUpdateSettings( assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(42)); try { - ClusterUpdateSettingsRequestBuilder badBuilder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder badBuilder = clusterAdmin().prepareUpdateSettings( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), "whatever"), badBuilder); badBuilder.get(); fail("bogus value"); @@ -321,7 +327,10 @@ private void testUpdateSettings( assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(42)); try { - ClusterUpdateSettingsRequestBuilder badBuilder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder badBuilder = clusterAdmin().prepareUpdateSettings( + TEST_REQUEST_TIMEOUT, 
+ TEST_REQUEST_TIMEOUT + ); consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), -1), badBuilder); badBuilder.get(); fail("bogus value"); @@ -346,9 +355,13 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO if (readOnlyAllowDelete) { settingsBuilder.put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), "true"); } - assertAcked(clusterAdmin().prepareUpdateSettings().setPersistentSettings(settingsBuilder).setTransientSettings(settingsBuilder)); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settingsBuilder) + .setTransientSettings(settingsBuilder) + ); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); if (readOnly) { assertTrue(Metadata.SETTING_READ_ONLY_SETTING.get(state.getMetadata().transientSettings())); assertTrue(Metadata.SETTING_READ_ONLY_SETTING.get(state.getMetadata().persistentSettings())); @@ -365,7 +378,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO .build(); restartNodesOnBrokenClusterState(ClusterState.builder(state).metadata(brokenMeta)); ensureGreen(); // wait for state recovery - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertTrue(state.getMetadata().persistentSettings().getAsBoolean("archived.this.is.unknown", false)); // cannot remove read only block due to archived settings @@ -375,7 +388,9 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); final IllegalArgumentException e1 = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).setTransientSettings(builder) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(builder) + .setTransientSettings(builder) ); assertTrue(e1.getMessage().contains("unknown setting [archived.this.is.unknown]")); } @@ -383,7 +398,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO // fail to clear archived settings with non-archived settings final ClusterBlockException e2 = expectThrows( ClusterBlockException.class, - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable")) .setTransientSettings(Settings.builder().putNull("archived.*")) ); @@ -397,7 +412,8 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO // fail to clear archived settings due to cluster read only block final ClusterBlockException e3 = expectThrows( ClusterBlockException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("archived.*")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().putNull("archived.*")) ); if (readOnly) { assertTrue(e3.getMessage().contains("cluster read-only (api)")); @@ -419,7 +435,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO } final ClusterBlockException e4 = expectThrows( ClusterBlockException.class, - 
clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(builder) ); if (readOnly) { assertTrue(e4.getMessage().contains("cluster read-only (api)")); @@ -436,7 +452,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); final ClusterBlockException e5 = expectThrows( ClusterBlockException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(builder) ); if (readOnly) { assertTrue(e5.getMessage().contains("cluster read-only (api)")); @@ -450,9 +466,14 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO Settings.Builder builder = Settings.builder().putNull("archived.*"); clearOrSetFalse(builder, readOnly, Metadata.SETTING_READ_ONLY_SETTING); clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); - assertAcked(clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).setTransientSettings(builder).get()); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(builder) + .setTransientSettings(builder) + .get() + ); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(Metadata.SETTING_READ_ONLY_SETTING.get(state.getMetadata().transientSettings())); assertFalse(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(state.getMetadata().transientSettings())); assertFalse(Metadata.SETTING_READ_ONLY_SETTING.get(state.getMetadata().persistentSettings())); @@ -477,7 +498,7 @@ public void testClusterUpdateSettingsWithBlocks() { String key2 = "cluster.routing.allocation.node_concurrent_recoveries"; Settings persistentSettings = Settings.builder().put(key2, "5").build(); - ClusterUpdateSettingsRequestBuilder request = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsRequestBuilder request = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(transientSettings) .setPersistentSettings(persistentSettings); @@ -488,7 +509,9 @@ public void testClusterUpdateSettingsWithBlocks() { // But it's possible to update the settings to update the "cluster.blocks.read_only" setting Settings settings = Settings.builder().putNull(Metadata.SETTING_READ_ONLY_SETTING.getKey()).build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(settings).get()); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings).get() + ); } finally { setClusterReadOnly(false); @@ -498,12 +521,14 @@ public void testClusterUpdateSettingsWithBlocks() { try { // But it's possible to update the settings to update the "cluster.blocks.read_only" setting Settings settings = Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(settings).get()); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings).get() + ); assertBlocked(request, Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK); } finally { // But it's 
possible to update the settings to update the "cluster.blocks.read_only" setting Settings s = Settings.builder().putNull(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey()).build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(s).get()); + assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(s).get()); } // It should work now @@ -541,7 +566,7 @@ private void testLoggerLevelUpdate(final BiConsumer updating cluster settings"); var future = client(masterNode).admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(BlockingClusterSettingTestPlugin.TEST_BLOCKING_SETTING.getKey(), true).build()) .setMasterNodeTimeout(TimeValue.timeValueMillis(100L)) .execute(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java index 895bd6932fdb9..7e9406dfcf09a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java @@ -88,7 +88,7 @@ public void testMultipleIndicesAllocation() { .addAliasAction(AliasActions.add().index("test1").alias("routing_alias").routing("ABC")) .addAliasAction(AliasActions.add().index("test2").alias("routing_alias").routing("EFG")) .get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); ClusterSearchShardsResponse response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "routing_alias")); assertThat(response.getGroups().length, equalTo(2)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 31dd002a6af7d..7a66cb3abb7cd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -49,11 +49,11 @@ public void testMinimumPerNode() { int negativeShardsPerNode = between(-50_000, 0); try { if (frequently()) { - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, negativeShardsPerNode).build()) .get(); } else { - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, negativeShardsPerNode).build()) .get(); } @@ -67,7 +67,7 @@ public void testMinimumPerNode() { } public void testIndexCreationOverLimit() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); @@ -95,12 +95,12 @@ public void testIndexCreationOverLimit() { } catch (IllegalArgumentException e) { verifyException(dataNodes, counts, e); } - ClusterState clusterState = 
clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(clusterState.getMetadata().hasIndex("should-fail")); } public void testIndexCreationOverLimitFromTemplate() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); final ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); @@ -126,12 +126,12 @@ public void testIndexCreationOverLimitFromTemplate() { final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareCreate("should-fail")); verifyException(dataNodes, counts, e); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(clusterState.getMetadata().hasIndex("should-fail")); } public void testIncreaseReplicasOverLimit() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); dataNodes = ensureMultipleDataNodes(dataNodes); @@ -158,12 +158,12 @@ public void testIncreaseReplicasOverLimit() { + ";"; assertEquals(expectedError, e.getMessage()); } - Metadata clusterState = clusterAdmin().prepareState().get().getState().metadata(); + Metadata clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); assertEquals(0, clusterState.index("growing-should-fail").getNumberOfReplicas()); } public void testChangingMultipleIndicesOverLimit() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); dataNodes = ensureMultipleDataNodes(dataNodes); @@ -219,13 +219,13 @@ public void testChangingMultipleIndicesOverLimit() { + ";"; assertEquals(expectedError, e.getMessage()); } - Metadata clusterState = clusterAdmin().prepareState().get().getState().metadata(); + Metadata clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); assertEquals(firstIndexReplicas, clusterState.index("test-1-index").getNumberOfReplicas()); assertEquals(secondIndexReplicas, clusterState.index("test-2-index").getNumberOfReplicas()); } public void testPreserveExistingSkipsCheck() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); dataNodes = ensureMultipleDataNodes(dataNodes); @@ -245,7 +245,7 @@ public void testPreserveExistingSkipsCheck() { .setPreserveExisting(true) .setSettings(Settings.builder().put("number_of_replicas", dataNodes)) ); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals(0, clusterState.getMetadata().index("test-index").getNumberOfReplicas()); } @@ -266,7 +266,7 @@ public void testRestoreSnapshotOverLimit() { .setSettings(repoSettings.build()) ); - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); createIndex( "snapshot-index", @@ -330,13 +330,13 @@ public void testRestoreSnapshotOverLimit() { verifyException(dataNodes, counts, e); } ensureGreen(); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(clusterState.getMetadata().hasIndex("snapshot-index")); } public void testOpenIndexOverLimit() { Client client = client(); - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); createIndex( @@ -348,7 +348,7 @@ public void testOpenIndexOverLimit() { .build() ); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertFalse(healthResponse.isTimedOut()); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test-index-1").get(); @@ -371,7 +371,7 @@ public void testOpenIndexOverLimit() { } catch (IllegalArgumentException e) { verifyException(dataNodes, counts, e); } - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(clusterState.getMetadata().hasIndex("snapshot-index")); } @@ -379,17 +379,22 @@ private int ensureMultipleDataNodes(int dataNodes) { if (dataNodes == 1) { internalCluster().startNode(dataNode()); assertThat( - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").setLocal(true).get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(">=2") + .setLocal(true) + .get() + .isTimedOut(), equalTo(false) ); - dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); } return dataNodes; } private void setShardsPerNode(int shardsPerNode) { try { - ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, shardsPerNode).build()) .get(); assertEquals(shardsPerNode, response.getPersistentSettings().getAsInt(shardsPerNodeKey, -1).intValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index a0fa63aa58ab5..6ae7e0f7e84e5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -293,7 +293,14 @@ public void testRejoinDocumentExistsInAllShardCopies() throws Exception { NetworkDisruption scheme = addRandomDisruptionType(partitions); 
         scheme.startDisrupting();
         ensureStableCluster(2, notIsolatedNode);
-        assertFalse(client(notIsolatedNode).admin().cluster().prepareHealth("test").setWaitForYellowStatus().get().isTimedOut());
+        assertFalse(
+            client(notIsolatedNode).admin()
+                .cluster()
+                .prepareHealth(TEST_REQUEST_TIMEOUT, "test")
+                .setWaitForYellowStatus()
+                .get()
+                .isTimedOut()
+        );
         DocWriteResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test").setSource("field", "value").get();
         assertThat(indexResponse.getVersion(), equalTo(1L));
@@ -424,12 +431,12 @@ public boolean validateClusterForming() {
         });
         assertBusy(() -> {
-            assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut());
+            assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get().isTimedOut());
             assertTrue(
                 internalCluster().client(masterNode)
                     .admin()
                     .cluster()
-                    .prepareHealth()
+                    .prepareHealth(TEST_REQUEST_TIMEOUT)
                     .setWaitForNodes("2")
                     .setTimeout(TimeValue.timeValueSeconds(2))
                     .get()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
index cad5c8f524bc7..b512f369c76d5 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
@@ -145,7 +145,7 @@ public void testElectMasterWithLatestVersion() throws Exception {
         isolateAllNodes.stopDisrupting();
-        final ClusterState state = clusterAdmin().prepareState().get().getState();
+        final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         if (state.metadata().hasIndex("test") == false) {
             fail("index 'test' was lost. current cluster state: " + state);
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java
index f8bdf17e2cec8..601266b50d237 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java
@@ -158,7 +158,13 @@ private String xContentToString(ChunkedToXContent xContent) throws IOException {
     private void ensureNoMaster(String node) throws Exception {
         assertBusy(
             () -> assertNull(
-                client(node).admin().cluster().state(new ClusterStateRequest().local(true)).get().getState().nodes().getMasterNode()
+                client(node).admin()
+                    .cluster()
+                    .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).local(true))
+                    .get()
+                    .getState()
+                    .nodes()
+                    .getMasterNode()
             )
         );
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java
index 3aa97f79a82da..eeda0257fb0e4 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java
@@ -113,12 +113,14 @@ private void assertShardInfo(ReplicationResponse response, int expectedTotal, in
     private void ensureActiveShardCopies(final int shardId, final int copyCount) throws Exception {
         assertBusy(() -> {
-            ClusterState state = clusterAdmin().prepareState().get().getState();
+            ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
             assertThat(state.routingTable().index("idx"), not(nullValue()));
             assertThat(state.routingTable().index("idx").shard(shardId), not(nullValue()));
             assertThat(state.routingTable().index("idx").shard(shardId).activeShards().size(), equalTo(copyCount));
-            ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("idx").setWaitForNoRelocatingShards(true).get();
+            ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx")
+                .setWaitForNoRelocatingShards(true)
+                .get();
             assertThat(healthResponse.isTimedOut(), equalTo(false));
             RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries("idx").setActiveOnly(true).get();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java
index 8c6abc3e14cd8..00a4f170cf7eb 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java
@@ -153,7 +153,7 @@ public void testUpgradeDataFolder() throws IOException, InterruptedException {
         String node = internalCluster().startNode();
         prepareCreate("test").get();
         indexRandom(true, prepareIndex("test").setId("1").setSource("{}", XContentType.JSON));
-        String nodeId = clusterAdmin().prepareState().get().getState().nodes().getMasterNodeId();
+        String nodeId = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNodeId();
         final Settings dataPathSettings = internalCluster().dataPathSettings(node);
         internalCluster().stopRandomDataNode();
@@ -235,7 +235,7 @@ public void testUpgradeDataFolder() throws IOException, InterruptedException {
         dataPaths.forEach(path -> assertTrue(Files.isDirectory(path.resolve("nodes"))));
internalCluster().startNode(dataPathSettings); dataPaths.forEach(path -> assertTrue(Files.isRegularFile(path.resolve("nodes")))); - assertEquals(nodeId, clusterAdmin().prepareState().get().getState().nodes().getMasterNodeId()); + assertEquals(nodeId, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNodeId()); assertTrue(indexExists("test")); ensureYellow("test"); assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java index 24bf198b7b42f..a695c46bcbfa2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java @@ -32,7 +32,7 @@ public void testClusterHasFeatures() { assertThat(service.getNodeFeatures(), hasKey(FeatureService.FEATURES_SUPPORTED.id())); // check the nodes all have a feature in their cluster state (there should always be features_supported) - var response = clusterAdmin().state(new ClusterStateRequest().clear().nodes(true)).actionGet(); + var response = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().nodes(true)).actionGet(); var features = response.getState().clusterFeatures().nodeFeatures(); Set missing = features.entrySet() .stream() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index e05bda69d2c9c..92c1e9729b460 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -92,7 +92,7 @@ public void testMappingMetadataParsed() throws Exception { .get(); logger.info("--> verify meta _routing required exists"); - MappingMetadata mappingMd = clusterAdmin().prepareState().get().getState().metadata().index("test").mapping(); + MappingMetadata mappingMd = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); logger.info("--> restarting nodes..."); @@ -102,7 +102,7 @@ public void testMappingMetadataParsed() throws Exception { ensureYellow(); logger.info("--> verify meta _routing required exists"); - mappingMd = clusterAdmin().prepareState().get().getState().metadata().index("test").mapping(); + mappingMd = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); } @@ -118,7 +118,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for green status"); ensureGreen(); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -132,7 +132,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -158,7 +158,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> verifying that the state is green"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -172,7 +172,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -181,7 +181,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for two nodes and green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -199,7 +199,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -233,11 +233,14 @@ public Settings onNodeStopped(String nodeName) { }); logger.info("--> waiting for test index to be created"); - ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setIndices("test") + .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify we have an index"); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setIndices("test").get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("test").get(); assertThat(clusterStateResponse.getState().metadata().hasIndex("test"), equalTo(true)); } @@ -264,7 +267,7 @@ public void testTwoNodesSingleDoc() throws Exception { prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); - ClusterHealthResponse health = clusterAdmin().prepareHealth() + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes("2") @@ -279,7 +282,7 @@ public void testTwoNodesSingleDoc() throws Exception { logger.info("--> closing 
test index..."); assertAcked(indicesAdmin().prepareClose("test")); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -287,7 +290,11 @@ public void testTwoNodesSingleDoc() throws Exception { indicesAdmin().prepareOpen("test").get(); logger.info("--> waiting for green status"); - health = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").get(); + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify 1 doc in the index"); @@ -337,7 +344,8 @@ public Settings onNodeStopped(final String nodeName) throws Exception { logger.info("--> wait until all nodes are back online"); clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForEvents(Priority.LANGUID).waitForNodes(Integer.toString(numNodes)) + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForEvents(Priority.LANGUID) + .waitForNodes(Integer.toString(numNodes)) ).actionGet(); logger.info("--> waiting for green status"); @@ -372,13 +380,13 @@ public void testRecoverBrokenIndexMetadata() throws Exception { } else { internalCluster().startNode(); clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForGreenStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata metadata = state.getMetadata().index("test"); final IndexMetadata.Builder brokenMeta = IndexMetadata.builder(metadata) @@ -395,7 +403,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { // check that the cluster does not keep reallocating shards assertBusy(() -> { - final RoutingTable routingTable = clusterAdmin().prepareState().get().getState().routingTable(); + final RoutingTable routingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().routingTable(); final IndexRoutingTable indexRoutingTable = routingTable.index("test"); assertNotNull(indexRoutingTable); for (int i = 0; i < indexRoutingTable.size(); i++) { @@ -410,7 +418,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { }, 60, TimeUnit.SECONDS); indicesAdmin().prepareClose("test").get(); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals(IndexMetadata.State.CLOSE, state.getMetadata().index(metadata.getIndex()).getState()); assertEquals("boolean", state.getMetadata().index(metadata.getIndex()).getSettings().get("archived.index.similarity.BM25.type")); // try to open it with the broken setting - fail again! 
@@ -449,13 +457,13 @@ public void testRecoverMissingAnalyzer() throws Exception { } else { internalCluster().startNode(); clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForGreenStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata metadata = state.getMetadata().index("test"); final IndexMetadata.Builder brokenMeta = IndexMetadata.builder(metadata) @@ -464,7 +472,7 @@ public void testRecoverMissingAnalyzer() throws Exception { // check that the cluster does not keep reallocating shards assertBusy(() -> { - final RoutingTable routingTable = clusterAdmin().prepareState().get().getState().routingTable(); + final RoutingTable routingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().routingTable(); final IndexRoutingTable indexRoutingTable = routingTable.index("test"); assertNotNull(indexRoutingTable); for (int i = 0; i < indexRoutingTable.size(); i++) { @@ -497,13 +505,13 @@ public void testArchiveBrokenClusterSettings() throws Exception { } else { internalCluster().startNode(); clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForGreenStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final Metadata metadata = state.getMetadata(); final Metadata brokenMeta = Metadata.builder(metadata) @@ -518,7 +526,7 @@ public void testArchiveBrokenClusterSettings() throws Exception { restartNodesOnBrokenClusterState(ClusterState.builder(state).metadata(brokenMeta)); ensureYellow("test"); // wait for state recovery - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals("true", state.metadata().persistentSettings().get("archived.this.is.unknown")); assertEquals( "broken", @@ -528,7 +536,7 @@ public void testArchiveBrokenClusterSettings() throws Exception { // delete these settings updateClusterSettings(Settings.builder().putNull("archived.*")); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNull(state.metadata().persistentSettings().get("archived.this.is.unknown")); assertNull( state.metadata().persistentSettings().get("archived." 
+ ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java index 1e34967073adc..94824db66d152 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java @@ -69,7 +69,7 @@ public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Excepti logger.debug("relocating index..."); updateIndexSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2), index); - clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNoRelocatingShards(true).get(); ensureGreen(); assertIndexDirectoryDeleted(node1, resolveIndex); assertIndexInMetaState(node2, index); @@ -98,7 +98,7 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { logger.info("--> close index"); indicesAdmin().prepareClose(index).get(); // close the index - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(clusterStateResponse.getState().getMetadata().index(index).getState().name(), equalTo(IndexMetadata.State.CLOSE.name())); // update the mapping. this should cause the new meta data to be written although index is closed diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java index 15a72e3534b50..f1b06e4efc97d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -62,7 +62,7 @@ public void doAfterNodes(int numNodes, final Client activeClient) throws Excepti ClusterHealthResponse clusterHealth = activeClient.admin() .cluster() .health( - new ClusterHealthRequest(new String[] {}).waitForYellowStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForYellowStatus() .waitForNodes("2") .waitForActiveShards(test.numPrimaries * 2) ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index b55dd5e207c41..4281562b64791 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -35,7 +35,7 @@ public Set waitForNoBlocksOnNode(TimeValue timeout, Client nodeCli do { blocks = nodeClient.admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setLocal(true) .get() .getState() @@ -55,33 +55,75 @@ public void testRecoverAfterDataNodes() { logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(masterOnlyNode())); assertThat( - master1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + master1.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + 
.global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); logger.info("--> start data_node (1)"); Client data1 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(dataOnlyNode())); assertThat( - master1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + master1.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - data1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + data1.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); logger.info("--> start master_node (2)"); Client master2 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(masterOnlyNode())); assertThat( - master2.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + master2.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - data1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + data1.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - master2.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + master2.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 26573644790fa..193f025e6843b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -142,7 +142,7 @@ private Map assertAndCapturePrimaryTerms(Map pre previousTerms = new HashMap<>(); } final Map result = new HashMap<>(); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (IndexMetadata indexMetadata : state.metadata().indices().values()) { final String index = indexMetadata.getIndex().getName(); final long[] previous = previousTerms.get(index); @@ -316,7 +316,10 @@ public void testTwoNodeFirstNodeCleared() throws Exception { Map primaryTerms = assertAndCapturePrimaryTerms(null); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(firstNode)).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, firstNode) + ).get(); 
internalCluster().fullRestart(new RestartCallback() { @Override @@ -342,7 +345,8 @@ public boolean clearData(String nodeName) { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 2); } - client().execute(TransportClearVotingConfigExclusionsAction.TYPE, new ClearVotingConfigExclusionsRequest()).get(); + client().execute(TransportClearVotingConfigExclusionsAction.TYPE, new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT)) + .get(); } public void testLatestVersionLoaded() throws Exception { @@ -364,7 +368,7 @@ public void testLatestVersionLoaded() throws Exception { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 2); } - String metadataUuid = clusterAdmin().prepareState().execute().get().getState().getMetadata().clusterUUID(); + String metadataUuid = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).execute().get().getState().getMetadata().clusterUUID(); assertThat(metadataUuid, not(equalTo("_na_"))); logger.info("--> closing first node, and indexing more data to the second node"); @@ -420,13 +424,16 @@ public void testLatestVersionLoaded() throws Exception { logger.info("--> running cluster_health (wait for the shards to startup)"); ensureGreen(); - assertThat(clusterAdmin().prepareState().execute().get().getState().getMetadata().clusterUUID(), equalTo(metadataUuid)); + assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).execute().get().getState().getMetadata().clusterUUID(), + equalTo(metadataUuid) + ); for (int i = 0; i < 10; i++) { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 3); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.metadata().templates().get("template_1").patterns(), equalTo(Collections.singletonList("te*"))); assertThat(state.metadata().index("test").getAliases().get("test_alias"), notNullValue()); assertThat(state.metadata().index("test").getAliases().get("test_alias").filter(), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index ae0a1e15923ec..02c566f49e2b3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -202,7 +202,7 @@ public void testRecentPrimaryInformation() throws Exception { ); internalCluster().startDataOnlyNode(nodeWithReplicaSettings); // need to wait for events to ensure the reroute has happened since we perform it async when a new node joins. 
- clusterAdmin().prepareHealth(indexName).setWaitForYellowStatus().setWaitForEvents(Priority.LANGUID).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).setWaitForYellowStatus().setWaitForEvents(Priority.LANGUID).get(); blockRecovery.countDown(); ensureGreen(indexName); assertThat(internalCluster().nodesInclude(indexName), hasItem(newNode)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java index 660d6028486a0..30fc7e263a4c8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java @@ -253,7 +253,7 @@ private static void updateSettings(InternalTestCluster internalCluster, Settings internalCluster.client() .admin() .cluster() - .updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(settings)) + .updateSettings(new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings(settings)) .actionGet(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java index 14697cc6533c1..a5931f29b9ff0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java @@ -88,7 +88,7 @@ private void waitForAllNodesToReportHealth() throws Exception { ClusterState state = internalCluster().client() .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setNodes(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java index 5a5fad9be3ef2..b6477a7e1a6c8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java @@ -136,13 +136,22 @@ private void decreasePollingInterval(InternalTestCluster internalCluster) { .admin() .cluster() .updateSettings( - new ClusterUpdateSettingsRequest().persistentSettings( + new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings( Settings.builder().put(LocalHealthMonitor.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(10)) ) ); } private static Map getNodes(InternalTestCluster internalCluster) { - return internalCluster.client().admin().cluster().prepareState().clear().setNodes(true).get().getState().getNodes().getNodes(); + return internalCluster.client() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get() + .getState() + .getNodes() + .getNodes(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexingPressureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexingPressureIT.java index da89f3252bec0..97dd3d9723d5f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexingPressureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexingPressureIT.java @@ -396,7 +396,15 @@ public Settings onNodeStopped(String nodeName) { } private String getCoordinatingOnlyNode() { - return 
clusterAdmin().prepareState().get().getState().nodes().getCoordinatingOnlyNodes().values().iterator().next().getName(); + return clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getCoordinatingOnlyNodes() + .values() + .iterator() + .next() + .getName(); } private Tuple getPrimaryReplicaNodeNames() { @@ -413,7 +421,7 @@ private Tuple getPrimaryReplicaNodeNames() { .findAny() .get() .currentNodeId(); - DiscoveryNodes nodes = clusterAdmin().prepareState().get().getState().nodes(); + DiscoveryNodes nodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes(); String primaryName = nodes.get(primaryId).getName(); String replicaName = nodes.get(replicaId).getName(); return new Tuple<>(primaryName, replicaName); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index 53f632f6ba8d5..749cf73e822ec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -84,7 +84,7 @@ public void testPostOperationGlobalCheckpointSync() throws Exception { public void testBackgroundGlobalCheckpointSync() throws Exception { runGlobalCheckpointSyncTest(TimeValue.timeValueSeconds(randomIntBetween(1, 3)), client -> { // prevent global checkpoint syncs between all nodes - final DiscoveryNodes nodes = client.admin().cluster().prepareState().get().getState().getNodes(); + final DiscoveryNodes nodes = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes(); for (final DiscoveryNode node : nodes) { for (final DiscoveryNode other : nodes) { if (node == other) { @@ -105,7 +105,7 @@ public void testBackgroundGlobalCheckpointSync() throws Exception { } }, client -> { // restore global checkpoint syncs between all nodes - final DiscoveryNodes nodes = client.admin().cluster().prepareState().get().getState().getNodes(); + final DiscoveryNodes nodes = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes(); for (final DiscoveryNode node : nodes) { for (final DiscoveryNode other : nodes) { if (node == other) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index c9906ccf1fbee..5a4785252bf98 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -558,7 +558,7 @@ private void runWaitForShardsTest( .build(); assertAcked(prepareCreate("index").setSettings(settings)); ensureYellowAndNoInitializingShards("index"); - assertFalse(clusterAdmin().prepareHealth("index").setWaitForActiveShards(numDataNodes).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index").setWaitForActiveShards(numDataNodes).get().isTimedOut()); final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index bdfe629f4bab0..b7dbcf42630e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -194,7 +194,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { waitNoPendingTasksOnAll(); String nodeId = null; - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final DiscoveryNodes nodes = state.nodes(); for (Map.Entry cursor : nodes.getNodes().entrySet()) { final String name = cursor.getValue().getName(); @@ -350,7 +350,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { }); String primaryNodeId = null; - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final DiscoveryNodes nodes = state.nodes(); for (Map.Entry cursor : nodes.getNodes().entrySet()) { final String name = cursor.getValue().getName(); @@ -524,7 +524,7 @@ public void testResolvePath() throws Exception { ensureGreen(indexName); final Map nodeNameToNodeId = new HashMap<>(); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final DiscoveryNodes nodes = state.nodes(); for (Map.Entry cursor : nodes.getNodes().entrySet()) { nodeNameToNodeId.put(cursor.getValue().getName(), cursor.getKey()); @@ -569,7 +569,7 @@ public void testResolvePath() throws Exception { } private Path getPathToShardData(String indexName, String dirSuffix) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable() .activePrimaryShardsGrouped(new String[] { indexName }, false); List iterators = iterableAsArrayList(shardIterators); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 4bd8fadc93095..f3b888022127f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -180,14 +180,18 @@ public void testCorruptFileAndRecover() throws InterruptedException, IOException */ setReplicaCount(2, "test"); ClusterHealthResponse health = clusterAdmin().health( - new ClusterHealthRequest("test").waitForGreenStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test").waitForGreenStatus() // sometimes due to cluster rebalancing and random settings default timeout is just not enough. 
.masterNodeTimeout(TimeValue.timeValueMinutes(5)) .timeout(TimeValue.timeValueMinutes(5)) .waitForNoRelocatingShards(true) ).actionGet(); if (health.isTimedOut()) { - logger.info("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks()); + logger.info( + "cluster state:\n{}\n{}", + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), + getClusterPendingTasks() + ); assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false)); } assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -288,18 +292,24 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted ClusterRerouteUtils.reroute(client()); boolean didClusterTurnRed = waitUntil(() -> { - ClusterHealthStatus test = clusterAdmin().health(new ClusterHealthRequest("test")).actionGet().getStatus(); + ClusterHealthStatus test = clusterAdmin().health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test")) + .actionGet() + .getStatus(); return test == ClusterHealthStatus.RED; }, 5, TimeUnit.MINUTES);// sometimes on slow nodes the replication / recovery is just dead slow - final ClusterHealthResponse response = clusterAdmin().health(new ClusterHealthRequest("test")).get(); + final ClusterHealthResponse response = clusterAdmin().health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test")).get(); if (response.getStatus() != ClusterHealthStatus.RED) { logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed); - logger.info("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks()); + logger.info( + "cluster state:\n{}\n{}", + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), + getClusterPendingTasks() + ); } assertThat(response.getStatus(), is(ClusterHealthStatus.RED)); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable() .activePrimaryShardsGrouped(new String[] { "test" }, false); for (ShardIterator iterator : shardIterators) { @@ -509,7 +519,7 @@ public void onTimeout(TimeValue timeout) { } private void assertThatAllShards(String index, Consumer verifier) { - var clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().routingTable(true)).actionGet(); + var clusterStateResponse = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).routingTable(true)).actionGet(); var indexRoutingTable = clusterStateResponse.getState().getRoutingTable().index(index); for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { verifier.accept(indexRoutingTable.shard(shardId)); @@ -655,7 +665,7 @@ public void testReplicaCorruption() throws Exception { } private int numShards(String... 
index) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false); return shardIterators.size(); } @@ -682,7 +692,7 @@ private ShardRouting corruptRandomPrimaryFile() throws IOException { } private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles) throws IOException { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Index test = state.metadata().index("test").getIndex(); GroupShardsIterator shardIterators = state.getRoutingTable() .activePrimaryShardsGrouped(new String[] { "test" }, false); @@ -738,7 +748,7 @@ private static boolean isPerSegmentFile(String fileName) { public List listShardFiles(ShardRouting routing) throws IOException { NodesStatsResponse nodeStatses = clusterAdmin().prepareNodesStats(routing.currentNodeId()).setFs(true).get(); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final Index test = state.metadata().index("test").getIndex(); assertThat(routing.toString(), nodeStatses.getNodes().size(), equalTo(1)); List files = new ArrayList<>(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java index 143ffedeefc55..45b9091ab2552 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java @@ -144,7 +144,7 @@ private SearchRequestBuilder addSuggestions(SearchRequestBuilder request, int i) } private Set nodeIdsWithIndex(String... indices) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); Set nodes = new HashSet<>(); for (ShardIterator shardIterator : allAssignedShardsGrouped) { @@ -159,7 +159,7 @@ private Set nodeIdsWithIndex(String... indices) { } protected int numAssignedShards(String... 
indices) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); return allAssignedShardsGrouped.size(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java index 5c4cdc8cde851..8ea707c0a26b3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java @@ -58,7 +58,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { CreateIndexResponse createIndexResponse = indicesAdmin().create(new CreateIndexRequest("test").settings(settings)).actionGet(); assertAcked(createIndexResponse); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11)); @@ -68,7 +68,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { // first wait for 2 nodes in the cluster logger.info("Waiting for replicas to be assigned"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("2") .setWaitForNoRelocatingShards(true) @@ -83,7 +83,9 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { ClusterRerouteUtils.reroute(client()); clusterHealth = clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true) + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() + .waitForNodes("2") + .waitForNoRelocatingShards(true) ).actionGet(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -94,7 +96,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { assertThat(clusterHealth.getActiveShards(), equalTo(22)); assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNodesPresent(clusterState.getRoutingNodes(), node1, node2); routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0)); @@ -109,7 +111,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { // first wait for 3 nodes in the cluster logger.info("Waiting for replicas to be assigned"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("3") .setWaitForNoRelocatingShards(true) @@ -123,7 +125,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other 
nodes join) ClusterRerouteUtils.reroute(client()); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("3") .setWaitForNoRelocatingShards(true) @@ -138,7 +140,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { assertThat(clusterHealth.getActiveShards(), equalTo(22)); assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNodesPresent(clusterState.getRoutingNodes(), node1, node2, node3); routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); @@ -165,7 +167,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { internalCluster().stopNode(server_1); // verify health logger.info("Running Cluster Health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("2") .setWaitForNoRelocatingShards(true) @@ -177,7 +179,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { ClusterRerouteUtils.reroute(client()); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("2") .setWaitForNoRelocatingShards(true) @@ -189,7 +191,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { assertThat(clusterHealth.getActiveShards(), equalTo(22)); assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNodesPresent(clusterState.getRoutingNodes(), node3, node2); routingNodeEntry2 = clusterState.getRoutingNodes().node(node2); routingNodeEntry3 = clusterState.getRoutingNodes().node(node3); @@ -207,7 +209,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { AcknowledgedResponse deleteIndexResponse = indicesAdmin().prepareDelete("test").get(); assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNodesPresent(clusterState.getRoutingNodes(), node3, node2); routingNodeEntry2 = clusterState.getRoutingNodes().node(node2); assertThat(routingNodeEntry2.isEmpty(), equalTo(true)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java index fb22aaa3747c2..69e982a30b354 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java @@ -177,7 +177,7 @@ public void testCreateIndexWithDateMathExpression() { assertEquals(dateMathExp3, response.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); }); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().index(index1), notNullValue()); 
assertThat(clusterState.metadata().index(index2), notNullValue()); assertThat(clusterState.metadata().index(index3), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index e9e88a2d6b76c..325cd27f0090f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -97,7 +97,7 @@ public void beforeIndexCreated(Index index, Settings indexSettings) { fail("should have thrown an exception during creation"); } catch (Exception e) { assertTrue(e.getMessage().contains("failing on purpose")); - assertFalse(clusterAdmin().prepareState().get().getState().routingTable().hasIndex("failed")); + assertFalse(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().routingTable().hasIndex("failed")); } } @@ -120,7 +120,7 @@ public void beforeIndexCreated(Index index, Settings indexSettings) { ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("index1", 0, node1, node2)); ensureGreen("index1"); - var state = clusterAdmin().prepareState().get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); logger.info("Final routing is {}", state.getRoutingNodes().toString()); var shard = state.routingTable().index("index1").shard(0).primaryShard(); assertThat(shard, notNullValue()); @@ -148,13 +148,13 @@ public void beforeIndexCreated(Index index, Settings indexSettings) { // await all relocation attempts are exhausted assertBusy(() -> { - var state = clusterAdmin().prepareState().get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); var shard = state.routingTable().index("index1").shard(0).primaryShard(); assertThat(shard, notNullValue()); assertThat(shard.relocationFailureInfo().failedRelocations(), equalTo(maxAttempts)); }); // ensure the shard remain started - var state = clusterAdmin().prepareState().get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); logger.info("Final routing is {}", state.getRoutingNodes().toString()); var shard = state.routingTable().index("index1").shard(0).primaryShard(); assertThat(shard, notNullValue()); @@ -177,7 +177,7 @@ public void testIndexStateShardChanged() throws Throwable { fail("should have thrown an exception"); } catch (ElasticsearchException e) { assertTrue(e.getMessage().contains("failing on purpose")); - ClusterStateResponse resp = clusterAdmin().prepareState().get(); + ClusterStateResponse resp = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertFalse(resp.getState().routingTable().indicesRouting().keySet().contains("failed")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java index c4be9568f8bab..1ca8fb315b09f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java @@ -677,7 +677,7 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept assertFalse( client(REMOTE_CLUSTER_1).admin() .cluster() - .prepareHealth(remoteIndex1) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex1) 
.setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() @@ -715,7 +715,7 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept assertFalse( client(REMOTE_CLUSTER_2).admin() .cluster() - .prepareHealth(remoteIndex2) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex2) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java index 28e89f4590557..d263a9d658891 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java @@ -106,7 +106,7 @@ public void assertMatched() {} updateIndexSettings(Settings.builder().putNull(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name"), indexName); ensureYellow(indexName); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); - assertEquals(ClusterHealthStatus.YELLOW, clusterAdmin().prepareHealth(indexName).get().getStatus()); + assertEquals(ClusterHealthStatus.YELLOW, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getStatus()); mockLog.assertAllExpectationsMatched(); } @@ -153,7 +153,7 @@ public void testShardLockTimeout() throws Exception { updateIndexSettings(Settings.builder().putNull(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name"), indexName); assertBusy(mockLog::assertAllExpectationsMatched); - final var clusterHealthResponse = clusterAdmin().prepareHealth(indexName) + final var clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueSeconds(10)) .setWaitForNoInitializingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java index 720f48754519b..57f09e1ed2bb1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java @@ -62,7 +62,7 @@ public void testSimpleGetMappings() throws Exception { indicesAdmin().prepareCreate("indexa").setMapping(getMappingForType()).get(); indicesAdmin().prepareCreate("indexb").setMapping(getMappingForType()).get(); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 0008ec1f9cbd2..3d240627cf23f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -62,7 +62,7 @@ public void testDynamicUpdates() throws Exception { indicesAdmin().prepareCreate("test") .setSettings(indexSettings(1, 0).put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE)) .get(); - 
clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); updateClusterSettings( Settings.builder().put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(5)) ); @@ -100,7 +100,7 @@ public void testUpdateMappingWithoutType() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping(""" {"properties":{"body":{"type":"text"}}} """).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" {"properties":{"date":{"type":"integer"}}} @@ -115,7 +115,7 @@ public void testUpdateMappingWithoutType() { public void testUpdateMappingWithoutTypeMultiObjects() { createIndex("test", 1, 0); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" {"properties":{"date":{"type":"integer"}}}""", XContentType.JSON).get(); @@ -131,7 +131,7 @@ public void testUpdateMappingWithConflicts() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(2, 0)).setMapping(""" {"properties":{"body":{"type":"text"}}} """).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); try { indicesAdmin().preparePutMapping("test").setSource(""" @@ -163,7 +163,7 @@ public void testUpdateMappingWithNormsConflicts() { public void testUpdateMappingNoChanges() { indicesAdmin().prepareCreate("test").setSettings(indexSettings(2, 0)).setMapping(""" {"properties":{"body":{"type":"text"}}}""").get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource(""" {"_doc":{"properties":{"body":{"type":"text"}}}} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 705fb879e9125..e547ae5a46deb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -319,7 +319,7 @@ public void testCanResetUnreasonableSettings() { reset(); assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java index ff2117ea93bb9..56fcb3c1d123d 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java @@ -63,7 +63,12 @@ public void testCircuitBreakerTripCountMetric() { // NOTE: we start with empty circuitBreakerSettings to allow cluster formation masterNodeName = internalCluster().startMasterOnlyNode(Settings.EMPTY); dataNodeName = internalCluster().startDataOnlyNode(Settings.EMPTY); - assertTrue(clusterAdmin().prepareUpdateSettings().setPersistentSettings(circuitBreakerSettings).get().isAcknowledged()); + assertTrue( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(circuitBreakerSettings) + .get() + .isAcknowledged() + ); assertTrue( client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java index 0b9ca9d9f9586..a51f4bb10dc00 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java @@ -325,7 +325,7 @@ public void testDanglingIndicesImportedAndDeletedCannotBeReimported() throws Exc } } - final Metadata metadata = clusterAdmin().prepareState().clear().setMetadata(true).get().getState().metadata(); + final Metadata metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).get().getState().metadata(); assertTrue(metadata.indexGraveyard().toString(), metadata.indexGraveyard().containsIndex(new Index(INDEX_NAME, danglingIndexUUID))); assertNull(Strings.toString(metadata, true, true), metadata.index(INDEX_NAME)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index a9e06fe438c41..64f594c488a5e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -53,7 +53,7 @@ public void run() { }; indexingThread.start(); - ClusterState initialState = clusterAdmin().prepareState().get().getState(); + ClusterState initialState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DiscoveryNode[] dataNodes = initialState.getNodes().getDataNodes().values().toArray(DiscoveryNode[]::new); DiscoveryNode relocationSource = initialState.getNodes() .getDataNodes() @@ -65,7 +65,7 @@ public void run() { } logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName()); ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId())); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setTimeout(TimeValue.timeValueSeconds(60)) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) @@ -77,7 +77,7 @@ public void run() { "timed out waiting for relocation iteration [" + i + "]", ReferenceDocs.LOGGING ); - final ClusterState clusterState = 
clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); logger.info("timed out for waiting for relocation iteration [{}] \ncluster state {}", i, clusterState); finished.set(true); indexingThread.join(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index fbbeec4b4e9ba..abeaf8422748d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -523,7 +523,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { transportService.clearAllRules(); // make sure nodeA has primary and nodeB has replica - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED); assertThat(startedShards.size(), equalTo(2)); for (ShardRouting shardRouting : startedShards) { @@ -635,7 +635,7 @@ public void testRerouteRecovery() throws Exception { logger.info("--> start node C"); String nodeC = internalCluster().startNode(); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("3").get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("3").get().isTimedOut()); logger.info("--> slowing down recoveries"); throttleRecovery10Seconds(shardSize); @@ -1118,7 +1118,7 @@ public void testOngoingRecoveryAndMasterFailOver() throws Exception { Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).putNull("index.routing.allocation.include._name"), indexName ); - assertFalse(clusterAdmin().prepareHealth(indexName).setWaitForActiveShards(2).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).setWaitForActiveShards(2).get().isTimedOut()); } finally { allowToCompletePhase1Latch.countDown(); } @@ -1261,7 +1261,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() throws Exception @Override public Settings onNodeStopped(String nodeName) throws Exception { assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1)) .setWaitForEvents(Priority.LANGUID) .get() @@ -1325,7 +1325,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() t @Override public Settings onNodeStopped(String nodeName) throws Exception { assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1)) .setWaitForEvents(Priority.LANGUID) .get() @@ -1432,7 +1432,7 @@ public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonabl @Override public Settings onNodeStopped(String nodeName) throws Exception { assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1)) .setWaitForEvents(Priority.LANGUID) .get() @@ -1657,7 +1657,7 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception { String indexName = "test-index"; createIndex(indexName, indexSettings(1, 
1).put("index.routing.allocation.include._name", String.join(",", dataNodes)).build()); ensureGreen(indexName); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DiscoveryNode nodeWithOldPrimary = clusterState.nodes() .get(clusterState.routingTable().index(indexName).shard(0).primaryShard().currentNodeId()); final var transportService = MockTransportService.getInstance(nodeWithOldPrimary.getName()); @@ -1731,7 +1731,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { indexRandom(randomBoolean(), true, true, indexRequests); assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DiscoveryNode nodeWithPrimary = clusterState.nodes() .get(clusterState.routingTable().index(indexName).shard(0).primaryShard().currentNodeId()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java index 8595f11bae428..25ac384a22917 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java @@ -55,7 +55,11 @@ public void testPromoteReplicaToPrimary() throws Exception { } // pick up a data node that contains a random primary shard - ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); + ClusterState state = client(internalCluster().getMasterName()).admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState(); final int numShards = state.metadata().index(indexName).getNumberOfShards(); final ShardRouting primaryShard = state.routingTable().index(indexName).shard(randomIntBetween(0, numShards - 1)).primaryShard(); final DiscoveryNode randomNode = state.nodes().resolveNode(primaryShard.currentNodeId()); @@ -64,7 +68,7 @@ public void testPromoteReplicaToPrimary() throws Exception { internalCluster().stopNode(randomNode.getName()); ensureYellowAndNoInitializingShards(indexName); - state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState(); + state = client(internalCluster().getMasterName()).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexRoutingTable indexRoutingTable = state.routingTable().index(indexName); for (int i = 0; i < indexRoutingTable.size(); i++) { for (ShardRouting shardRouting : indexRoutingTable.shard(i).activeShards()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index 2f336f25c3cab..d117373b58f05 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -284,7 +284,7 @@ private ShardSnapshotsService getShardSnapshotsService() { } private ShardId getShardIdForIndex(String indexName) { - ClusterState state = 
clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); return state.routingTable().index(indexName).shard(0).shardId(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java index 67482ad733676..606b694cbfeb9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java @@ -36,7 +36,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { logger.info("Creating index test"); assertAcked(prepareCreate("test", 2)); logger.info("Running Cluster Health"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .get(); @@ -62,11 +62,16 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 10L); } - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); logger.info("Increasing the number of replicas from 1 to 2"); setReplicaCount(2, "test"); logger.info("Running Cluster Health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) @@ -79,7 +84,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { // only 2 copies allocated (1 replica) across 2 nodes assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2)); - final long afterReplicaIncreaseSettingsVersion = clusterAdmin().prepareState() + final long afterReplicaIncreaseSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -90,7 +95,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { logger.info("starting another node to new replicas will be allocated to it"); allowNodes("test", 3); - final long afterStartingAnotherNodeVersion = clusterAdmin().prepareState() + final long afterStartingAnotherNodeVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -98,7 +103,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { .getSettingsVersion(); logger.info("Running Cluster Health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNoRelocatingShards(true) @@ -120,7 +125,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception { setReplicaCount(0, "test"); logger.info("Running Cluster Health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNoRelocatingShards(true) @@ -138,7 +143,7 @@ public void 
testSimpleUpdateNumberOfReplicas() throws Exception { assertHitCount(prepareSearch().setQuery(matchAllQuery()), 10); } - final long afterReplicaDecreaseSettingsVersion = clusterAdmin().prepareState() + final long afterReplicaDecreaseSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -155,7 +160,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { NumShards numShards = getNumShards("test"); logger.info("--> running cluster health"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) @@ -170,7 +175,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { if (randomBoolean()) { assertAcked(indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.ALL)); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) @@ -183,13 +188,18 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2)); } - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); logger.info("--> add another node, should increase the number of replicas"); allowNodes("test", 3); logger.info("--> running cluster health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) @@ -202,7 +212,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2)); assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3)); - final long afterAddingOneNodeSettingsVersion = clusterAdmin().prepareState() + final long afterAddingOneNodeSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -215,7 +225,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { allowNodes("test", 2); logger.info("--> running cluster health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) @@ -228,7 +238,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1)); assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2)); - final long afterClosingOneNodeSettingsVersion = clusterAdmin().prepareState() + final long afterClosingOneNodeSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -241,7 +251,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws 
IOException { allowNodes("test", 1); logger.info("--> running cluster health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes(">=1") @@ -254,7 +264,7 @@ public void testAutoExpandNumberOfReplicas0ToData() throws IOException { assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0)); assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries)); - final long afterClosingAnotherNodeSettingsVersion = clusterAdmin().prepareState() + final long afterClosingAnotherNodeSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -271,7 +281,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { NumShards numShards = getNumShards("test"); logger.info("--> running cluster health"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) @@ -286,7 +296,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { if (randomBoolean()) { assertAcked(indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.ALL)); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 2) @@ -299,12 +309,17 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2)); } - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); logger.info("--> add another node, should increase the number of replicas"); allowNodes("test", 3); logger.info("--> running cluster health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) @@ -316,7 +331,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2)); assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3)); - final long afterAddingOneNodeSettingsVersion = clusterAdmin().prepareState() + final long afterAddingOneNodeSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -329,7 +344,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { allowNodes("test", 2); logger.info("--> running cluster health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes(">=2") @@ -342,7 +357,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), 
equalTo(1)); assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2)); - final long afterClosingOneNodeSettingsVersion = clusterAdmin().prepareState() + final long afterClosingOneNodeSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -355,7 +370,7 @@ public void testAutoExpandNumberReplicas1ToData() throws IOException { allowNodes("test", 1); logger.info("--> running cluster health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() .setWaitForNodes(">=1") @@ -376,7 +391,7 @@ public void testAutoExpandNumberReplicas2() { NumShards numShards = getNumShards("test"); logger.info("--> running cluster health"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 3) @@ -392,12 +407,17 @@ public void testAutoExpandNumberReplicas2() { allowNodes("test", 4); allowNodes("test", 5); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); logger.info("--> update the auto expand replicas to 0-3"); updateIndexSettings(Settings.builder().put("auto_expand_replicas", "0-3"), "test"); logger.info("--> running cluster health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 4) @@ -414,14 +434,19 @@ public void testAutoExpandNumberReplicas2() { * time from the number of replicas changed by the allocation service. 
*/ assertThat( - clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").getSettingsVersion(), equalTo(1 + 1 + settingsVersion) ); } public void testUpdateWithInvalidNumberOfReplicas() { createIndex("test"); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); final int value = randomIntBetween(-10, -1); try { indicesAdmin().prepareUpdateSettings("test") @@ -431,7 +456,7 @@ public void testUpdateWithInvalidNumberOfReplicas() { } catch (IllegalArgumentException e) { assertEquals("Failed to parse value [" + value + "] for setting [index.number_of_replicas] must be >= 0", e.getMessage()); assertThat( - clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").getSettingsVersion(), equalTo(settingsVersion) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 6e58d275e578f..20089cd463bff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -64,7 +64,7 @@ public void testInvalidDynamicUpdate() { indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.dummy", "boom")) ); assertEquals(exception.getCause().getMessage(), "this setting goes boom"); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertNotEquals(indexMetadata.getSettings().get("index.dummy"), "invalid dynamic value"); } @@ -141,44 +141,48 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testUpdateDependentClusterSettings() { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) 
.setPersistentSettings(Settings.builder().put("cluster.acc.test.user", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); if (randomBoolean()) { - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf").put("cluster.acc.test.user", "asdf")) .get(); iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("cluster.acc.test.user")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setTransientSettings(Settings.builder().putNull("cluster.acc.test.user")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().putNull("cluster.acc.test.pw").putNull("cluster.acc.test.user")) .get(); } else { - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf").put("cluster.acc.test.user", "asdf")) .get(); iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("cluster.acc.test.user")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().putNull("cluster.acc.test.user")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); @@ -230,7 +234,7 @@ public void testResetDefaultWithWildcard() { createIndex("test"); indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", -1)).get(); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertEquals(indexMetadata.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -239,7 +243,7 @@ public void testResetDefaultWithWildcard() { } } indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.ref*")).get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertNull(indexMetadata.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -259,7 +263,7 @@ public void testResetDefault() { .put("index.translog.generation_threshold_size", "4096b") ) .get(); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); 
assertEquals(indexMetadata.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -270,7 +274,7 @@ public void testResetDefault() { } } indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.refresh_interval")).get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertNull(indexMetadata.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -303,7 +307,7 @@ public void testOpenCloseUpdateSettings() throws Exception { .put("index.final", "no") ) // this one can't ); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), nullValue()); assertThat(indexMetadata.getSettings().get("index.fielddata.cache"), nullValue()); assertThat(indexMetadata.getSettings().get("index.final"), nullValue()); @@ -318,7 +322,7 @@ public void testOpenCloseUpdateSettings() throws Exception { .setSettings(Settings.builder().put("index.refresh_interval", -1)) // this one can change .get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("-1")); // Now verify via dedicated get settings api: getSettingsResponse = indicesAdmin().prepareGetSettings("test").get(); @@ -327,7 +331,7 @@ public void testOpenCloseUpdateSettings() throws Exception { // now close the index, change the non dynamic setting, and see that it applies // Wait for the index to turn green before attempting to close it - ClusterHealthResponse health = clusterAdmin().prepareHealth() + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setTimeout(TimeValue.timeValueSeconds(30)) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -338,7 +342,7 @@ public void testOpenCloseUpdateSettings() throws Exception { indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getNumberOfReplicas(), equalTo(1)); indicesAdmin().prepareUpdateSettings("test") @@ -349,7 +353,7 @@ public void testOpenCloseUpdateSettings() throws Exception { ) // this one can't .get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("1s")); assertThat(indexMetadata.getSettings().get("index.fielddata.cache"), equalTo("none")); @@ -363,7 +367,7 @@ public void 
testOpenCloseUpdateSettings() throws Exception { ) // this one really can't ); assertThat(ex.getMessage(), containsString("final test setting [index.final], not updateable")); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("1s")); assertThat(indexMetadata.getSettings().get("index.final"), nullValue()); @@ -426,22 +430,42 @@ public void testSettingsVersion() { ensureGreen("test"); { - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertAcked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "500ms")) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(1 + settingsVersion)); } { final boolean block = randomBoolean(); assertAcked(indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.read_only", block))); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertAcked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.read_only", block == false)) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(1 + settingsVersion)); // if the read-only block is present, remove it @@ -458,12 +482,22 @@ public void testSettingsVersionUnchanged() { ensureGreen("test"); { - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); final String refreshInterval = indicesAdmin().prepareGetSettings("test").get().getSetting("test", "index.refresh_interval"); assertAcked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", refreshInterval)) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(settingsVersion)); } @@ -471,9 +505,19 @@ public void testSettingsVersionUnchanged() { final boolean block = randomBoolean(); 
assertAcked(indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.read_only", block))); // now put the same block again - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertAcked(indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.read_only", block))); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(settingsVersion)); // if the read-only block is present, remove it @@ -493,14 +537,24 @@ public void testSettingsVersionUnchanged() { public void testNumberOfReplicasSettingsVersionUnchanged() { createIndex("test"); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); final int numberOfReplicas = Integer.valueOf( indicesAdmin().prepareGetSettings("test").get().getSetting("test", "index.number_of_replicas") ); assertAcked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", numberOfReplicas)) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(settingsVersion)); } @@ -512,7 +566,12 @@ public void testNumberOfReplicasSettingsVersionUnchanged() { public void testNumberOfReplicasSettingsVersion() { createIndex("test"); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); final int numberOfReplicas = Integer.valueOf( indicesAdmin().prepareGetSettings("test").get().getSetting("test", "index.number_of_replicas") ); @@ -520,7 +579,12 @@ public void testNumberOfReplicasSettingsVersion() { indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put("index.number_of_replicas", 1 + numberOfReplicas)) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(1 + settingsVersion)); } @@ -574,7 +638,7 @@ public void testNoopUpdate() { indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) ); assertNotSame(currentState, clusterService.state()); - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() 
.setWaitForNoInitializingShards(true) .setWaitForNoRelocatingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index 6b1aafe2f9b17..5501b88d8a267 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -47,7 +47,7 @@ public void testCloseAllRequiresName() { } private void assertIndexIsClosed(String... indices) { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); for (String index : indices) { IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().indices().get(index); assertNotNull(indexMetadata); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index d52294d7584b8..3947ae6d2b577 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -162,7 +162,7 @@ public void testCloseUnassignedIndex() throws Exception { .setSettings(Settings.builder().put("index.routing.allocation.include._name", "nothing").build()) ); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().indices().get(indexName).getState(), is(IndexMetadata.State.OPEN)); assertThat(clusterState.routingTable().allShards().allMatch(ShardRouting::unassigned), is(true)); @@ -182,7 +182,7 @@ public void testConcurrentClose() throws InterruptedException, ExecutionExceptio IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(indexName) + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName) .setWaitForYellowStatus() .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) @@ -243,7 +243,7 @@ public void testCloseWhileDeletingIndices() throws Exception { } indices[i] = indexName; } - assertThat(clusterAdmin().prepareState().get().getState().metadata().indices().size(), equalTo(indices.length)); + assertThat(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indices().size(), equalTo(indices.length)); startInParallel(indices.length * 2, i -> { final String index = indices[i % indices.length]; @@ -285,7 +285,7 @@ public void testConcurrentClosesAndOpens() throws Exception { indexer.stopAndAwaitStopped(); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); if (clusterState.metadata().indices().get(indexName).getState() == IndexMetadata.State.CLOSE) { assertIndexIsClosed(indexName); assertAcked(indicesAdmin().prepareOpen(indexName)); @@ -310,7 +310,7 @@ public void testCloseIndexWaitForActiveShards() throws Exception { ensureGreen(indexName); final 
CloseIndexResponse closeIndexResponse = indicesAdmin().prepareClose(indexName).get(); - assertThat(clusterAdmin().prepareHealth(indexName).get().getStatus(), is(ClusterHealthStatus.GREEN)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getStatus(), is(ClusterHealthStatus.GREEN)); assertTrue(closeIndexResponse.isAcknowledged()); assertTrue(closeIndexResponse.isShardsAcknowledged()); assertThat(closeIndexResponse.getIndices().get(0), notNullValue()); @@ -532,7 +532,7 @@ private static void closeIndices(final CloseIndexRequestBuilder requestBuilder) } static void assertIndexIsClosed(final String... indices) { - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (String index : indices) { final IndexMetadata indexMetadata = clusterState.metadata().indices().get(index); assertThat(indexMetadata.getState(), is(IndexMetadata.State.CLOSE)); @@ -555,7 +555,7 @@ static void assertIndexIsClosed(final String... indices) { } static void assertIndexIsOpened(final String... indices) { - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (String index : indices) { final IndexMetadata indexMetadata = clusterState.metadata().indices().get(index); assertThat(indexMetadata.getState(), is(IndexMetadata.State.OPEN)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index a7a2af57ef810..9ec7ebcf91535 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -49,7 +49,7 @@ public class OpenCloseIndexIT extends ESIntegTestCase { public void testSimpleCloseOpen() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1").get(); @@ -70,7 +70,7 @@ public void testSimpleOpenMissingIndex() { public void testOpenOneMissingIndex() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); Exception e = expectThrows(IndexNotFoundException.class, client.admin().indices().prepareOpen("test1", "test2")); assertThat(e.getMessage(), is("no such index [test2]")); @@ -79,7 +79,7 @@ public void testOpenOneMissingIndex() { public void testOpenOneMissingIndexIgnoreMissing() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); 
assertThat(healthResponse.isTimedOut(), equalTo(false)); OpenIndexResponse openIndexResponse = client.admin() .indices() @@ -94,7 +94,7 @@ public void testOpenOneMissingIndexIgnoreMissing() { public void testCloseOpenMultipleIndices() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").get(); @@ -116,7 +116,7 @@ public void testCloseOpenMultipleIndices() { public void testCloseOpenWildcard() { Client client = client(); createIndex("test1", "test2", "a"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test*").get(); @@ -133,7 +133,7 @@ public void testCloseOpenWildcard() { public void testCloseOpenAll() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("_all").get(); @@ -149,7 +149,7 @@ public void testCloseOpenAll() { public void testCloseOpenAllWildcard() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("*").get(); @@ -175,7 +175,7 @@ public void testOpenNullIndex() { public void testOpenAlreadyOpenedIndex() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); // no problem if we try to open an index that's already in open state @@ -188,7 +188,7 @@ public void testOpenAlreadyOpenedIndex() { public void testSimpleCloseOpenAlias() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse aliasesResponse = client.admin().indices().prepareAliases().addAlias("test1", "test1-alias").get(); @@ -207,7 +207,7 @@ public void testSimpleCloseOpenAlias() { public void 
testCloseOpenAliasMultipleIndices() { Client client = client(); createIndex("test1", "test2"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse aliasesResponse1 = client.admin().indices().prepareAliases().addAlias("test1", "test-alias").get(); @@ -240,7 +240,7 @@ public void testOpenWaitingForActiveShardsFailed() throws Exception { assertThat(response.isShardsAcknowledged(), equalTo(false)); assertBusy( () -> assertThat( - client.admin().cluster().prepareState().get().getState().metadata().index("test").getState(), + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN) ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java index 3c16e0f2624ed..1b0b71b86f074 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java @@ -159,7 +159,7 @@ private Releasable interceptVerifyShardBeforeCloseActions(final String indexPatt } private static void assertIndexIsBlocked(final String... indices) { - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (String index : indices) { assertThat(clusterState.metadata().indices().get(index).getState(), is(IndexMetadata.State.OPEN)); assertThat(clusterState.routingTable().index(index), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index b5448498f0ce9..c62f1776178ba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -34,7 +34,7 @@ public void testSimpleOpenClose() { NumShards numShards = getNumShards("test"); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(numShards.numPrimaries)); assertEquals( @@ -48,7 +48,7 @@ public void testSimpleOpenClose() { logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -66,7 +66,7 @@ public void testSimpleOpenClose() { logger.info("--> waiting for green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(numShards.numPrimaries)); @@ -86,7 +86,7 @@ public void testFastCloseAfterCreateContinuesCreateAfterOpen() { .setSettings(Settings.builder().put("index.routing.allocation.include.tag", "no_such_node").build()) .get(); - ClusterHealthResponse health = clusterAdmin().prepareHealth("test").setWaitForNodes(">=2").get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").setWaitForNodes(">=2").get(); assertThat(health.isTimedOut(), equalTo(false)); assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED)); @@ -102,7 +102,7 @@ public void testFastCloseAfterCreateContinuesCreateAfterOpen() { NumShards numShards = getNumShards("test"); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(numShards.numPrimaries)); assertEquals( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 7ffc2539d2fa0..083823523afaa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -237,7 +237,7 @@ public void testClearAllCaches() throws Exception { .setMapping("field", "type=text,fielddata=true") ); ensureGreen(); - clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); prepareIndex("test").setId("1").setSource("field", "value1").get(); prepareIndex("test").setId("2").setSource("field", "value2").get(); indicesAdmin().prepareRefresh().get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 5eeb07968ce4d..58c1a30cb5513 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -98,7 +98,7 @@ public void testIndexCleanup() throws Exception { ) ); ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Index index = state.metadata().index("test").getIndex(); logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2"); @@ -110,7 +110,10 @@ public void testIndexCleanup() throws Exception { logger.info("--> starting node server3"); final String node_3 = internalCluster().startNode(nonMasterNode()); logger.info("--> running cluster_health"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNodes("4").setWaitForNoRelocatingShards(true).get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForNodes("4") + 
.setWaitForNoRelocatingShards(true) + .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); @@ -131,7 +134,7 @@ public void testIndexCleanup() throws Exception { } else { ClusterRerouteUtils.reroute(internalCluster().client(), new MoveAllocationCommand("test", 0, node_1, node_3)); } - clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get(); + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNoRelocatingShards(true).get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertShardDeleted(node_1, index, 0); @@ -197,13 +200,13 @@ public void testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted( ) ); ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Index index = state.metadata().index("test").getIndex(); assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true)); final String node_2 = internalCluster().startDataOnlyNode(Settings.builder().build()); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("2").get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get().isTimedOut()); assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true)); @@ -226,7 +229,7 @@ public void testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted( logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2); ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node_1, node_2)); shardActiveRequestSent.await(); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNoRelocatingShards(true).get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); logClusterState(); // delete the index. node_1 that still waits for the next cluster state update will then get the delete index next. 
@@ -258,7 +261,7 @@ public void testShardsCleanup() throws Exception { ); ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Index index = state.metadata().index("test").getIndex(); logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2"); assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); @@ -267,7 +270,10 @@ public void testShardsCleanup() throws Exception { logger.info("--> starting node server3"); String node_3 = internalCluster().startNode(); logger.info("--> running cluster_health"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNodes("3").setWaitForNoRelocatingShards(true).get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForNodes("3") + .setWaitForNoRelocatingShards(true) + .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); logger.info("--> making sure that shard is not allocated on server3"); @@ -278,7 +284,7 @@ public void testShardsCleanup() throws Exception { internalCluster().stopNode(node_2); logger.info("--> running cluster_health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("2") .setWaitForNoRelocatingShards(true) @@ -325,7 +331,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { ) ); assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNoRelocatingShards(true) .setWaitForGreenStatus() .setWaitForNodes("5") @@ -344,7 +350,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { internalCluster().stopNode(nodesToShutDown.get(1)); logger.debug("--> verifying index is red"); - ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForNodes("3").get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("3").get(); if (health.getStatus() != ClusterHealthStatus.RED) { logClusterState(); fail("cluster didn't become red, despite of shutting 2 of 3 nodes"); @@ -362,7 +368,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { assertBusy(() -> assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index))); // wait for 4 active shards - we should have lost one shard - assertFalse(clusterAdmin().prepareHealth().setWaitForActiveShards(4).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForActiveShards(4).get().isTimedOut()); // disable allocation again to control concurrency a bit and allow shard active to kick in before allocation updateClusterSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")); @@ -371,7 +377,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { internalCluster().startNodes(node1DataPathSettings, node2DataPathSettings); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("5").get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("5").get().isTimedOut()); updateClusterSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")); @@ -395,7 +401,7 @@ public void testShardActiveElseWhere() 
throws Exception { ensureGreen("test"); waitNoPendingTasksOnAll(); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); final Index index = stateResponse.getState().metadata().index("test").getIndex(); RoutingNode routingNode = stateResponse.getState().getRoutingNodes().node(nonMasterId); final int[] node2Shards = new int[routingNode.numberOfOwningShards()]; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 1e1333f376e9f..4051dba84588a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -178,7 +178,7 @@ public void testSimpleIndexTemplateTests() throws Exception { } public void testDeleteIndexTemplate() throws Exception { - final int existingTemplates = admin().cluster().prepareState().get().getState().metadata().templates().size(); + final int existingTemplates = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().templates().size(); logger.info("--> put template_1 and template_2"); indicesAdmin().preparePutTemplate("template_1") .setPatterns(Collections.singletonList("te*")) @@ -223,7 +223,7 @@ public void testDeleteIndexTemplate() throws Exception { logger.info("--> explicitly delete template_1"); indicesAdmin().prepareDeleteTemplate("template_1").get(); - ClusterState state = admin().cluster().prepareState().get().getState(); + ClusterState state = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.metadata().templates().size(), equalTo(1 + existingTemplates)); assertThat(state.metadata().templates().containsKey("template_2"), equalTo(true)); @@ -254,11 +254,14 @@ public void testDeleteIndexTemplate() throws Exception { logger.info("--> delete template*"); indicesAdmin().prepareDeleteTemplate("template*").get(); - assertThat(admin().cluster().prepareState().get().getState().metadata().templates().size(), equalTo(existingTemplates)); + assertThat( + admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().templates().size(), + equalTo(existingTemplates) + ); logger.info("--> delete * with no templates, make sure we don't get a failure"); indicesAdmin().prepareDeleteTemplate("*").get(); - assertThat(admin().cluster().prepareState().get().getState().metadata().templates().size(), equalTo(0)); + assertThat(admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().templates().size(), equalTo(0)); } public void testThatGetIndexTemplatesWorks() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java index a3c43de39218d..0fa1ef1208593 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java @@ -158,7 +158,7 @@ private void assertPipelinesSaveOK(CountDownLatch savedClusterState, AtomicLong assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new 
ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).get(); ReservedStateMetadata reservedState = clusterStateResponse.getState() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java index 9b60044c94f70..eec90241fd902 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java @@ -24,7 +24,10 @@ public class SimpleNodesCapabilitiesIT extends ESIntegTestCase { public void testNodesCapabilities() throws IOException { internalCluster().startNodes(2); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); // check we support the capabilities API itself. Which we do. diff --git a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index a5700c319aa59..e4c83c81a7684 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -33,7 +33,10 @@ public void testNodesInfos() { final String node_1 = nodesNames.get(0); final String node_2 = nodesNames.get(1); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = getNodeId(node_1); @@ -72,7 +75,10 @@ public void testNodesInfosTotalIndexingBuffer() { final String node_1 = nodesNames.get(0); final String node_2 = nodesNames.get(1); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = getNodeId(node_1); @@ -110,7 +116,10 @@ public void testAllocatedProcessors() throws Exception { final String node_1 = nodeNames.get(0); final String node_2 = nodeNames.get(1); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = getNodeId(node_1); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java index a49fadb0c4b5b..a851ecb11c798 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java @@ -84,7 +84,7 @@ public void testCloseIndexDefaultBehaviour() throws Exception { assertAcked(indicesAdmin().prepareClose("*").get()); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (Map.Entry<String, IndexMetadata> indexMetadataEntry : state.getMetadata().indices().entrySet()) { assertEquals(IndexMetadata.State.CLOSE, indexMetadataEntry.getValue().getState()); } @@ -117,7 +117,7 @@ public void testOpenIndexDefaultBehaviour() throws Exception { assertAcked(indicesAdmin().prepareOpen("*").get()); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (Map.Entry<String, IndexMetadata> indexMetadataEntry : state.getMetadata().indices().entrySet()) { assertEquals(IndexMetadata.State.OPEN, indexMetadataEntry.getValue().getState()); } @@ -150,7 +150,7 @@ public void testAddIndexBlockDefaultBehaviour() throws Exception { assertAcked(indicesAdmin().prepareAddBlock(WRITE, "*").get()); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertTrue("write block is set on index1", state.getBlocks().hasIndexBlock("index1", IndexMetadata.INDEX_WRITE_BLOCK)); assertTrue("write block is set on 1index", state.getBlocks().hasIndexBlock("1index", IndexMetadata.INDEX_WRITE_BLOCK)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index e7d23f97fc992..caaea0a8a3846 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -110,7 +110,7 @@ public void testEnableAssignmentAfterRestart() throws Exception { } private void assertEnableAssignmentSetting(final Allocation expected) { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().setMetadata(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).get(); Settings settings = clusterStateResponse.getState().getMetadata().settings(); String value = settings.get(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java index db26d630fefea..21aa8a8770359 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java @@ -73,7 +73,7 @@ public void testListenersInvokedWhenIndexIsDeleted() throws Exception { final NumShards numShards = getNumShards(indexName); assertFalse( - clusterAdmin().prepareHealth() +
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices(indexName) .setWaitForGreenStatus() .setWaitForEvents(Priority.LANGUID) @@ -140,7 +140,7 @@ public void testListenersInvokedWhenIndexIsRelocated() throws Exception { final NumShards numShards = getNumShards(indexName); assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices(indexName) .setWaitForGreenStatus() .setWaitForEvents(Priority.LANGUID) @@ -206,7 +206,7 @@ public void testListenersInvokedWhenIndexIsDangling() throws Exception { final NumShards numShards = getNumShards(indexName); assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices(indexName) .setWaitForGreenStatus() .setWaitForEvents(Priority.LANGUID) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java index 8335b3c0c4249..6be1612c32ad8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java @@ -111,13 +111,16 @@ protected Collection<Class<? extends Plugin>> getMockPlugins() { } private void assertMasterNode(Client client, String node) { - assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } private void expectMasterNotFound() { expectThrows( MasterNotDiscoveredException.class, - clusterAdmin().prepareState().setMasterNodeTimeout(TimeValue.timeValueMillis(100)) + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setMasterNodeTimeout(TimeValue.timeValueMillis(100)) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java index da59d306d4119..adc9db1cb3ed1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -62,7 +62,7 @@ public void testFullRollingRestart() throws Exception { // make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -76,7 +76,7 @@ public void testFullRollingRestart() throws Exception { // make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -94,7 +94,7 @@ public void testFullRollingRestart() throws Exception { internalCluster().stopRandomDataNode(); // make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -105,7 +105,7 @@ public void testFullRollingRestart() throws Exception { internalCluster().stopRandomDataNode(); // make sure the cluster state is green, and all has been recovered assertTimeout( -
clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -123,7 +123,7 @@ public void testFullRollingRestart() throws Exception { internalCluster().stopRandomDataNode(); // make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -135,7 +135,7 @@ public void testFullRollingRestart() throws Exception { // make sure the cluster state is yellow, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForYellowStatus() @@ -168,7 +168,7 @@ public void testNoRebalanceOnRollingRestart() throws Exception { prepareIndex("test").setId(Long.toString(i)).setSource(Map.of("test", "value" + i)).get(); } ensureGreen(); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { assertNotEquals( @@ -186,7 +186,7 @@ public void testNoRebalanceOnRollingRestart() throws Exception { } internalCluster().restartRandomDataNode(); ensureGreen(); - clusterAdmin().prepareState().get(); + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); recoveryResponse = indicesAdmin().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 70aabbc8c30d5..c066e3098df6f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -102,7 +102,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { logger.info("--> waiting for GREEN health status ..."); // make sure the cluster state is green, and all has been recovered assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForGreenStatus() @@ -163,7 +163,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr logger.info("--> waiting for GREEN health status ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForGreenStatus() @@ -225,7 +225,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception logger.info("--> waiting for GREEN health status ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForGreenStatus() @@ -242,7 +242,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception allowNodes("test", 3); 
logger.info("--> waiting for relocations ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForNoRelocatingShards(true) @@ -252,7 +252,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception allowNodes("test", 2); logger.info("--> waiting for relocations ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForNoRelocatingShards(true) @@ -262,7 +262,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception allowNodes("test", 1); logger.info("--> waiting for relocations ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForNoRelocatingShards(true) @@ -273,7 +273,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception logger.info("--> indexing threads stopped"); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForNoRelocatingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java index 17daf403e0566..52a95b2065866 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java @@ -140,7 +140,7 @@ public void testSimpleRelocationNoIndexing() { logger.info("--> start another node"); final String node_2 = internalCluster().startNode(); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .get(); @@ -149,7 +149,7 @@ public void testSimpleRelocationNoIndexing() { logger.info("--> relocate the shard from node1 to node2"); ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node_1, node_2)); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) @@ -184,7 +184,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.info("--> starting [node{}] ...", i); nodes[i - 1] = internalCluster().startNode(); if (i != numberOfNodes) { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i)) .setWaitForGreenStatus() @@ -215,7 +215,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.debug("--> flushing"); indicesAdmin().prepareFlush().get(); } - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) 
.setTimeout(ACCEPTABLE_RELOCATION_TIME) @@ -288,7 +288,7 @@ public void testRelocationWhileRefreshing() throws Exception { logger.info("--> starting [node_{}] ...", i); nodes[i] = internalCluster().startNode(); if (i != numberOfNodes - 1) { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i + 1)) .setWaitForGreenStatus() @@ -349,7 +349,7 @@ public void indexShardStateChanged( // verify cluster was finished. assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNoRelocatingShards(true) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueSeconds(30)) @@ -390,7 +390,7 @@ public void testCancellationCleansTempFiles() throws Exception { requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); flush(); int allowedFailures = randomIntBetween(3, 5); // the default of the `index.allocation.max_retries` is 5. @@ -418,7 +418,7 @@ public void testCancellationCleansTempFiles() throws Exception { if (node.equals(p_node)) { continue; } - ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = client(node).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat( node + " indicates assigned replicas", state.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).size(), @@ -551,7 +551,7 @@ public void testRelocateWhileWaitingForRefresh() { logger.info("--> start another node"); final String node2 = internalCluster().startNode(); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .get(); @@ -560,7 +560,7 @@ public void testRelocateWhileWaitingForRefresh() { logger.info("--> relocate the shard from node1 to node2"); ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node1, node2)); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) @@ -602,7 +602,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E logger.info("--> start another node"); final String node2 = internalCluster().startNode(); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .get(); @@ -623,7 +623,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E ); } safeGet(relocationListener); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) @@ -670,7 
+670,7 @@ public void testRelocationEstablishedPeerRecoveryRetentionLeases() throws Except private void assertActiveCopiesEstablishedPeerRecoveryRetentionLeases() throws Exception { assertBusy(() -> { - for (String index : clusterAdmin().prepareState().get().getState().metadata().indices().keySet()) { + for (String index : clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indices().keySet()) { Map<ShardId, List<ShardStats>> byShardId = Stream.of(indicesAdmin().prepareStats(index).get().getShards()) .collect(Collectors.groupingBy(l -> l.getShardRouting().shardId())); for (List<ShardStats> shardStats : byShardId.values()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RestartInactiveAutoExpandReplicaNotStaleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RestartInactiveAutoExpandReplicaNotStaleIT.java index 0b56eb36c08e4..a1aecab66bbfc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RestartInactiveAutoExpandReplicaNotStaleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RestartInactiveAutoExpandReplicaNotStaleIT.java @@ -28,7 +28,7 @@ public void testNotStale() throws Exception { ensureGreen(); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); IndexMetadata target = clusterStateResponse.getState().getMetadata().index("test"); internalCluster().restartNode(replica, new InternalTestCluster.RestartCallback() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index 6c7bcd17af1f0..f9dc42cb7abe8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -137,7 +137,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final SnapshotInfo snapshotInfo = createSnapshot(repoName, Strings.format("snap-%03d", i), snapshotIndices); if (snapshotInfo.indices().contains(indexName)) { lastSnapshot = snapshotInfo; - ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().get(); + ClusterStateResponse clusterStateResponse = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get(); IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(indexName); expectedIndexMetadataId = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetadata); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index 3eb05aa36b1b5..b8ada92c9033b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -66,7 +66,10 @@ public void testRepeatCleanupsDontRemove() throws Exception { ); logger.info("--> ensure cleanup is still in progress"); - final RepositoryCleanupInProgress cleanup =
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .custom(RepositoryCleanupInProgress.TYPE); assertTrue(cleanup.hasCleanupInProgress()); logger.info("--> unblocking master node"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java index 4ce92610eff17..82b6ba15930b2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java @@ -357,7 +357,10 @@ public class ComponentTemplatesFileSettingsIT extends ESIntegTestCase { }"""; private void assertMasterNode(Client client, String node) throws ExecutionException, InterruptedException { - assertThat(client.admin().cluster().prepareState().execute().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).execute().get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } private void writeJSONFile(String node, String json) throws Exception { @@ -403,7 +406,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).actionGet(); Map<String, ComposableIndexTemplate> allTemplates = clusterStateResponse.getState().metadata().templatesV2(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index 2fe808d813ccc..049a58b633556 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -102,7 +102,10 @@ public class FileSettingsServiceIT extends ESIntegTestCase { }"""; private void assertMasterNode(Client client, String node) { - assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } public static void writeJSONFile(String node, String json, AtomicLong versionCounter, Logger logger) throws Exception { @@ -169,7 +172,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo assertTrue(savedClusterState.await(20, TimeUnit.SECONDS)); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).actionGet(); assertThat( @@ -177,7 +180,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo equalTo(expectedBytesPerSec) ); - ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT).persistentSettings( Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb") ); assertThat( @@ -257,7 +260,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { logger.info("--> restart master"); internalCluster().restartNode(masterNode); - final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat( clusterStateResponse.getState() .metadata() @@ -300,7 +303,7 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).actionGet(); assertThat(clusterStateResponse.getState().metadata().persistentSettings().get("search.allow_expensive_queries"), nullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index 1ca2526b53dff..7cec6e895a52e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -94,7 +94,10 @@ public class RepositoriesFileSettingsIT extends ESIntegTestCase { }"""; private void assertMasterNode(Client client, String node) throws ExecutionException, InterruptedException { - assertThat(client.admin().cluster().prepareState().execute().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).execute().get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } private void writeJSONFile(String node, String json) throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java index 049260e14100f..087274a86221e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java @@ -133,7 +133,7 @@ private ClusterStateResponse assertClusterStateSaveOK(CountDownLatch savedCluste boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - return clusterAdmin().state(new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())).get(); + return clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get())).get(); } public void testRestoreWithRemovedFileSettings() throws Exception { @@ -180,14 +180,15 @@ public void testRestoreWithRemovedFileSettings() throws Exception { ensureGreen(); - final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().metadata(true)).actionGet(); + final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new 
ClusterStateRequest(TEST_REQUEST_TIMEOUT).metadata(true)) + .actionGet(); // We expect no reserved metadata state for file based settings, the operator file was deleted. assertNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() + new ClusterGetSettingsAction.Request(TEST_REQUEST_TIMEOUT) ).actionGet(); assertThat( @@ -305,14 +306,14 @@ public void testRestoreWithPersistedFileSettings() throws Exception { logger.info("--> reserved state would be restored to non-zero version"); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().metadata(true).waitForMetadataVersion(removedReservedState.v2().get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).metadata(true).waitForMetadataVersion(removedReservedState.v2().get()) ).actionGet(); assertNotNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() + new ClusterGetSettingsAction.Request(TEST_REQUEST_TIMEOUT) ).actionGet(); assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java index 502f02d9ce17f..2ab77444f86b9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java @@ -51,7 +51,12 @@ public void testRollingRestartOfTwoNodeCluster() throws Exception { ); ensureGreen("test"); - final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState().clear().setNodes(true).get().getState().nodes(); + final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get() + .getState() + .nodes(); final Map nodeIdsByName = Maps.newMapWithExpectedSize(discoveryNodes.getSize()); discoveryNodes.forEach(n -> nodeIdsByName.put(n.getName(), n.getId())); @@ -98,7 +103,7 @@ public Settings onNodeStopped(String nodeName) throws IOException { ClusterHealthResponse clusterHealthResponse = client(viaNode).admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(1)) .setTimeout(TimeValue.timeValueSeconds(30L)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index 53001e30763a0..1e18f156f1fcf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ -51,7 +51,7 @@ public void testSearchClosedWildcardIndex() throws ExecutionException, Interrupt public void testResolveIndexRouting() { createIndex("test1"); createIndex("test2"); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); indicesAdmin().prepareAliases() 
.addAliasAction(AliasActions.add().index("test1").alias("alias")) @@ -93,7 +93,7 @@ public void testResolveSearchRouting() { createIndex("test1"); createIndex("test2"); createIndex("test3"); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); indicesAdmin().prepareAliases() .addAliasAction(AliasActions.add().index("test1").alias("alias")) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java index e25da54d7b214..20c197bf73893 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java @@ -93,8 +93,13 @@ public void testShrinking() throws Exception { Settings.builder() .put( "index.routing.allocation.require._name", - clusterAdmin().prepareState().get().getState().nodes().getDataNodes().values().toArray(DiscoveryNode[]::new)[0] - .getName() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getDataNodes() + .values() + .toArray(DiscoveryNode[]::new)[0].getName() ) .put("index.blocks.write", true), index diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java index f59ec4d42089e..2eb37291d41cf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -49,7 +49,7 @@ protected int minimumNumberOfShards() { } public String findNonMatchingRoutingValue(String index, String id) { - ClusterState state = clusterAdmin().prepareState().all().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); IndexMetadata metadata = state.metadata().index(index); IndexMetadata withoutRoutingRequired = IndexMetadata.builder(metadata).putMapping("{}").build(); IndexRouting indexRouting = IndexRouting.fromIndexMetadata(withoutRoutingRequired); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java index 5625299890b7e..b71dd4a39b198 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java @@ -56,7 +56,7 @@ public void testDroppedOutNode() throws Exception { assertBusy(() -> { final ClusterHealthStatus indexHealthStatus = client(master).admin() .cluster() - .health(new ClusterHealthRequest("test")) + .health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test")) .actionGet() .getStatus(); assertThat(indexHealthStatus, Matchers.is(ClusterHealthStatus.RED)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 0ed83f73e418d..17b976bdd3748 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -223,7 +223,7 @@ public void testSimpleWithCustomKeys() throws Exception { } public void testUnmapped() throws Exception { - clusterAdmin().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx_unmapped").setWaitForYellowStatus().get(); assertNoFailuresAndResponse( prepareSearch("idx_unmapped").addAggregation( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java index 6a60969e632ee..0c39859856d56 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java @@ -761,7 +761,7 @@ public void testUnmapped() throws Exception { } public void testPartiallyUnmapped() throws Exception { - clusterAdmin().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx_unmapped").setWaitForYellowStatus().get(); assertNoFailuresAndResponse( prepareSearch("idx", "idx_unmapped").addAggregation( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java index 1525496176418..9e2139f832f15 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java @@ -99,7 +99,9 @@ private void setClusterDefaultAllowPartialResults(boolean allowPartialResults) { Settings persistentSettings = Settings.builder().put(key, allowPartialResults).build(); - ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings().setPersistentSettings(persistentSettings).get(); + ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(persistentSettings) + .get(); assertAcked(response1); assertEquals(response1.getPersistentSettings().getAsBoolean(key, null), allowPartialResults); @@ -115,10 +117,10 @@ private void buildRedIndex(int numShards) throws Exception { internalCluster().stopRandomDataNode(); - clusterAdmin().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForStatus(ClusterHealthStatus.RED).get(); assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List unassigneds = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.UNASSIGNED); assertThat(unassigneds.size(), greaterThan(0)); }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index 68d00321848eb..df6994c57f425 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -61,7 +61,7 @@ private void searchWhileCreatingIndex(boolean 
createIndex, int numberOfReplicas) logger.info("using preference {}", preference); // we want to make sure that while recovery happens, and a replica gets recovered, its properly refreshed - ClusterHealthStatus status = clusterAdmin().prepareHealth("test").get().getStatus(); + ClusterHealthStatus status = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").get().getStatus(); while (status != ClusterHealthStatus.GREEN) { // first, verify that search normal search works assertHitCount(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "test")), 1); @@ -97,7 +97,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) assertHitCount(searchResponse, 1); } ); - status = clusterAdmin().prepareHealth("test").get().getStatus(); + status = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").get().getStatus(); internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1); } cluster().wipeIndices("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 657158327bf01..a9b0f75fe45ba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -125,7 +125,7 @@ public void run() { threads[j].join(); } // this might time out on some machines if they are really busy and you hit lots of throttling - ClusterHealthResponse resp = clusterAdmin().prepareHealth() + ClusterHealthResponse resp = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForYellowStatus() .setWaitForNoRelocatingShards(true) .setWaitForEvents(Priority.LANGUID) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 096f533a072b9..3fe93f8d91be3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -102,7 +102,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc ClusterHealthResponse clusterHealthResponse = clusterAdmin() // it's OK to timeout here .health( - new ClusterHealthRequest(new String[] {}).waitForYellowStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForYellowStatus() .masterNodeTimeout(TimeValue.timeValueSeconds(5)) .timeout(TimeValue.timeValueSeconds(5)) ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index 303030a523662..951ea29a09e8e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -74,13 +74,13 @@ public void testFailedSearchWithWrongQuery() throws Exception { allowNodes("test", 2); assertThat( - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").get().isTimedOut(), + 
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").get().isTimedOut(), equalTo(false) ); logger.info("Running Cluster Health"); ClusterHealthResponse clusterHealth = clusterAdmin().health( - new ClusterHealthRequest("test").waitForYellowStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test").waitForYellowStatus() .waitForNoRelocatingShards(true) .waitForEvents(Priority.LANGUID) .waitForActiveShards(test.totalNumShards) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java index 8b7f69df9fcc3..23146d1907250 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java @@ -670,7 +670,7 @@ private Map setupClusters() { assertFalse( client(clusterAlias).admin() .cluster() - .prepareHealth(remoteIndex) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index e96689ce2846d..9cc359f40d327 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -224,7 +224,7 @@ public void testCancel() throws Exception { assertFalse( client("cluster_a").admin() .cluster() - .prepareHealth("prod") + .prepareHealth(TEST_REQUEST_TIMEOUT, "prod") .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 89bc0e83351ad..e772f94e868ae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -701,7 +701,7 @@ private Map setupTwoClusters() { assertFalse( client(REMOTE_CLUSTER).admin() .cluster() - .prepareHealth(remoteIndex) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java index a9ae215c1ab79..5c26899f2e3f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java @@ -110,7 +110,7 @@ public void testSearch() throws Exception { assertFalse( client("cluster_a").admin() .cluster() - .prepareHealth("prod") + .prepareHealth(TEST_REQUEST_TIMEOUT, "prod") .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() @@ -169,7 +169,11 @@ protected void configureRemoteCluster(String clusterAlias, Collection se settings.put("cluster.remote." + clusterAlias + ".mode", "proxy"); settings.put("cluster.remote." 
+ clusterAlias + ".proxy_address", seedAddress); - client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + client().admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settings) + .get(); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index d42a84677a8f7..d58e777b093ae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -65,7 +65,7 @@ public void testPlugin() throws Exception { .endObject() ) .get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().get(); client().index( new IndexRequest("test").id("1") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java index 8c65d28711c1b..32dc34045cc8b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java @@ -106,7 +106,7 @@ private Map setupTwoClusters() { assertFalse( client(REMOTE_CLUSTER).admin() .cluster() - .prepareHealth(remoteIndex) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java index e618a1b75cc4d..e6ecd9f1e3779 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java @@ -115,7 +115,7 @@ public void testRewriteCompoundRetrieverShouldThrowForPartialResults() throws Ex throw new IllegalStateException("node did not stop"); } assertBusy(() -> { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(testIndex) + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, testIndex) .setWaitForStatus(ClusterHealthStatus.RED) // we are now known red because the primary shard is missing .setWaitForEvents(Priority.LANGUID) // ensures that the update has occurred .execute() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java index 433f004acdd77..17a0d6441ca47 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java @@ -59,7 +59,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { } refresh(); internalCluster().stopRandomDataNode(); - clusterAdmin().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get(); + 
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForStatus(ClusterHealthStatus.RED).get(); String[] preferences = new String[] { "_local", "_prefer_nodes:somenode", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java index 816fe48e5d97f..439534c3e1743 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java @@ -68,7 +68,7 @@ public void testNodeSelection() { client.prepareSearch().setQuery(matchAllQuery()).get().decRef(); } - ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().get(); + ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get(); Map coordinatingNodes = clusterStateResponse.getState().nodes().getCoordinatingOnlyNodes(); assertEquals(1, coordinatingNodes.size()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java index 03c217266d527..24a3d3ac422f3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -70,9 +70,9 @@ public void cleanup() throws Exception { public void testSimpleScrollQueryThenFetch() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); @@ -119,9 +119,9 @@ public void testSimpleScrollQueryThenFetch() throws Exception { public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { String routing = "0"; @@ -189,7 +189,7 @@ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws E public void testScrollAndUpdateIndex() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 5)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + 
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 500; i++) { prepareIndex("test").setId(Integer.toString(i)) @@ -241,9 +241,9 @@ public void testScrollAndUpdateIndex() throws Exception { public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); @@ -360,9 +360,9 @@ public void testClearIllegalScrollId() throws Exception { public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); @@ -553,7 +553,7 @@ public void testCloseAndReopenOrDeleteWithActiveScroll() { public void testScrollInvalidDefaultKeepAlive() throws IOException { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "1m").put("search.default_keep_alive", "2m")) ); assertThat(exc.getMessage(), containsString("was (2m > 1m)")); @@ -564,7 +564,8 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { exc = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) ); assertThat(exc.getMessage(), containsString("was (3m > 2m)")); @@ -572,7 +573,8 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { exc = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")) ); assertThat(exc.getMessage(), containsString("was (1m > 30s)")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java index 23384d1b199f9..e5ca2c6968bb9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java @@ -156,7 +156,7 @@ public void testSimpleStats() throws Exception { } private Set nodeIdsWithIndex(String... indices) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); Set nodes = new HashSet<>(); for (ShardIterator shardIterator : allAssignedShardsGrouped) { @@ -239,7 +239,7 @@ public void testOpenContexts() { } protected int numAssignedShards(String... indices) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); return allAssignedShardsGrouped.size(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index f61cce863ce59..5bdf156e39999 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -971,7 +971,7 @@ public void testQueuedSnapshotsWaitingForShardReady() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth(testIndex).get().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, testIndex).get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index 041d722591391..08f9d74ab477e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -106,7 +106,7 @@ public void testShouldRestoreOnlySnapshotMetadata() throws Exception { .setWaitForCompletion(true) .get(); - var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); + var metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getMetadata(); logger.info("check that custom persistent metadata [{}] is correctly restored", metadata); if (isSnapshotMetadataSet) { assertThat(metadata.custom(SnapshotMetadata.TYPE).getData(), equalTo("before_snapshot_s")); @@ -127,7 +127,7 @@ public void testShouldKeepGatewayMetadataAfterRestart() throws Exception { internalCluster().fullRestart(); ensureYellow(); - var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); + var metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getMetadata(); logger.info("check that gateway custom metadata [{}] survived full cluster restart", metadata); assertThat(metadata.custom(GatewayMetadata.TYPE).getData(), equalTo("before_restart_s_gw")); 
assertThat(metadata.custom(ApiMetadata.TYPE), nullValue()); @@ -140,7 +140,7 @@ public void testShouldExposeApiMetadata() throws Exception { metadataBuilder.putCustom(NonApiMetadata.TYPE, new NonApiMetadata("before_restart_ns")); })); - var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); + var metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getMetadata(); logger.info("check that api custom metadata [{}] is visible via api", metadata); assertThat(metadata.custom(ApiMetadata.TYPE).getData(), equalTo("before_restart_s_gw")); assertThat(metadata.custom(NonApiMetadata.TYPE), nullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 19f051404bce0..3788f2dd2cb61 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -245,7 +245,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> shutdown one of the nodes"); internalCluster().stopRandomDataNode(); assertThat( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(1)) .setWaitForNodes("<2") @@ -432,7 +432,7 @@ public boolean clearData(String nodeName) { }); assertThat( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(1)) .setWaitForNodes("2") @@ -666,7 +666,7 @@ public void testRestoreShrinkIndex() throws Exception { assertAcked(indicesAdmin().prepareDelete(sourceIdx).get()); assertAcked(indicesAdmin().prepareDelete(shrunkIdx).get()); internalCluster().stopRandomDataNode(); - clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("1"); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("1"); logger.info("--> start a new data node"); final Settings dataSettings = Settings.builder() @@ -674,7 +674,7 @@ public void testRestoreShrinkIndex() throws Exception { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) // to get a new node id .build(); internalCluster().startDataOnlyNode(dataSettings); - clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2"); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2"); logger.info("--> restore the shrunk index and ensure all shards are allocated"); RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) @@ -1127,7 +1127,7 @@ public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth(indexName).get().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index 0fd96b96c8756..2daa36ee00a01 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -142,7 +142,7 @@ public void testConcurrentDeleteFromOtherCluster() { + repoNameOnFirstCluster + "] concurrent modification of the index-N file, expected current generation [2] but it was not found in " + "the repository. The last cluster to write to this repository was [" - + secondCluster.client().admin().cluster().prepareState().get().getState().metadata().clusterUUID() + + secondCluster.client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().clusterUUID() + "] at generation [4]." ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index d4c0a4c80a3b5..6870a1d6b2649 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -75,7 +75,12 @@ public void testRepositoryCreation() throws Exception { assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles)); logger.info("--> check that repository is really there"); - ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get(); + ClusterStateResponse clusterStateResponse = client.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .get(); Metadata metadata = clusterStateResponse.getState().getMetadata(); RepositoriesMetadata repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); assertThat(repositoriesMetadata, notNullValue()); @@ -86,7 +91,7 @@ public void testRepositoryCreation() throws Exception { createRepository("test-repo-2", "fs"); logger.info("--> check that both repositories are in cluster state"); - clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get(); + clusterStateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).get(); metadata = clusterStateResponse.getState().getMetadata(); repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); assertThat(repositoriesMetadata, notNullValue()); @@ -117,7 +122,7 @@ public void testRepositoryCreation() throws Exception { .isAcknowledged(), equalTo(true) ); - assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); + assertEquals(beforeStateUuid, client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).clear().get().getState().stateUUID()); logger.info("--> delete repository test-repo-1"); client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1").get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index 7626e59cd1b9d..725fcbc1a5849 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -224,7 +224,7 @@ public void testRestoreIncreasesPrimaryTerms() { } } - final 
IndexMetadata indexMetadata = clusterAdmin().prepareState() + final IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setIndices(indexName) .setMetadata(true) @@ -251,7 +251,7 @@ public void testRestoreIncreasesPrimaryTerms() { assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(numPrimaries)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); - final IndexMetadata restoredIndexMetadata = clusterAdmin().prepareState() + final IndexMetadata restoredIndexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setIndices(indexName) .setMetadata(true) @@ -307,7 +307,13 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that old mapping is restored"); - MappingMetadata mappings = clusterAdmin().prepareState().get().getState().getMetadata().getIndices().get("test-idx").mapping(); + MappingMetadata mappings = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getMetadata() + .getIndices() + .get("test-idx") + .mapping(); assertThat(mappings.sourceAsMap().toString(), containsString("baz")); assertThat(mappings.sourceAsMap().toString(), not(containsString("foo"))); @@ -818,7 +824,14 @@ public void testRecreateBlocksOnRestore() throws Exception { .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - ClusterBlocks blocks = client.admin().cluster().prepareState().clear().setBlocks(true).get().getState().blocks(); + ClusterBlocks blocks = client.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setBlocks(true) + .get() + .getState() + .blocks(); // compute current index settings (as we cannot query them if they contain SETTING_BLOCKS_METADATA) Settings mergedSettings = Settings.builder().put(initialSettings).put(changedSettings).build(); logger.info("--> merged block settings {}", mergedSettings); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index cd57401550f12..08daeaaec016b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -228,7 +228,7 @@ public void testBasicWorkFlow() throws Exception { ensureGreen(); assertDocCount("test-idx-1", 100); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.getMetadata().hasIndex("test-idx-1"), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex("test-idx-2"), equalTo(false)); @@ -511,7 +511,7 @@ public void testDataFileFailureDuringRestore() throws Exception { // same node again during the same reroute operation. Then another reroute // operation is scheduled, but the RestoreInProgressAllocationDecider will // block the shard to be assigned again because it failed during restore. 
- final ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().get(); + final ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get(); assertEquals(1, clusterStateResponse.getState().getNodes().getDataNodes().size()); assertEquals( restoreInfo.failedShards(), @@ -663,7 +663,10 @@ private void unrestorableUseCase( assertThat(restoreResponse.getRestoreInfo().totalShards(), equalTo(numShards.numPrimaries)); assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(0)); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setCustoms(true).setRoutingTable(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .setCustoms(true) + .setRoutingTable(true) + .get(); // check that there is no restore in progress RestoreInProgress restoreInProgress = clusterStateResponse.getState().custom(RestoreInProgress.TYPE); @@ -867,7 +870,7 @@ public void testSnapshotClosedIndex() throws Exception { ensureGreen(); logger.info("--> closing index test-idx-closed"); assertAcked(client.admin().indices().prepareClose("test-idx-closed")); - ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get(); + ClusterStateResponse stateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test-idx-closed").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), notNullValue()); @@ -1261,7 +1264,7 @@ public void testSnapshotRelocatingPrimary() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth("test-idx").get().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test-idx").get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); @@ -2185,7 +2188,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards()) ); assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), containsInAnyOrder(normalIndex, hiddenIndex, dottedHiddenIndex)); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.getMetadata().hasIndex(normalIndex), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex(hiddenIndex), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex(dottedHiddenIndex), equalTo(true)); @@ -2205,7 +2208,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards()) ); assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), containsInAnyOrder(normalIndex, hiddenIndex)); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.getMetadata().hasIndex(normalIndex), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex(hiddenIndex), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex(dottedHiddenIndex), equalTo(false)); @@ -2225,7 +2228,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { 
equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards()) ); assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), containsInAnyOrder(hiddenIndex)); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.getMetadata().hasIndex(normalIndex), equalTo(false)); assertThat(clusterState.getMetadata().hasIndex(hiddenIndex), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex(dottedHiddenIndex), equalTo(false)); @@ -2245,7 +2248,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards()) ); assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), containsInAnyOrder(dottedHiddenIndex)); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.getMetadata().hasIndex(normalIndex), equalTo(false)); assertThat(clusterState.getMetadata().hasIndex(hiddenIndex), equalTo(false)); assertThat(clusterState.getMetadata().hasIndex(dottedHiddenIndex), equalTo(true)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java index 6c91db0ad7228..92e0b437cbebb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java @@ -36,14 +36,14 @@ public void testExceptionWhenRestoringPersistentSettings() { Client client = client(); Consumer setSettingValue = value -> client.admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(BrokenSettingPlugin.BROKEN_SETTING.getKey(), value)) .get(); Consumer assertSettingValue = value -> assertThat( client.admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setRoutingTable(false) .setNodes(false) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index 2d1e16dc64273..3d16293c1462b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -177,7 +177,12 @@ public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception { SubscribableListener.newForked( l -> client().execute( TransportAddVotingConfigExclusionsAction.TYPE, - new AddVotingConfigExclusionsRequest(Strings.EMPTY_ARRAY, new String[] { masterName }, TimeValue.timeValueSeconds(10)), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + Strings.EMPTY_ARRAY, + new String[] { masterName }, + TimeValue.timeValueSeconds(10) + ), l ) ) @@ -212,7 +217,7 @@ public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception { // flush master queue to ensure the completion is applied everywhere safeAwait( SubscribableListener.newForked( - l -> client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute(l) + l -> 
client().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).execute(l) ) ); @@ -230,7 +235,7 @@ public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception { } safeAwait(SubscribableListener.newForked(l -> { - final var clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); + final var clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT); clearVotingConfigExclusionsRequest.setWaitForRemoval(false); client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearVotingConfigExclusionsRequest, l); })); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index fb282b4bf6a48..d4e27d5630c5b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -738,7 +738,7 @@ public void testInfiniteTimeout() throws Exception { try { waitForBlockOnAnyDataNode("test-repo"); // Make sure that the create-snapshot task completes on master - assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); final List snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setMasterNodeTimeout(TimeValue.MINUS_ONE) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 9c9076dff00e2..e8d61e4677c98 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -89,7 +89,12 @@ public class SnapshotStressTestsIT extends AbstractSnapshotIntegTestCase { public void testRandomActivities() throws InterruptedException { - final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState().clear().setNodes(true).get().getState().nodes(); + final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get() + .getState() + .nodes(); new TrackedCluster(internalCluster(), nodeNames(discoveryNodes.getMasterNodes()), nodeNames(discoveryNodes.getDataNodes())).run(); disableRepoConsistencyCheck("have not necessarily written to all repositories"); } @@ -354,14 +359,20 @@ public void run() throws InterruptedException { if (failedPermitAcquisitions.isEmpty() == false) { logger.warn("--> failed to acquire all permits: {}", failedPermitAcquisitions); - logger.info("--> current cluster state:\n{}", Strings.toString(clusterAdmin().prepareState().get().getState(), true, true)); + logger.info( + "--> current cluster state:\n{}", + Strings.toString(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), true, true) + ); fail("failed to acquire all permits: " + failedPermitAcquisitions); } logger.info("--> acquired all permits"); if (ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS) == false) { logger.warn("--> threadpool termination timed out"); - logger.info("--> current cluster state:\n{}", Strings.toString(clusterAdmin().prepareState().get().getState(), true, true)); + 
logger.info( + "--> current cluster state:\n{}", + Strings.toString(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), true, true) + ); } } @@ -381,7 +392,7 @@ private void acquirePermitsAtEnd( logger.warn("--> failed to acquire permit [{}]", label); logger.info( "--> current cluster state:\n{}", - Strings.toString(clusterAdmin().prepareState().get().getState(), true, true) + Strings.toString(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), true, true) ); HotThreads.logLocalHotThreads( logger, @@ -1604,7 +1615,7 @@ Releasable tryAcquirePartialSnapshottingPermit() { // Prepares a health request with twice the default (30s) timeout that waits for all cluster tasks to finish as well as all cluster // nodes before returning private static ClusterHealthRequestBuilder prepareClusterHealthRequest(String... targetIndexNames) { - return clusterAdmin().prepareHealth(targetIndexNames) + return clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, targetIndexNames) .setTimeout(TimeValue.timeValueSeconds(60)) .setWaitForNodes(Integer.toString(internalCluster().getNodeNames().length)) .setWaitForEvents(Priority.LANGUID); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index 706ceaad7905c..32f76bdcc61e8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -704,7 +704,11 @@ public void testPartialSnapshotsOfSystemIndexRemovesFeatureState() throws Except // Stop a random data node so we lose a shard from the partial index internalCluster().stopRandomDataNode(); - assertBusy(() -> assertEquals(ClusterHealthStatus.RED, clusterAdmin().prepareHealth().get().getStatus()), 30, TimeUnit.SECONDS); + assertBusy( + () -> assertEquals(ClusterHealthStatus.RED, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getStatus()), + 30, + TimeUnit.SECONDS + ); // Get ready to block blockMasterFromFinalizingSnapshotOnIndexFile(REPO_NAME); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index 82e4e4123e4fe..139ef58e4292e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -40,24 +40,28 @@ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest { - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public Request(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java index 11bdd41f458d3..f71def58820a0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java @@ -9,7 +9,6 @@ package 
org.elasticsearch.action.admin.cluster.desirednodes; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; @@ -27,6 +26,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; @@ -102,17 +102,12 @@ public ClusterState afterBatchExecution(ClusterState clusterState, boolean clust } public static class Request extends AcknowledgedRequest { - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } public Request(StreamInput in) throws IOException { super(in); } - - @Override - public ActionRequestValidationException validate() { - return null; - } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index 3d8cdb4b405f8..550db4892a673 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -47,8 +48,15 @@ public class UpdateDesiredNodesRequest extends AcknowledgedRequest DesiredNode.fromXContent(p), NODES_FIELD); } - public UpdateDesiredNodesRequest(String historyID, long version, List nodes, boolean dryRun) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public UpdateDesiredNodesRequest( + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String historyID, + long version, + List nodes, + boolean dryRun + ) { + super(masterNodeTimeout, ackTimeout); assert historyID != null; assert nodes != null; this.historyID = historyID; @@ -80,10 +88,16 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static UpdateDesiredNodesRequest fromXContent(String historyID, long version, boolean dryRun, XContentParser parser) - throws IOException { + public static UpdateDesiredNodesRequest fromXContent( + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String historyID, + long version, + boolean dryRun, + XContentParser parser + ) throws IOException { List nodes = PARSER.parse(parser, null); - return new UpdateDesiredNodesRequest(historyID, version, nodes, dryRun); + return new UpdateDesiredNodesRequest(masterNodeTimeout, ackTimeout, historyID, version, nodes, dryRun); } public String getHistoryID() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 
2b60e2d4a5ffa..2344c8f99ceda 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -37,12 +37,12 @@ public class ClusterHealthRequest extends MasterNodeReadRequest { - public ClusterHealthRequestBuilder(ElasticsearchClient client) { - super(client, TransportClusterHealthAction.TYPE, new ClusterHealthRequest()); + public ClusterHealthRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout) { + super(client, TransportClusterHealthAction.TYPE, new ClusterHealthRequest(masterNodeTimeout)); } public ClusterHealthRequestBuilder setIndices(String... indices) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusRequest.java index ba2ad88917a96..dfb229c4d5336 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusRequest.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -19,8 +20,8 @@ */ public class GetFeatureUpgradeStatusRequest extends MasterNodeRequest { - public GetFeatureUpgradeStatusRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetFeatureUpgradeStatusRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public GetFeatureUpgradeStatusRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java index 36a90ae9afe33..84321de66a339 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -19,8 +20,8 @@ */ public class PostFeatureUpgradeRequest extends MasterNodeRequest { - public PostFeatureUpgradeRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public PostFeatureUpgradeRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public PostFeatureUpgradeRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java index 5bde01195e35c..1d70d25478b12 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java @@ -33,8 +33,8 @@ public class PrevalidateNodeRemovalRequest extends MasterNodeReadRequest 
{ - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public Request(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index c4e40f1b208b4..bf874c3c38870 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -35,10 +36,13 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest PARSER = new ObjectParser<>( + public interface Factory { + ClusterUpdateSettingsRequest create(); + } + + private static final ObjectParser PARSER = ObjectParser.fromBuilder( "cluster_update_settings_request", - false, - ClusterUpdateSettingsRequest::new + Factory::create ); static { @@ -55,8 +59,8 @@ public ClusterUpdateSettingsRequest(StreamInput in) throws IOException { persistentSettings = readSettingsFromStream(in); } - public ClusterUpdateSettingsRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public ClusterUpdateSettingsRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } @Override @@ -188,7 +192,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static ClusterUpdateSettingsRequest fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + public static ClusterUpdateSettingsRequest fromXContent(Factory factory, XContentParser parser) { + return PARSER.apply(parser, factory); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 31ab3223bbb49..a7682add8b092 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.XContentType; import java.util.Map; @@ -23,8 +24,8 @@ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuil ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> { - public ClusterUpdateSettingsRequestBuilder(ElasticsearchClient client) { - super(client, ClusterUpdateSettingsAction.INSTANCE, new ClusterUpdateSettingsRequest()); + public ClusterUpdateSettingsRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(client, ClusterUpdateSettingsAction.INSTANCE, 
new ClusterUpdateSettingsRequest(masterNodeTimeout, ackTimeout)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java index 8990112a30579..d64de6e452e7c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java @@ -38,8 +38,8 @@ public class ClusterStateRequest extends MasterNodeReadRequest { - public ClusterStateRequestBuilder(ElasticsearchClient client) { - super(client, ClusterStateAction.INSTANCE, new ClusterStateRequest()); + public ClusterStateRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout) { + super(client, ClusterStateAction.INSTANCE, new ClusterStateRequest(masterNodeTimeout)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java index b27a8d0aacb72..976948dc722fc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java @@ -11,13 +11,14 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; public class PendingClusterTasksRequest extends MasterNodeReadRequest { - public PendingClusterTasksRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public PendingClusterTasksRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public PendingClusterTasksRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index 4d5a670925b5b..1509e398fbffa 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -158,8 +158,8 @@ public void health(final ClusterHealthRequest request, final ActionListener state(final ClusterStateRequest request) { @@ -170,8 +170,8 @@ public void state(final ClusterStateRequest request, final ActionListener updateSettings(final ClusterUpdateSettingsRequest request) { @@ -182,8 +182,8 @@ public void updateSettings(final ClusterUpdateSettingsRequest request, final Act execute(ClusterUpdateSettingsAction.INSTANCE, request, listener); } - public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { - return new ClusterUpdateSettingsRequestBuilder(this); + public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + return new ClusterUpdateSettingsRequestBuilder(this, masterNodeTimeout, ackTimeout); } public ActionFuture nodesInfo(final NodesInfoRequest request) { diff --git a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java index 5bead071cb4b6..ebc0d5cdf50ec 100644 --- 
a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsUpdater; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; @@ -58,7 +59,11 @@ private static ClusterUpdateSettingsRequest prepare(Object input, Set pr toDelete.removeAll(newSettings.keys()); toDelete.forEach(k -> newSettings.put(k, (String) null)); - final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest(); + final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest( + // timeouts are unused, any value will do + TimeValue.THIRTY_SECONDS, + TimeValue.THIRTY_SECONDS + ); clusterUpdateSettingsRequest.persistentSettings(newSettings); return clusterUpdateSettingsRequest; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java index 74ecc85e960b5..28dc14e3112f5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java @@ -77,13 +77,12 @@ static AddVotingConfigExclusionsRequest resolveVotingConfigExclusionsRequest(fin nodeNames = request.param("node_names"); } - final var resolvedRequest = new AddVotingConfigExclusionsRequest( + return new AddVotingConfigExclusionsRequest( + getMasterNodeTimeout(request), Strings.splitStringByCommaToArray(nodeIds), Strings.splitStringByCommaToArray(nodeNames), request.paramAsTime("timeout", DEFAULT_TIMEOUT) ); - - return resolvedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java index ff26648476926..0e7b6ecac25d8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java @@ -45,8 +45,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest request, final No } static ClearVotingConfigExclusionsRequest resolveVotingConfigExclusionsRequest(final RestRequest request) { - final var resolvedRequest = new ClearVotingConfigExclusionsRequest(); - resolvedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var resolvedRequest = new ClearVotingConfigExclusionsRequest(getMasterNodeTimeout(request)); resolvedRequest.setTimeout(resolvedRequest.masterNodeTimeout()); resolvedRequest.setWaitForRemoval(request.paramAsBoolean("wait_for_removal", resolvedRequest.getWaitForRemoval())); return resolvedRequest; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index 2d2d241c35086..8ccaa9c442115 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -65,7 +65,6 @@ public String getName() { private static void setUpRequestParams(MasterNodeReadRequest clusterRequest, RestRequest request) { clusterRequest.local(request.paramAsBoolean("local", clusterRequest.local())); - clusterRequest.masterNodeTimeout(getMasterNodeTimeout(request)); } @Override @@ -76,7 +75,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return prepareLegacyRequest(request, client, renderDefaults); } - ClusterGetSettingsAction.Request clusterSettingsRequest = new ClusterGetSettingsAction.Request(); + ClusterGetSettingsAction.Request clusterSettingsRequest = new ClusterGetSettingsAction.Request(getMasterNodeTimeout(request)); setUpRequestParams(clusterSettingsRequest, request); @@ -90,7 +89,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } private RestChannelConsumer prepareLegacyRequest(final RestRequest request, final NodeClient client, final boolean renderDefaults) { - ClusterStateRequest clusterStateRequest = new ClusterStateRequest().routingTable(false).nodes(false); + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)).routingTable(false).nodes(false); setUpRequestParams(clusterStateRequest, request); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java index f5910f76f36c9..82325a22ac433 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; @@ -32,7 +33,6 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterHealthAction extends BaseRestHandler { @@ -62,15 +62,15 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC public static ClusterHealthRequest fromRequest(final RestRequest request) { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - final ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest(indices); + + final var masterNodeTimeout = request.hasParam(RestUtils.REST_MASTER_TIMEOUT_PARAM) + ? 
RestUtils.getMasterNodeTimeout(request) + : request.paramAsTime("timeout", RestUtils.REST_MASTER_TIMEOUT_DEFAULT); + + final ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest(masterNodeTimeout, indices); clusterHealthRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterHealthRequest.indicesOptions())); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout())); - if (request.hasParam("master_timeout")) { - clusterHealthRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - } else { - clusterHealthRequest.masterNodeTimeout(clusterHealthRequest.timeout()); - } String waitForStatus = request.param("wait_for_status"); if (waitForStatus != null) { clusterHealthRequest.waitForStatus(ClusterHealthStatus.valueOf(waitForStatus.toUpperCase(Locale.ROOT))); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java index cb9be980f35ce..2127a632be0b1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java @@ -79,10 +79,9 @@ public boolean allowSystemIndexAccessByDefault() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterStateRequest.indicesOptions())); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); if (request.hasParam("wait_for_metadata_version")) { clusterStateRequest.waitForMetadataVersion(request.paramAsLong("wait_for_metadata_version", 0)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java index 823e459c35e23..2280460f5b3f2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java @@ -45,9 +45,10 @@ public String getName() { @Override @SuppressWarnings("unchecked") public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest(); - clusterUpdateSettingsRequest.ackTimeout(getAckTimeout(request)); - clusterUpdateSettingsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest( + getMasterNodeTimeout(request), + getAckTimeout(request) + ); Map source; try (XContentParser parser = request.contentParser()) { source = parser.map(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java 
index ae375309c301f..21d4e5c100dcb 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java @@ -32,8 +32,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final GetDesiredNodesAction.Request getDesiredNodesRequest = new GetDesiredNodesAction.Request(); - getDesiredNodesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final GetDesiredNodesAction.Request getDesiredNodesRequest = new GetDesiredNodesAction.Request(getMasterNodeTimeout(request)); return restChannel -> client.execute( GetDesiredNodesAction.INSTANCE, getDesiredNodesRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java index 13fbf3504ebc0..0bb609ff1123d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java @@ -41,10 +41,7 @@ public boolean allowSystemIndexAccessByDefault() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - - final GetFeatureUpgradeStatusRequest req = new GetFeatureUpgradeStatusRequest(); - req.masterNodeTimeout(getMasterNodeTimeout(request)); - - return restChannel -> { client.execute(GetFeatureUpgradeStatusAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; + final GetFeatureUpgradeStatusRequest req = new GetFeatureUpgradeStatusRequest(getMasterNodeTimeout(request)); + return restChannel -> client.execute(GetFeatureUpgradeStatusAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java index c38f5effc385a..73701e771eb41 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java @@ -38,8 +38,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); - pendingClusterTasksRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(getMasterNodeTimeout(request)); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); return channel -> client.execute( TransportPendingClusterTasksAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java index cb9af32955abb..64f0f7897b930 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java @@ -41,10 +41,7 @@ public boolean allowSystemIndexAccessByDefault() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - - final PostFeatureUpgradeRequest req = new PostFeatureUpgradeRequest(); - req.masterNodeTimeout(getMasterNodeTimeout(request)); - - return restChannel -> { client.execute(PostFeatureUpgradeAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; + final PostFeatureUpgradeRequest req = new PostFeatureUpgradeRequest(getMasterNodeTimeout(request)); + return restChannel -> client.execute(PostFeatureUpgradeAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java index 119f6660f2a33..71c36815e99a2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java @@ -14,13 +14,13 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPrevalidateNodeRemovalAction extends BaseRestHandler { @@ -36,6 +36,9 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + final var masterNodeTimeout = request.hasParam(RestUtils.REST_MASTER_TIMEOUT_PARAM) + ? 
RestUtils.getMasterNodeTimeout(request) + : request.paramAsTime("timeout", RestUtils.REST_MASTER_TIMEOUT_DEFAULT); String[] ids = request.paramAsStringArray("ids", Strings.EMPTY_ARRAY); String[] names = request.paramAsStringArray("names", Strings.EMPTY_ARRAY); String[] externalIds = request.paramAsStringArray("external_ids", Strings.EMPTY_ARRAY); @@ -43,13 +46,8 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli .setNames(names) .setIds(ids) .setExternalIds(externalIds) - .build(); + .build(masterNodeTimeout); prevalidationRequest.timeout(request.paramAsTime("timeout", prevalidationRequest.timeout())); - if (request.hasParam("master_timeout")) { - prevalidationRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - } else { - prevalidationRequest.masterNodeTimeout(prevalidationRequest.timeout()); - } return channel -> client.execute( PrevalidateNodeRemovalAction.INSTANCE, prevalidationRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index 38b191ba9f006..0e06f37b1e096 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.function.Predicate; +import static org.elasticsearch.rest.RestUtils.getAckTimeout; import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestUpdateDesiredNodesAction extends BaseRestHandler { @@ -55,7 +56,14 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli final UpdateDesiredNodesRequest updateDesiredNodesRequest; try (XContentParser parser = request.contentParser()) { - updateDesiredNodesRequest = UpdateDesiredNodesRequest.fromXContent(historyId, version, dryRun, parser); + updateDesiredNodesRequest = UpdateDesiredNodesRequest.fromXContent( + getMasterNodeTimeout(request), + getAckTimeout(request), + historyId, + version, + dryRun, + parser + ); } if (clusterSupportsFeature.test(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED)) { @@ -68,7 +76,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } } - updateDesiredNodesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> client.execute( UpdateDesiredNodesAction.INSTANCE, updateDesiredNodesRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index d185f3f921d38..51c58ff79fb6d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -59,10 +59,9 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes", "data:true")); - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.clear().routingTable(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - 
clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java index 4faf44ff8c5a7..52dc1bcfb440f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java @@ -74,10 +74,9 @@ protected Table getTableWithHeader(RestRequest request) { @Override protected BaseRestHandler.RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { final String matchPattern = request.hasParam("name") ? request.param("name") : null; - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.clear().metadata(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(ClusterStateResponse clusterStateResponse) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java index 587423d0fa1fd..608cd222c27ba 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestResponseListener; @@ -50,9 +51,9 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { - ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest(); + final var clusterHealthRequest = new ClusterHealthRequest(RestUtils.getMasterNodeTimeout(request)); - return channel -> client.admin().cluster().health(clusterHealthRequest, new RestResponseListener(channel) { + return channel -> client.admin().cluster().health(clusterHealthRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(final ClusterHealthResponse health) throws Exception { return RestTable.buildResponse(buildTable(health, request), channel); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index edf8f12b69579..e7c304cdefcc7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -127,13 +127,12 @@ public RestResponse buildResponse(Void ignored) throws Exception { client.admin() .cluster() - .prepareState() + .prepareState(masterNodeTimeout) .clear() .setMetadata(true) .setRoutingTable(true) 
.setIndices(indices) .setIndicesOptions(subRequestIndicesOptions) - .setMasterNodeTimeout(masterNodeTimeout) .execute(listeners.acquire(clusterStateRef::set)); client.admin() diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java index b0805bf423453..a9b4e0cfdee2c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java @@ -45,10 +45,9 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java index 83e6ea35ec520..57113bb226b31 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java @@ -53,10 +53,9 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index d2162544abb31..4b70922dd796d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -85,9 +85,8 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final boolean fullId = request.paramAsBoolean("full_id", false); - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.clear() diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java index 5ed0cd722d5db..4b54eacb1f38c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java @@ -45,8 +45,7 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { - PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); - pendingClusterTasksRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(getMasterNodeTimeout(request)); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); return channel -> client.execute( TransportPendingClusterTasksAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java index 0e459b53d203c..a7fa0699be95e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java @@ -54,10 +54,9 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final boolean includeBootstrapPlugins = request.paramAsBoolean("include_bootstrap", false); - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java index 1f11a662c0abf..3e7ce29d40120 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java @@ -57,9 +57,8 @@ public boolean allowSystemIndexAccessByDefault() { protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices); final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index fffa272d8fd12..e9b23c49a97de 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -84,8 +84,7 @@ protected void documentation(StringBuilder sb) { public RestChannelConsumer doCatRequest(final RestRequest 
request, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); - final var clusterStateRequest = new ClusterStateRequest(); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices).indicesOptions(IndicesOptions.strictExpandHidden()); return channel -> { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 95fc945226f6f..2a61756429d1e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -70,10 +70,9 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 3c74e46851504..6b5f1786f6699 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -148,7 +148,10 @@ void collectNodes(ActionListener> listener) { }), RemoteClusterNodesAction.Response::new, TransportResponseHandler.TRANSPORT_WORKER) ); } else { - final ClusterStateRequest request = new ClusterStateRequest(); + final ClusterStateRequest request = new ClusterStateRequest( + /* Timeout doesn't really matter with .local(true) */ + TimeValue.THIRTY_SECONDS + ); request.clear(); request.nodes(true); request.local(true); // run this on the node that gets the request it's as good as any other diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 75903b5bf72ab..c25c065706388 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.threadpool.ThreadPool; @@ -116,6 +117,8 @@ public class SniffConnectionStrategy extends RemoteConnectionStrategy { static final int CHANNELS_PER_CONNECTION = 6; + private static final TimeValue SNIFF_REQUEST_TIMEOUT = TimeValue.THIRTY_SECONDS; // TODO make configurable? 
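The `// TODO make configurable?` left on the new `SNIFF_REQUEST_TIMEOUT` constant above is not resolved by this patch. A minimal sketch of one way it could later be addressed, assuming a node-scoped time setting; the setting key below is invented for illustration and is not part of this change:

```java
// Hypothetical sketch only: replacing the hard-coded SNIFF_REQUEST_TIMEOUT constant with a
// registered setting. The setting key is an assumption, not something introduced by this patch.
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;

final class SniffRequestTimeoutSketch {
    // Node-scoped time setting defaulting to the same 30s value as the constant in the patch.
    static final Setting<TimeValue> SNIFF_REQUEST_TIMEOUT_SETTING = Setting.timeSetting(
        "cluster.remote.sniff_request_timeout", // hypothetical key
        TimeValue.timeValueSeconds(30),
        Setting.Property.NodeScope
    );

    static TimeValue sniffRequestTimeout(Settings settings) {
        return SNIFF_REQUEST_TIMEOUT_SETTING.get(settings);
    }
}
```

The constant-based approach taken in the patch keeps behaviour identical to the previous implicit default while deferring the configurability question.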
+ private static final Predicate DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion()) && (node.isMasterNode() == false || node.canContainData() || node.isIngestNode()); @@ -317,7 +320,7 @@ private void collectRemoteNodes(Iterator> seedNodesSuppl sniffResponseHandler = new RemoteClusterNodesSniffResponseHandler(connection, listener, seedNodesSuppliers); } else { action = ClusterStateAction.NAME; - final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(SNIFF_REQUEST_TIMEOUT); clusterStateRequest.clear(); clusterStateRequest.nodes(true); request = clusterStateRequest; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java index dc1528e87f232..78f6f8d429b1c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequestTests.java @@ -35,6 +35,7 @@ public class AddVotingConfigExclusionsRequestTests extends ESTestCase { public void testSerializationForNodeIdOrNodeName() throws IOException { AddVotingConfigExclusionsRequest originalRequest = new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, new String[] { "nodeId1", "nodeId2" }, Strings.EMPTY_ARRAY, TimeValue.ZERO @@ -49,7 +50,7 @@ public void testSerializationForNodeIdOrNodeName() throws IOException { assertThat(deserialized.getNodeNames(), equalTo(originalRequest.getNodeNames())); assertThat(deserialized.getTimeout(), equalTo(originalRequest.getTimeout())); - originalRequest = new AddVotingConfigExclusionsRequest("nodeName1", "nodeName2"); + originalRequest = new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "nodeName1", "nodeName2"); deserialized = copyWriteable(originalRequest, writableRegistry(), AddVotingConfigExclusionsRequest::new); assertThat(deserialized.getNodeIds(), equalTo(originalRequest.getNodeIds())); @@ -80,17 +81,26 @@ public void testResolve() { .build(); assertThat( - new AddVotingConfigExclusionsRequest("local", "other1", "other2").resolveVotingConfigExclusions(clusterState), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "local", "other1", "other2").resolveVotingConfigExclusions( + clusterState + ), containsInAnyOrder(localNodeExclusion, otherNode1Exclusion, otherNode2Exclusion) ); - assertThat(new AddVotingConfigExclusionsRequest("local").resolveVotingConfigExclusions(clusterState), contains(localNodeExclusion)); assertThat( - new AddVotingConfigExclusionsRequest("other1", "other2").resolveVotingConfigExclusions(clusterState), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "local").resolveVotingConfigExclusions(clusterState), + contains(localNodeExclusion) + ); + assertThat( + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1", "other2").resolveVotingConfigExclusions(clusterState), containsInAnyOrder(otherNode1Exclusion, otherNode2Exclusion) ); assertThat( - new AddVotingConfigExclusionsRequest(Strings.EMPTY_ARRAY, new String[] { "other1", "other2" }, TimeValue.ZERO) - .resolveVotingConfigExclusions(clusterState), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + Strings.EMPTY_ARRAY, + new String[] { "other1", "other2" }, + TimeValue.ZERO + 
).resolveVotingConfigExclusions(clusterState), containsInAnyOrder(otherNode1Exclusion, otherNode2Exclusion) ); } @@ -99,7 +109,7 @@ public void testResolveAllNodeIdentifiersNullOrEmpty() { assertThat( expectThrows( IllegalArgumentException.class, - () -> new AddVotingConfigExclusionsRequest(Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY, TimeValue.ZERO) + () -> new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY, TimeValue.ZERO) ).getMessage(), equalTo(NODE_IDENTIFIERS_INCORRECTLY_SET_MSG) ); @@ -109,7 +119,12 @@ public void testResolveMoreThanOneNodeIdentifiersSet() { assertThat( expectThrows( IllegalArgumentException.class, - () -> new AddVotingConfigExclusionsRequest(new String[] { "nodeId" }, new String[] { "nodeName" }, TimeValue.ZERO) + () -> new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + new String[] { "nodeId" }, + new String[] { "nodeName" }, + TimeValue.ZERO + ) ).getMessage(), equalTo(NODE_IDENTIFIERS_INCORRECTLY_SET_MSG) ); @@ -143,14 +158,22 @@ public void testResolveByNodeIds() { .build(); assertThat( - new AddVotingConfigExclusionsRequest(new String[] { "nodeId1", "nodeId2" }, Strings.EMPTY_ARRAY, TimeValue.ZERO) - .resolveVotingConfigExclusions(clusterState), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + new String[] { "nodeId1", "nodeId2" }, + Strings.EMPTY_ARRAY, + TimeValue.ZERO + ).resolveVotingConfigExclusions(clusterState), containsInAnyOrder(node1Exclusion, node2Exclusion) ); assertThat( - new AddVotingConfigExclusionsRequest(new String[] { "nodeId1", "unresolvableNodeId" }, Strings.EMPTY_ARRAY, TimeValue.ZERO) - .resolveVotingConfigExclusions(clusterState), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + new String[] { "nodeId1", "unresolvableNodeId" }, + Strings.EMPTY_ARRAY, + TimeValue.ZERO + ).resolveVotingConfigExclusions(clusterState), containsInAnyOrder(node1Exclusion, unresolvableVotingConfigExclusion) ); } @@ -183,12 +206,16 @@ public void testResolveByNodeNames() { .build(); assertThat( - new AddVotingConfigExclusionsRequest("nodeName1", "nodeName2").resolveVotingConfigExclusions(clusterState), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "nodeName1", "nodeName2").resolveVotingConfigExclusions( + clusterState + ), containsInAnyOrder(node1Exclusion, node2Exclusion) ); assertThat( - new AddVotingConfigExclusionsRequest("nodeName1", "unresolvableNodeName").resolveVotingConfigExclusions(clusterState), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "nodeName1", "unresolvableNodeName").resolveVotingConfigExclusions( + clusterState + ), containsInAnyOrder(node1Exclusion, unresolvableVotingConfigExclusion) ); } @@ -208,7 +235,7 @@ public void testResolveAmbiguousName() { .nodes(new Builder().add(node1).add(node2).localNodeId(node1.getId())) .build(); - final var request = new AddVotingConfigExclusionsRequest("ambiguous-name"); + final var request = new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "ambiguous-name"); assertThat( expectThrows(IllegalArgumentException.class, () -> request.resolveVotingConfigExclusions(clusterState)).getMessage(), allOf( @@ -248,8 +275,12 @@ public void testResolveRemoveExistingVotingConfigExclusions() { .build(); assertThat( - new AddVotingConfigExclusionsRequest(new String[] { "nodeId1", "nodeId2" }, Strings.EMPTY_ARRAY, TimeValue.ZERO) - .resolveVotingConfigExclusions(clusterState), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + new String[] { "nodeId1", "nodeId2" }, + 
Strings.EMPTY_ARRAY, + TimeValue.ZERO + ).resolveVotingConfigExclusions(clusterState), contains(node2Exclusion) ); } @@ -278,13 +309,17 @@ public void testResolveAndCheckMaximum() { final ClusterState clusterState = builder.build(); assertThat( - new AddVotingConfigExclusionsRequest("local").resolveVotingConfigExclusionsAndCheckMaximum(clusterState, 2, "setting.name"), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "local").resolveVotingConfigExclusionsAndCheckMaximum( + clusterState, + 2, + "setting.name" + ), contains(localNodeExclusion) ); assertThat( expectThrows( IllegalArgumentException.class, - () -> new AddVotingConfigExclusionsRequest("local").resolveVotingConfigExclusionsAndCheckMaximum( + () -> new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "local").resolveVotingConfigExclusionsAndCheckMaximum( clusterState, 1, "setting.name" diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequestTests.java index 19f977c020e91..2bee4898e0e1d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequestTests.java @@ -16,7 +16,7 @@ public class ClearVotingConfigExclusionsRequestTests extends ESTestCase { public void testSerialization() throws IOException { - final ClearVotingConfigExclusionsRequest originalRequest = new ClearVotingConfigExclusionsRequest(); + final ClearVotingConfigExclusionsRequest originalRequest = new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT); if (randomBoolean()) { originalRequest.setWaitForRemoval(randomBoolean()); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index 02ec4dc508c0b..3af51f3a2e795 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -180,7 +180,7 @@ public void testWithdrawsVoteFromANode() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest("other1"), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1"), expectSuccess(r -> { assertNotNull(r); final var state = clusterService.getClusterApplierService().state(); @@ -198,7 +198,7 @@ public void testWithdrawsVotesFromMultipleNodes() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest("other1", "other2"), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1", "other2"), expectSuccess(r -> { assertNotNull(r); final var state = clusterService.getClusterApplierService().state(); @@ -231,7 +231,7 @@ public void testReturnsImmediatelyIfVoteAlreadyWithdrawn() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest("other1"), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1"), 
expectSuccess(r -> { assertNotNull(r); final var finalState = clusterService.getClusterApplierService().state(); @@ -248,7 +248,12 @@ public void testExcludeAbsentNodesByNodeIds() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest(new String[] { "absent_id" }, Strings.EMPTY_ARRAY, TimeValue.timeValueSeconds(30)), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + new String[] { "absent_id" }, + Strings.EMPTY_ARRAY, + TimeValue.timeValueSeconds(30) + ), expectSuccess(r -> { final var state = clusterService.getClusterApplierService().state(); assertEquals( @@ -268,7 +273,12 @@ public void testExcludeExistingNodesByNodeIds() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest(new String[] { "other1", "other2" }, Strings.EMPTY_ARRAY, TimeValue.timeValueSeconds(30)), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + new String[] { "other1", "other2" }, + Strings.EMPTY_ARRAY, + TimeValue.timeValueSeconds(30) + ), expectSuccess(r -> { assertNotNull(r); final var state = clusterService.getClusterApplierService().state(); @@ -285,7 +295,7 @@ public void testExcludeAbsentNodesByNodeNames() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest("absent_node"), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "absent_node"), expectSuccess(r -> { final var state = clusterService.getClusterApplierService().state(); assertEquals( @@ -305,7 +315,7 @@ public void testExcludeExistingNodesByNodeNames() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest("other1", "other2"), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1", "other2"), expectSuccess(r -> { assertNotNull(r); final var state = clusterService.getClusterApplierService().state(); @@ -334,8 +344,13 @@ public void testTriggersReconfigurationEvenIfAllExclusionsAlreadyAddedButStillIn localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), randomFrom( - new AddVotingConfigExclusionsRequest("other1"), - new AddVotingConfigExclusionsRequest(new String[] { "other1" }, Strings.EMPTY_ARRAY, TimeValue.timeValueSeconds(30)) + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1"), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + new String[] { "other1" }, + Strings.EMPTY_ARRAY, + TimeValue.timeValueSeconds(30) + ) ), expectSuccess(r -> { assertNotNull(r); @@ -368,7 +383,12 @@ public void testExcludeByNodeIdSucceedsEvenIfAllExclusionsAlreadyAdded() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest(new String[] { "other1" }, Strings.EMPTY_ARRAY, TimeValue.timeValueSeconds(30)), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + new String[] { "other1" }, + Strings.EMPTY_ARRAY, + TimeValue.timeValueSeconds(30) + ), expectSuccess(r -> { assertNotNull(r); final var finalState = clusterService.getClusterApplierService().state(); @@ -400,7 +420,7 @@ public void testExcludeByNodeNameSucceedsEvenIfAllExclusionsAlreadyAdded() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest("other1"), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1"), 
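The test updates in this file all follow the same mechanical pattern: request constructors no longer fall back to the removed trappy implicit master-node timeout, so every caller now supplies one explicitly. A condensed before/after sketch of that pattern, not a literal excerpt; the timeout value below is a stand-in for the `TEST_REQUEST_TIMEOUT` constant the test base class provides:

```java
// Condensed illustration of the constructor migration applied throughout these tests.
import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest;
import org.elasticsearch.core.TimeValue;

class VotingExclusionsRequestMigrationSketch {
    // Stand-in for the TEST_REQUEST_TIMEOUT constant used in the real tests.
    private static final TimeValue TEST_REQUEST_TIMEOUT = TimeValue.timeValueSeconds(30);

    AddVotingConfigExclusionsRequest build() {
        // Before: new AddVotingConfigExclusionsRequest("other1", "other2");
        //         (relied on an implicit default master-node timeout inside the constructor)
        // After: the master-node timeout is passed explicitly by the caller.
        return new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1", "other2");
    }
}
```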
expectSuccess(r -> { assertNotNull(r); final var finalState = clusterService.getClusterApplierService().state(); @@ -457,7 +477,7 @@ public void testReturnsErrorIfMaximumExclusionCountExceeded() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest("other1", "other2"), + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, "other1", "other2"), expectError(e -> { final Throwable rootCause = e.getRootCause(); assertThat(rootCause, instanceOf(IllegalArgumentException.class)); @@ -485,7 +505,12 @@ public void testTimesOut() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest(Strings.EMPTY_ARRAY, new String[] { "other1" }, TimeValue.timeValueMillis(100)), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + Strings.EMPTY_ARRAY, + new String[] { "other1" }, + TimeValue.timeValueMillis(100) + ), expectError(e -> { final Throwable rootCause = e.getRootCause(); assertThat(rootCause, instanceOf(ElasticsearchTimeoutException.class)); @@ -504,7 +529,12 @@ public void testCannotAddVotingConfigExclusionsWhenItIsDisabled() { transportService.sendRequest( localNode, TransportAddVotingConfigExclusionsAction.TYPE.name(), - new AddVotingConfigExclusionsRequest(Strings.EMPTY_ARRAY, new String[] { "other1" }, TimeValue.timeValueMillis(100)), + new AddVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT, + Strings.EMPTY_ARRAY, + new String[] { "other1" }, + TimeValue.timeValueMillis(100) + ), expectError(e -> { final Throwable rootCause = e.getRootCause(); assertThat(rootCause, instanceOf(IllegalStateException.class)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java index 22aa5e9869afa..eb2d0ce987181 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java @@ -118,7 +118,9 @@ public void setupForTest() { public void testClearsVotingConfigExclusions() { final CountDownLatch countDownLatch = new CountDownLatch(1); - final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); + final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT + ); clearVotingConfigExclusionsRequest.setWaitForRemoval(false); transportService.sendRequest( localNode, @@ -136,7 +138,9 @@ public void testClearsVotingConfigExclusions() { public void testTimesOutIfWaitingForNodesThatAreNotRemoved() { final CountDownLatch countDownLatch = new CountDownLatch(1); - final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); + final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT + ); clearVotingConfigExclusionsRequest.setTimeout(TimeValue.timeValueMillis(100)); transportService.sendRequest( localNode, @@ -165,7 +169,7 @@ public void testSucceedsIfNodesAreRemovedWhileWaiting() { transportService.sendRequest( localNode, 
TransportClearVotingConfigExclusionsAction.TYPE.name(), - new ClearVotingConfigExclusionsRequest(), + new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT), expectSuccess(r -> { assertThat(clusterService.getClusterApplierService().state().getVotingConfigExclusions(), empty()); countDownLatch.countDown(); @@ -187,7 +191,7 @@ public void testCannotClearVotingConfigurationWhenItIsDisabled() { transportService.sendRequest( localNode, TransportClearVotingConfigExclusionsAction.TYPE.name(), - new ClearVotingConfigExclusionsRequest(), + new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT), expectError(e -> { final Throwable rootCause = e.getRootCause(); assertThat(rootCause, instanceOf(IllegalStateException.class)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesActionTests.java index 3d88e4e6d6a09..831f033b83031 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesActionTests.java @@ -117,7 +117,14 @@ public void testUpdateDesiredNodes() { .stream() .map(DesiredNodeWithStatus::desiredNode) .toList(); - request = new UpdateDesiredNodesRequest(desiredNodes.historyID(), desiredNodes.version() + 1, updatedNodes, false); + request = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + desiredNodes.historyID(), + desiredNodes.version() + 1, + updatedNodes, + false + ); } else { request = randomUpdateDesiredNodesRequest(); } @@ -148,6 +155,8 @@ public void testUpdatesAreIdempotent() { Collections.shuffle(equivalentDesiredNodesList, random()); } final UpdateDesiredNodesRequest equivalentDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion(), equivalentDesiredNodesList, @@ -165,6 +174,8 @@ public void testUpdateSameHistoryAndVersionWithDifferentContentsFails() { final var latestDesiredNodes = TransportUpdateDesiredNodesAction.updateDesiredNodes(null, updateDesiredNodesRequest); final UpdateDesiredNodesRequest request = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, latestDesiredNodes.historyID(), latestDesiredNodes.version(), randomList(1, 10, DesiredNodesTestCase::randomDesiredNode), @@ -182,6 +193,8 @@ public void testBackwardUpdatesFails() { final var updateDesiredNodesRequest = randomUpdateDesiredNodesRequest(); final var latestDesiredNodes = TransportUpdateDesiredNodesAction.updateDesiredNodes(null, updateDesiredNodesRequest); final UpdateDesiredNodesRequest request = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, latestDesiredNodes.historyID(), latestDesiredNodes.version() - 1, List.copyOf(latestDesiredNodes.nodes().stream().map(DesiredNodeWithStatus::desiredNode).toList()), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequestSerializationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequestSerializationTests.java index cf491c52b49d6..4cc19260c7b05 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequestSerializationTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequestSerializationTests.java @@ -20,7 +20,14 @@ protected Writeable.Reader instanceReader() { @Override protected UpdateDesiredNodesRequest mutateInstance(UpdateDesiredNodesRequest request) { - return new UpdateDesiredNodesRequest(request.getHistoryID(), request.getVersion() + 1, request.getNodes(), request.isDryRun()); + return new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + request.getHistoryID(), + request.getVersion() + 1, + request.getNodes(), + request.isDryRun() + ); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequestTests.java index df963829810e5..2837b2a107624 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequestTests.java @@ -26,6 +26,8 @@ public class UpdateDesiredNodesRequestTests extends ESTestCase { public void testValidation() { final UpdateDesiredNodesRequest updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomBoolean() ? "" : " ", -1, randomBoolean() ? Collections.emptyList() : List.of(hotDesiredNode()), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java index 4a59ca93c14d9..931e84b9e915a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestTests.java @@ -42,12 +42,12 @@ public void testSerialize() throws Exception { } public void testRequestReturnsHiddenIndicesByDefault() { - final ClusterHealthRequest defaultRequest = new ClusterHealthRequest(); + final ClusterHealthRequest defaultRequest = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT); assertTrue(defaultRequest.indicesOptions().expandWildcardsHidden()); } private ClusterHealthRequest randomRequest() { - ClusterHealthRequest request = new ClusterHealthRequest(); + ClusterHealthRequest request = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT); request.waitForStatus(randomFrom(ClusterHealthStatus.values())); request.waitForNodes(randomFrom("", "<", "<=", ">", ">=") + between(0, 1000)); request.waitForNoInitializingShards(randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthActionTests.java index f2a852a8bd966..b3cc363d21cef 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthActionTests.java @@ -33,7 +33,7 @@ public class TransportClusterHealthActionTests extends ESTestCase { public void testWaitForInitializingShards() throws Exception { final String[] indices = { "test" }; - final ClusterHealthRequest request = new ClusterHealthRequest(); + final ClusterHealthRequest request = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT); 
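// --- Illustrative sketch (an aside, not a line of this patch): ClusterHealthRequest now takes the
// master-node timeout up front instead of defaulting it. A minimal example assuming the same
// TEST_REQUEST_TIMEOUT constant; the wait conditions mirror the ones exercised in the nearby hunks.
ClusterHealthRequest health = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT);
health.waitForStatus(ClusterHealthStatus.GREEN);   // block until the cluster reaches the target status
health.waitForNoInitializingShards(true);          // and until no shards are still initializing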
request.waitForNoInitializingShards(true); ClusterState clusterState = randomClusterStateWithInitializingShards("test", 0); ClusterHealthResponse response = new ClusterHealthResponse("", indices, clusterState); @@ -52,7 +52,7 @@ public void testWaitForInitializingShards() throws Exception { public void testWaitForAllShards() { final String[] indices = { "test" }; - final ClusterHealthRequest request = new ClusterHealthRequest(); + final ClusterHealthRequest request = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT); request.waitForActiveShards(ActiveShardCount.ALL); ClusterState clusterState = randomClusterStateWithInitializingShards("test", 1); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequestSerializationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequestSerializationTests.java index 584ad10a5a16c..932b0b7cbac6d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequestSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequestSerializationTests.java @@ -38,7 +38,7 @@ protected PrevalidateNodeRemovalRequest mutateInstance(PrevalidateNodeRemovalReq ) .setIds(request.getIds()) .setExternalIds(request.getExternalIds()) - .build(); + .build(TEST_REQUEST_TIMEOUT); case 1 -> PrevalidateNodeRemovalRequest.builder() .setNames(request.getNames()) @@ -49,7 +49,7 @@ protected PrevalidateNodeRemovalRequest mutateInstance(PrevalidateNodeRemovalReq ) ) .setExternalIds(request.getExternalIds()) - .build(); + .build(TEST_REQUEST_TIMEOUT); case 2 -> PrevalidateNodeRemovalRequest.builder() .setNames(request.getNames()) @@ -60,7 +60,7 @@ protected PrevalidateNodeRemovalRequest mutateInstance(PrevalidateNodeRemovalReq PrevalidateNodeRemovalRequestSerializationTests::randomStringArray ) ) - .build(); + .build(TEST_REQUEST_TIMEOUT); default -> throw new IllegalStateException("unexpected value: " + i); }; } @@ -74,6 +74,6 @@ private static PrevalidateNodeRemovalRequest randomRequest() { .setNames(randomStringArray()) .setIds(randomStringArray()) .setExternalIds(randomStringArray()) - .build(); + .build(TEST_REQUEST_TIMEOUT); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequestTests.java index 27bc4a0f0bc29..60ba698ce14d9 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequestTests.java @@ -18,24 +18,28 @@ public class PrevalidateNodeRemovalRequestTests extends ESTestCase { public void testValidate() { - ActionRequestValidationException ex1 = PrevalidateNodeRemovalRequest.builder().build().validate(); + ActionRequestValidationException ex1 = PrevalidateNodeRemovalRequest.builder().build(TEST_REQUEST_TIMEOUT).validate(); assertNotNull(ex1); assertThat(ex1.validationErrors(), equalTo(List.of(PrevalidateNodeRemovalRequest.VALIDATION_ERROR_MSG_NO_QUERY_PARAM))); - ActionRequestValidationException ex2 = PrevalidateNodeRemovalRequest.builder().setNames("name1").setIds("id1").build().validate(); + ActionRequestValidationException ex2 = PrevalidateNodeRemovalRequest.builder() + .setNames("name1") + 
.setIds("id1") + .build(TEST_REQUEST_TIMEOUT) + .validate(); assertNotNull(ex2); assertThat(ex2.validationErrors(), equalTo(List.of(PrevalidateNodeRemovalRequest.VALIDATION_ERROR_MSG_ONLY_ONE_QUERY_PARAM))); ActionRequestValidationException ex3 = PrevalidateNodeRemovalRequest.builder() .setNames("name1") .setExternalIds("id1") - .build() + .build(TEST_REQUEST_TIMEOUT) .validate(); assertNotNull(ex3); assertThat(ex3.validationErrors(), equalTo(List.of(PrevalidateNodeRemovalRequest.VALIDATION_ERROR_MSG_ONLY_ONE_QUERY_PARAM))); - assertNull(PrevalidateNodeRemovalRequest.builder().setNames("name1").build().validate()); - assertNull(PrevalidateNodeRemovalRequest.builder().setIds("id1").build().validate()); - assertNull(PrevalidateNodeRemovalRequest.builder().setExternalIds("external_id1").build().validate()); + assertNull(PrevalidateNodeRemovalRequest.builder().setNames("name1").build(TEST_REQUEST_TIMEOUT).validate()); + assertNull(PrevalidateNodeRemovalRequest.builder().setIds("id1").build(TEST_REQUEST_TIMEOUT).validate()); + assertNull(PrevalidateNodeRemovalRequest.builder().setExternalIds("external_id1").build(TEST_REQUEST_TIMEOUT).validate()); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalActionTests.java index d6e48104620cd..a580fa17b6d8b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalActionTests.java @@ -35,24 +35,32 @@ public void testResolveNodes() { DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(node1).add(node2).build(); assertThat( - resolveNodes(PrevalidateNodeRemovalRequest.builder().setNames(node1Name).build(), discoveryNodes), + resolveNodes(PrevalidateNodeRemovalRequest.builder().setNames(node1Name).build(TEST_REQUEST_TIMEOUT), discoveryNodes), equalTo(Set.of(node1)) ); assertThat( - resolveNodes(PrevalidateNodeRemovalRequest.builder().setIds(node1Id, node2Id).build(), discoveryNodes), + resolveNodes(PrevalidateNodeRemovalRequest.builder().setIds(node1Id, node2Id).build(TEST_REQUEST_TIMEOUT), discoveryNodes), equalTo(Set.of(node1, node2)) ); expectThrows( ResourceNotFoundException.class, - () -> resolveNodes(PrevalidateNodeRemovalRequest.builder().setNames(node1Name, node1Id).build(), discoveryNodes) + () -> resolveNodes( + PrevalidateNodeRemovalRequest.builder().setNames(node1Name, node1Id).build(TEST_REQUEST_TIMEOUT), + discoveryNodes + ) ); expectThrows( ResourceNotFoundException.class, - () -> resolveNodes(PrevalidateNodeRemovalRequest.builder().setIds(node1Name, node1Id).build(), discoveryNodes) + () -> resolveNodes( + PrevalidateNodeRemovalRequest.builder().setIds(node1Name, node1Id).build(TEST_REQUEST_TIMEOUT), + discoveryNodes + ) ); assertThat( resolveNodes( - PrevalidateNodeRemovalRequest.builder().setExternalIds(node1.getExternalId(), node2.getExternalId()).build(), + PrevalidateNodeRemovalRequest.builder() + .setExternalIds(node1.getExternalId(), node2.getExternalId()) + .build(TEST_REQUEST_TIMEOUT), discoveryNodes ), equalTo(Set.of(node1, node2)) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java index 774093834e941..ed2660061f765 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java @@ -63,13 +63,13 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws ); XContentParseException iae = expectThrows(XContentParseException.class, () -> { try (var parser = createParser(xContentType.xContent(), mutated)) { - ClusterUpdateSettingsRequest.fromXContent(parser); + ClusterUpdateSettingsRequest.fromXContent(ClusterUpdateSettingsRequestTests::newTestRequest, parser); } }); assertThat(iae.getMessage(), containsString("[cluster_update_settings_request] unknown field [" + unsupportedField + "]")); } else { try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { - ClusterUpdateSettingsRequest parsedRequest = ClusterUpdateSettingsRequest.fromXContent(parser); + var parsedRequest = ClusterUpdateSettingsRequest.fromXContent(ClusterUpdateSettingsRequestTests::newTestRequest, parser); assertNull(parser.nextToken()); assertThat(parsedRequest.transientSettings(), equalTo(request.transientSettings())); @@ -79,7 +79,7 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws } private static ClusterUpdateSettingsRequest createTestItem() { - ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); request.persistentSettings(ClusterUpdateSettingsResponseTests.randomClusterSettings(0, 2)); request.transientSettings(ClusterUpdateSettingsResponseTests.randomClusterSettings(0, 2)); return request; @@ -119,11 +119,15 @@ public void testOperatorHandler() throws IOException { }"""; try (XContentParser parser = createParser(XContentType.JSON.xContent(), oneSettingJSON)) { - ClusterUpdateSettingsRequest parsedRequest = ClusterUpdateSettingsRequest.fromXContent(parser); + var parsedRequest = ClusterUpdateSettingsRequest.fromXContent(ClusterUpdateSettingsRequestTests::newTestRequest, parser); assertThat( action.modifiedKeys(parsedRequest), containsInAnyOrder("indices.recovery.max_bytes_per_sec", "cluster.remote.cluster_one.seeds") ); } } + + public static ClusterUpdateSettingsRequest newTestRequest() { + return new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateApiTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateApiTests.java index b726910f5d8cf..3c33e42ab94d0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateApiTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateApiTests.java @@ -31,20 +31,20 @@ protected Settings nodeSettings() { } public void testWaitForMetadataVersion() throws Exception { - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(TEST_REQUEST_TIMEOUT); clusterStateRequest.waitForTimeout(TimeValue.timeValueHours(1)); ClusterStateResponse response = clusterAdmin().state(clusterStateRequest).get(10L, TimeUnit.SECONDS); assertThat(response.isWaitForTimedOut(), is(false)); long 
metadataVersion = response.getState().getMetadata().version(); // Verify that cluster state api returns after the cluster settings have been updated: - clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest = new ClusterStateRequest(TEST_REQUEST_TIMEOUT); clusterStateRequest.waitForMetadataVersion(metadataVersion + 1); ActionFuture future2 = clusterAdmin().state(clusterStateRequest); assertThat(future2.isDone(), is(false)); - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); // Pick an arbitrary dynamic cluster setting and change it. Just to get metadata version incremented: updateSettingsRequest.transientSettings(Settings.builder().put("cluster.max_shards_per_node", 999)); assertAcked(clusterAdmin().updateSettings(updateSettingsRequest).actionGet()); @@ -63,7 +63,7 @@ public void testWaitForMetadataVersion() throws Exception { assertThat(response.getState(), nullValue()); // Remove transient setting, otherwise test fails with the reason that this test leaves state behind: - updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); updateSettingsRequest.transientSettings(Settings.builder().put("cluster.max_shards_per_node", (String) null)); assertAcked(clusterAdmin().updateSettings(updateSettingsRequest).actionGet()); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java index 1b1535185bef7..f30653fd7e6d5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -29,7 +29,7 @@ public void testSerialization() throws Exception { for (int i = 0; i < iterations; i++) { IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); - ClusterStateRequest clusterStateRequest = new ClusterStateRequest().routingTable(randomBoolean()) + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(TEST_REQUEST_TIMEOUT).routingTable(randomBoolean()) .metadata(randomBoolean()) .nodes(randomBoolean()) .blocks(randomBoolean()) @@ -68,7 +68,7 @@ public void testSerialization() throws Exception { } public void testWaitForMetadataVersion() { - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(TEST_REQUEST_TIMEOUT); expectThrows( IllegalArgumentException.class, () -> clusterStateRequest.waitForMetadataVersion(randomLongBetween(Long.MIN_VALUE, 0)) @@ -84,21 +84,27 @@ private static void assertOptionsMatch(IndicesOptions in, IndicesOptions out) { } public void testDescription() { - assertThat(new ClusterStateRequest().clear().getDescription(), equalTo("cluster state [master timeout [30s]]")); + assertThat(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().getDescription(), equalTo("cluster state [master timeout [30s]]")); assertThat( - new ClusterStateRequest().masterNodeTimeout(TimeValue.timeValueMinutes(5)).getDescription(), + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).masterNodeTimeout(TimeValue.timeValueMinutes(5)).getDescription(), equalTo("cluster 
state [routing table, nodes, metadata, blocks, customs, master timeout [5m]]") ); - assertThat(new ClusterStateRequest().clear().routingTable(true).getDescription(), containsString("routing table")); - assertThat(new ClusterStateRequest().clear().nodes(true).getDescription(), containsString("nodes")); - assertThat(new ClusterStateRequest().clear().metadata(true).getDescription(), containsString("metadata")); - assertThat(new ClusterStateRequest().clear().blocks(true).getDescription(), containsString("blocks")); - assertThat(new ClusterStateRequest().clear().customs(true).getDescription(), containsString("customs")); - assertThat(new ClusterStateRequest().local(true).getDescription(), containsString("local")); assertThat( - new ClusterStateRequest().waitForMetadataVersion(23L).getDescription(), + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().routingTable(true).getDescription(), + containsString("routing table") + ); + assertThat(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().nodes(true).getDescription(), containsString("nodes")); + assertThat(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().metadata(true).getDescription(), containsString("metadata")); + assertThat(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().blocks(true).getDescription(), containsString("blocks")); + assertThat(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().customs(true).getDescription(), containsString("customs")); + assertThat(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).local(true).getDescription(), containsString("local")); + assertThat( + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(23L).getDescription(), containsString("wait for metadata version [23] with timeout [1m]") ); - assertThat(new ClusterStateRequest().indices("foo", "bar").getDescription(), containsString("indices [foo, bar]")); + assertThat( + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).indices("foo", "bar").getDescription(), + containsString("indices [foo, bar]") + ); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/ReservedStateAwareHandledTransportActionTests.java b/server/src/test/java/org/elasticsearch/action/support/ReservedStateAwareHandledTransportActionTests.java index c2ff59f0ccbe0..64bde9f8cc0b3 100644 --- a/server/src/test/java/org/elasticsearch/action/support/ReservedStateAwareHandledTransportActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/ReservedStateAwareHandledTransportActionTests.java @@ -56,9 +56,9 @@ public void testRejectImmutableConflictClusterStateUpdate() { handler.doExecute(mock(Task.class), new DummyRequest(), future); assertNotNull(future.actionGet()); - ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings( - Settings.builder().put("a", "a value").build() - ).transientSettings(Settings.builder().put("e", "e value").build()); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .persistentSettings(Settings.builder().put("a", "a value").build()) + .transientSettings(Settings.builder().put("e", "e value").build()); FakeReservedStateAwareAction action = new FakeReservedStateAwareAction( "internal:testClusterSettings", @@ -80,9 +80,9 @@ public void onFailure(Exception e) { } })).getMessage().contains("with errors: [[a] set as read-only by [namespace_one], " + "[e] set as read-only by [namespace_two]")); - ClusterUpdateSettingsRequest okRequest = new ClusterUpdateSettingsRequest().persistentSettings( - 
Settings.builder().put("m", "m value").build() - ).transientSettings(Settings.builder().put("n", "n value").build()); + ClusterUpdateSettingsRequest okRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .persistentSettings(Settings.builder().put("m", "m value").build()) + .transientSettings(Settings.builder().put("n", "n value").build()); // this should just work, no conflicts action.doExecute(mock(Task.class), okRequest, new ActionListener<>() { diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index 6568464705d9e..890614fc80217 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -840,9 +840,9 @@ public void testRejectImmutableConflictClusterStateUpdate() { // nothing should happen here, since the request doesn't touch any of the immutable state keys noHandler.validateForReservedState(new Request(), clusterState); - ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings( - Settings.builder().put("a", "a value").build() - ).transientSettings(Settings.builder().put("e", "e value").build()); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .persistentSettings(Settings.builder().put("a", "a value").build()) + .transientSettings(Settings.builder().put("e", "e value").build()); FakeClusterStateUpdateAction action = new FakeClusterStateUpdateAction( "internal:testClusterSettings", @@ -859,9 +859,9 @@ public void testRejectImmutableConflictClusterStateUpdate() { .contains("with errors: [[a] set as read-only by [namespace_one], " + "[e] set as read-only by [namespace_two]") ); - ClusterUpdateSettingsRequest okRequest = new ClusterUpdateSettingsRequest().persistentSettings( - Settings.builder().put("m", "m value").build() - ).transientSettings(Settings.builder().put("n", "n value").build()); + ClusterUpdateSettingsRequest okRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .persistentSettings(Settings.builder().put("m", "m value").build()) + .transientSettings(Settings.builder().put("n", "n value").build()); // this should just work, no conflicts action.validateForReservedState(okRequest, clusterState); diff --git a/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java b/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java index 600c09be2c12f..8446fed6ddddd 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java +++ b/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java @@ -98,7 +98,11 @@ public void UnsupportedOperationException.class, safeAwaitFailure( ClusterStateResponse.class, - listener -> remoteClusterClient.execute(ClusterStateAction.REMOTE_TYPE, new ClusterStateRequest(), listener) + listener -> remoteClusterClient.execute( + ClusterStateAction.REMOTE_TYPE, + new ClusterStateRequest(TEST_REQUEST_TIMEOUT), + listener + ) ) ).getMessage() ); diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index 
7ef9e44eb8e7e..bc673b735c18e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -159,7 +159,7 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte ActionTestUtils.execute( action, new CancellableTask(1, "direct", TransportClusterHealthAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()), - new ClusterHealthRequest().waitForGreenStatus(), + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT).waitForGreenStatus(), listener ); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesTestCase.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesTestCase.java index d99d787a1d243..8da7418a4d42a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesTestCase.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesTestCase.java @@ -117,6 +117,8 @@ public static void assertDesiredNodesStatusIsCorrect( public static UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest() { return new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, UUIDs.randomBase64UUID(random()), randomLongBetween(0, Long.MAX_VALUE - 1000), randomList(1, 100, DesiredNodesTestCase::randomDesiredNode), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index e66dd32b718b7..c6eb65381fab6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -248,7 +248,7 @@ public void testIndexTemplateWithAlias() throws Exception { List errors = putTemplateDetail(request); assertThat(errors, is(empty())); - final Metadata metadata = clusterAdmin().prepareState().get().getState().metadata(); + final Metadata metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); IndexTemplateMetadata template = metadata.templates().get(templateName); Map aliasMap = template.getAliases(); assertThat(aliasMap.size(), equalTo(1)); @@ -266,7 +266,7 @@ public void testFindTemplates() throws Exception { putTemplateDetail(new PutRequest("test", "foo-1").patterns(singletonList("foo-*")).order(1)); putTemplateDetail(new PutRequest("test", "foo-2").patterns(singletonList("foo-*")).order(2)); putTemplateDetail(new PutRequest("test", "bar").patterns(singletonList("bar-*")).order(between(0, 100))); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat( MetadataIndexTemplateService.findV1Templates(state.metadata(), "foo-1234", randomBoolean()) .stream() @@ -296,7 +296,7 @@ public void testFindTemplatesWithHiddenIndices() throws Exception { new PutRequest("testFindTemplatesWithHiddenIndices", "sneaky-hidden").patterns(singletonList("sneaky*")) .settings(Settings.builder().put("index.hidden", true).build()) ); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); // hidden assertThat( @@ -380,7 +380,7 @@ public void testFindTemplatesWithHiddenIndices() throws Exception { public void 
testFindTemplatesWithDateMathIndex() throws Exception { client().admin().indices().prepareDeleteTemplate("*").get(); // Delete all existing templates putTemplateDetail(new PutRequest("testFindTemplatesWithDateMathIndex", "foo-1").patterns(singletonList("test-*")).order(1)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat( MetadataIndexTemplateService.findV1Templates(state.metadata(), "", false) diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index dc08fffa49c19..39bc392b2ad4f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -130,7 +130,7 @@ protected Collection> nodePlugins() { } ClusterState getNodeClusterState(String node) { - return client(node).admin().cluster().prepareState().setLocal(true).get().getState(); + return client(node).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); } void assertNoMaster(final String node) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index d3e480eb766a7..07d455477b73d 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -449,14 +449,14 @@ public void testUpdateSyncIntervalDynamically() { assertNotNull(indexService.getFsyncTask()); assertTrue(indexService.getFsyncTask().mustReschedule()); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertEquals("5s", indexMetadata.getSettings().get(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey())); indicesAdmin().prepareClose("test").get(); indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "20s")) .get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey())); } diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 2617f82b09f08..787cd32b7e322 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -2719,7 +2719,7 @@ public void testEnableSearchWorkerThreads() throws IOException { try { ClusterUpdateSettingsResponse response = client().admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(SEARCH_WORKER_THREADS_ENABLED.getKey(), false).build()) .get(); assertTrue(response.isAcknowledged()); @@ -2730,7 +2730,7 @@ public void testEnableSearchWorkerThreads() throws IOException { // reset original default setting 
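// --- Illustrative sketch (an aside, not a line of this patch): the admin-client builders these hunks
// migrate to now take explicit timeouts. Assumes TEST_REQUEST_TIMEOUT as elsewhere in these tests;
// prepareUpdateSettings takes two timeouts (presumably the master-node and acknowledgement timeouts,
// matching the two-argument ClusterUpdateSettingsRequest constructor used earlier in this patch).
ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
ClusterUpdateSettingsResponse ack = client().admin()
    .cluster()
    .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
    .setPersistentSettings(Settings.builder().put(SEARCH_WORKER_THREADS_ENABLED.getKey(), false).build())
    .get();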
client().admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull(SEARCH_WORKER_THREADS_ENABLED.getKey()).build()) .get(); try (SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.DFS, randomBoolean())) { @@ -2868,7 +2868,7 @@ public void testSlicingBehaviourForParallelCollection() throws Exception { try { ClusterUpdateSettingsResponse response = client().admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) .get(); assertTrue(response.isAcknowledged()); @@ -2891,7 +2891,7 @@ public void testSlicingBehaviourForParallelCollection() throws Exception { // Reset to the original default setting and check to ensure it takes effect. client().admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey()).build()) .get(); { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index f5e69a65a6d06..9a361b0ec2a90 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -1036,7 +1036,9 @@ public void testSnapshotPrimaryRelocations() { continueOrDie( createRepoAndIndex(repoName, index, shards), - createIndexResponse -> client().admin().cluster().state(new ClusterStateRequest(), clusterStateResponseStepListener) + createIndexResponse -> client().admin() + .cluster() + .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT), clusterStateResponseStepListener) ); continueOrDie(clusterStateResponseStepListener, clusterStateResponse -> { @@ -1048,7 +1050,8 @@ public void testSnapshotPrimaryRelocations() { @Override public void run() { final SubscribableListener updatedClusterStateResponseStepListener = new SubscribableListener<>(); - masterAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener); + masterAdminClient.cluster() + .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT), updatedClusterStateResponseStepListener); continueOrDie(updatedClusterStateResponseStepListener, updatedClusterState -> { final ShardRouting shardRouting = updatedClusterState.getState() .routingTable() diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index d2e885f8da4be..e9b4838125409 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -104,7 +104,7 @@ public void testConnectAndExecuteRequest() throws Exception { ClusterStateResponse clusterStateResponse = safeAwait( listener -> client.execute( ClusterStateAction.REMOTE_TYPE, - new ClusterStateRequest(), + new ClusterStateRequest(TEST_REQUEST_TIMEOUT), ActionListener.runBefore( listener, () -> assertTrue(Thread.currentThread().getName().contains('[' + TEST_THREAD_POOL_NAME + ']')) @@ -184,7 +184,7 @@ public void testEnsureWeReconnect() throws Exception { ) ); ClusterStateResponse 
clusterStateResponse = safeAwait( - listener -> client.execute(ClusterStateAction.REMOTE_TYPE, new ClusterStateRequest(), listener) + listener -> client.execute(ClusterStateAction.REMOTE_TYPE, new ClusterStateRequest(TEST_REQUEST_TIMEOUT), listener) ); assertNotNull(clusterStateResponse); assertEquals("foo_bar_cluster", clusterStateResponse.getState().getClusterName().value()); @@ -273,7 +273,11 @@ public void testQuicklySkipUnavailableClusters() throws Exception { ESTestCase.assertThat( safeAwaitFailure( ClusterStateResponse.class, - listener -> client.execute(ClusterStateAction.REMOTE_TYPE, new ClusterStateRequest(), listener) + listener -> client.execute( + ClusterStateAction.REMOTE_TYPE, + new ClusterStateRequest(TEST_REQUEST_TIMEOUT), + listener + ) ), instanceOf(ConnectTransportException.class) ); @@ -284,7 +288,7 @@ public void testQuicklySkipUnavailableClusters() throws Exception { assertBusy(() -> { ClusterStateResponse ignored = safeAwait( - listener -> client.execute(ClusterStateAction.REMOTE_TYPE, new ClusterStateRequest(), listener) + listener -> client.execute(ClusterStateAction.REMOTE_TYPE, new ClusterStateRequest(TEST_REQUEST_TIMEOUT), listener) ); // keep retrying on an exception, the goal is to check that we eventually reconnect }); diff --git a/test/framework/src/integTest/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java b/test/framework/src/integTest/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java index a9196a3bb1377..b80c1eb16dfae 100644 --- a/test/framework/src/integTest/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java +++ b/test/framework/src/integTest/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java @@ -164,7 +164,7 @@ private static void sendRequest(TransportService source, TransportService target source.sendRequest( target.getLocalNode(), TransportClusterHealthAction.NAME, - new ClusterHealthRequest(), + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT), new TransportResponseHandler<>() { private AtomicBoolean responded = new AtomicBoolean(); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index 16ddcb750dd6b..76733a1b58366 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -103,7 +103,7 @@ protected void checkTransientErrorsDuringRecoveryAreRetried(String recoveryActio ); final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build()); - ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForNodes(">=3").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes(">=3").get(); assertThat(response.isTimedOut(), is(false)); indicesAdmin().prepareCreate(indexName) @@ -128,7 +128,7 @@ protected void checkTransientErrorsDuringRecoveryAreRetried(String recoveryActio indexRandom(true, requests); ensureSearchable(indexName); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); final String blueNodeId = getNodeId(blueNodeName); assertFalse(stateResponse.getState().getRoutingNodes().node(blueNodeId).isEmpty()); @@ 
-204,7 +204,7 @@ public void checkDisconnectsWhileRecovering(String recoveryActionToBlock) throws ); final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build()); - ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForNodes(">=3").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes(">=3").get(); assertThat(response.isTimedOut(), is(false)); indicesAdmin().prepareCreate(indexName) @@ -219,7 +219,7 @@ public void checkDisconnectsWhileRecovering(String recoveryActionToBlock) throws indexRandom(true, requests); ensureSearchable(indexName); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); final String blueNodeId = getNodeId(blueNodeName); assertFalse(stateResponse.getState().getRoutingNodes().node(blueNodeId).isEmpty()); @@ -344,7 +344,7 @@ public void sendRequest( "Expected there to be some initializing shards", client(blueNodeName).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setLocal(true) .get() .getState() diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java index cae57d5137acf..c3ca68a3b8441 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java @@ -282,7 +282,7 @@ public void testShapeRelations() throws Exception { .setMapping(mapping) .setSettings(settings(version).build()); mappingRequest.get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); // Create a multipolygon with two polygons. The first is an rectangle of size 10x10 // with a hole of size 5x5 equidistant from all sides. 
This hole in turn contains @@ -458,7 +458,7 @@ public void testBulk() throws Exception { protected abstract void doDistanceAndBoundingBoxTest(String key); private static String findNodeName(String index) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndexShardRoutingTable shard = state.getRoutingTable().index(index).shard(0); String nodeId = shard.assignedShards().get(0).currentNodeId(); return state.getNodes().get(nodeId).getName(); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index aa3e97c7f057f..ad738d8985e03 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -510,7 +510,7 @@ protected void assertDocCount(String index, long count) { * @param metadata snapshot metadata to write (as returned by {@link SnapshotInfo#userMetadata()}) */ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map metadata) throws Exception { - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final RepositoriesMetadata repositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); assertNotNull(repositoriesMetadata); final RepositoryMetadata initialRepoMetadata = repositoriesMetadata.repository(repoName); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java index 7dc9374da02ea..61edc1354239e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java @@ -157,7 +157,7 @@ protected void disconnectFromRemoteClusters() throws Exception { settings.putNull("cluster.remote." 
+ clusterAlias + ".proxy_address"); } } - client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + client().admin().cluster().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(settings).get(); assertBusy(() -> { for (TransportService transportService : cluster(LOCAL_CLUSTER).getInstances(TransportService.class)) { assertThat(transportService.getRemoteClusterService().getRegisteredRemoteClusterNames(), empty()); @@ -204,7 +204,11 @@ protected void configureRemoteCluster(String clusterAlias, Collection se } builder.build(); - ClusterUpdateSettingsResponse resp = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + ClusterUpdateSettingsResponse resp = client().admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settings) + .get(); if (skipUnavailable != DEFAULT_SKIP_UNAVAILABLE) { String key = Strings.format("cluster.remote.%s.skip_unavailable", clusterAlias); assertEquals(String.valueOf(skipUnavailable), resp.getPersistentSettings().get(key)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 4bdbc81bcc3f0..71628967bf266 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -580,7 +580,7 @@ private void afterInternal(boolean afterClass) throws Exception { try { if (cluster() != null) { if (currentClusterScope != Scope.TEST) { - Metadata metadata = clusterAdmin().prepareState().get().getState().getMetadata(); + Metadata metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getMetadata(); final Set persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); @@ -873,25 +873,25 @@ private static Settings.Builder getExcludeSettings(int num, Settings.Builder bui * Waits until all nodes have no pending tasks. 
*/ public void waitNoPendingTasksOnAll() throws Exception { - assertNoTimeout(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get()); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get()); assertBusy(() -> { for (Client client : clients()) { - ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); + ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setLocal(true).get(); assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); PendingClusterTasksResponse pendingTasks = client.execute( TransportPendingClusterTasksAction.TYPE, - new PendingClusterTasksRequest().local(true) + new PendingClusterTasksRequest(TEST_REQUEST_TIMEOUT).local(true) ).get(); assertThat( "client " + client + " still has pending tasks " + pendingTasks, pendingTasks.pendingTasks(), Matchers.emptyIterable() ); - clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); + clusterHealth = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setLocal(true).get(); assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); } }); - assertNoTimeout(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get()); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get()); } /** Ensures the result counts are as expected, and logs the results if different */ @@ -980,7 +980,7 @@ private ClusterHealthStatus ensureColor( String color = clusterHealthStatus.name().toLowerCase(Locale.ROOT); String method = "ensure" + Strings.capitalize(color); - ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + ClusterHealthRequest healthRequest = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, indices).masterNodeTimeout(timeout) .timeout(timeout) .waitForStatus(clusterHealthStatus) .waitForEvents(Priority.LANGUID) @@ -1019,10 +1019,10 @@ private ClusterHealthStatus ensureColor( new ClusterAllocationExplainRequest(TEST_REQUEST_TIMEOUT), listeners.acquire(allocationExplainRef::set) ); - clusterAdmin().prepareState().execute(listeners.acquire(clusterStateRef::set)); + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).execute(listeners.acquire(clusterStateRef::set)); client().execute( TransportPendingClusterTasksAction.TYPE, - new PendingClusterTasksRequest(), + new PendingClusterTasksRequest(TEST_REQUEST_TIMEOUT), listeners.acquire(pendingTasksRef::set) ); try (var writer = new StringWriter()) { @@ -1075,7 +1075,7 @@ public ClusterHealthStatus waitForRelocation() { * using the cluster health API. 
*/ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) { - ClusterHealthRequest request = new ClusterHealthRequest(new String[] {}).waitForNoRelocatingShards(true) + ClusterHealthRequest request = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForNoRelocatingShards(true) .waitForEvents(Priority.LANGUID); if (status != null) { request.waitForStatus(status); @@ -1085,7 +1085,7 @@ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) { logger.info( "waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, - clusterAdmin().prepareState().get().getState(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), getClusterPendingTasks() ); assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false)); @@ -1102,7 +1102,8 @@ public static PendingClusterTasksResponse getClusterPendingTasks() { public static PendingClusterTasksResponse getClusterPendingTasks(Client client) { try { - return client.execute(TransportPendingClusterTasksAction.TYPE, new PendingClusterTasksRequest()).get(10, TimeUnit.SECONDS); + return client.execute(TransportPendingClusterTasksAction.TYPE, new PendingClusterTasksRequest(TEST_REQUEST_TIMEOUT)) + .get(10, TimeUnit.SECONDS); } catch (Exception e) { return fail(e); } @@ -1203,7 +1204,7 @@ public static DiscoveryNode waitAndGetHealthNode(InternalTestCluster internalClu ClusterState state = internalCluster.client() .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setNodes(true) @@ -1219,13 +1220,17 @@ public static DiscoveryNode waitAndGetHealthNode(InternalTestCluster internalClu * Prints the current cluster state as debug logging. */ public void logClusterState() { - logger.debug("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks()); + logger.debug( + "cluster state:\n{}\n{}", + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), + getClusterPendingTasks() + ); } protected void ensureClusterSizeConsistency() { if (cluster() != null && cluster().size() > 0) { // if static init fails the cluster can be null logger.trace("Check consistency for [{}] nodes", cluster().size()); - assertNoTimeout(clusterAdmin().prepareHealth().setWaitForNodes(Integer.toString(cluster().size())).get()); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes(Integer.toString(cluster().size())).get()); } } @@ -1242,64 +1247,68 @@ protected final void doEnsureClusterStateConsistency(NamedWriteableRegistry name final PlainActionFuture future = new PlainActionFuture<>(); final List> localStates = new ArrayList<>(cluster().size()); for (Client client : cluster().getClients()) { - localStates.add(SubscribableListener.newForked(l -> client.admin().cluster().prepareState().all().setLocal(true).execute(l))); + localStates.add( + SubscribableListener.newForked( + l -> client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).all().setLocal(true).execute(l) + ) + ); } try (RefCountingListener refCountingListener = new RefCountingListener(future)) { - SubscribableListener.newForked(l -> client().admin().cluster().prepareState().all().execute(l)) - .andThenAccept(masterStateResponse -> { - byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterStateResponse.getState()); - // remove local node reference - final ClusterState masterClusterState = ClusterState.Builder.fromBytes( - masterClusterStateBytes, - null, - namedWriteableRegistry - ); 
- Map masterStateMap = convertToMap(masterClusterState); - int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; - String masterId = masterClusterState.nodes().getMasterNodeId(); - for (SubscribableListener localStateListener : localStates) { - localStateListener.andThenAccept(localClusterStateResponse -> { - byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterStateResponse.getState()); - // remove local node reference - final ClusterState localClusterState = ClusterState.Builder.fromBytes( - localClusterStateBytes, - null, - namedWriteableRegistry - ); - final Map localStateMap = convertToMap(localClusterState); - final int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; - // Check that the non-master node has the same version of the cluster state as the master and - // that the master node matches the master (otherwise there is no requirement for the cluster state to - // match) - if (masterClusterState.version() == localClusterState.version() - && masterId.equals(localClusterState.nodes().getMasterNodeId())) { - try { - assertEquals( - "cluster state UUID does not match", - masterClusterState.stateUUID(), - localClusterState.stateUUID() - ); - // We cannot compare serialization bytes since serialization order of maps is not guaranteed - // but we can compare serialization sizes - they should be the same - assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize); - // Compare JSON serialization - assertNull( - "cluster state JSON serialization does not match", - differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap) - ); - } catch (final AssertionError error) { - logger.error( - "Cluster state from master:\n{}\nLocal cluster state:\n{}", - masterClusterState.toString(), - localClusterState.toString() - ); - throw error; - } + SubscribableListener.newForked( + l -> client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).all().execute(l) + ).andThenAccept(masterStateResponse -> { + byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterStateResponse.getState()); + // remove local node reference + final ClusterState masterClusterState = ClusterState.Builder.fromBytes( + masterClusterStateBytes, + null, + namedWriteableRegistry + ); + Map masterStateMap = convertToMap(masterClusterState); + int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; + String masterId = masterClusterState.nodes().getMasterNodeId(); + for (SubscribableListener localStateListener : localStates) { + localStateListener.andThenAccept(localClusterStateResponse -> { + byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterStateResponse.getState()); + // remove local node reference + final ClusterState localClusterState = ClusterState.Builder.fromBytes( + localClusterStateBytes, + null, + namedWriteableRegistry + ); + final Map localStateMap = convertToMap(localClusterState); + final int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; + // Check that the non-master node has the same version of the cluster state as the master and + // that the master node matches the master (otherwise there is no requirement for the cluster state to + // match) + if (masterClusterState.version() == localClusterState.version() + && masterId.equals(localClusterState.nodes().getMasterNodeId())) { + try { + assertEquals( + "cluster state UUID does not match", + masterClusterState.stateUUID(), + 
localClusterState.stateUUID() + ); + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can compare serialization sizes - they should be the same + assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize); + // Compare JSON serialization + assertNull( + "cluster state JSON serialization does not match", + differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap) + ); + } catch (final AssertionError error) { + logger.error( + "Cluster state from master:\n{}\nLocal cluster state:\n{}", + masterClusterState.toString(), + localClusterState.toString() + ); + throw error; } - }).addListener(refCountingListener.acquire()); - } - }) - .addListener(refCountingListener.acquire()); + } + }).addListener(refCountingListener.acquire()); + } + }).addListener(refCountingListener.acquire()); } safeGet(future); } @@ -1307,7 +1316,7 @@ protected final void doEnsureClusterStateConsistency(NamedWriteableRegistry name protected void ensureClusterStateCanBeReadByNodeTool() throws IOException { if (cluster() != null && cluster().size() > 0) { final Client masterClient = client(); - Metadata metadata = masterClient.admin().cluster().prepareState().all().get().getState().metadata(); + Metadata metadata = masterClient.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState().metadata(); final Map serializationParams = Maps.newMapWithExpectedSize(2); serializationParams.put("binary", "true"); serializationParams.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY); @@ -1449,7 +1458,7 @@ protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean l logger.debug("ensuring cluster is stable with [{}] nodes. access node: [{}]. timeout: [{}]", nodeCount, viaNode, timeValue); ClusterHealthResponse clusterHealthResponse = client(viaNode).admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(nodeCount)) .setTimeout(timeValue) @@ -1457,7 +1466,7 @@ protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean l .setWaitForNoRelocatingShards(true) .get(); if (clusterHealthResponse.isTimedOut()) { - ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get(); + ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get(); fail( "failed to reach a stable cluster of [" + nodeCount @@ -1812,7 +1821,9 @@ public static void setClusterReadOnly(boolean value) { /** Sets cluster persistent settings **/ public static void updateClusterSettings(Settings.Builder persistentSettings) { - assertAcked(clusterAdmin().prepareUpdateSettings().setPersistentSettings(persistentSettings).get()); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(persistentSettings).get() + ); } private static CountDownLatch newLatch(List latches) { @@ -2267,7 +2278,7 @@ public static Path randomRepoPath(Settings settings) { } protected NumShards getNumShards(String index) { - Metadata metadata = clusterAdmin().prepareState().get().getState().metadata(); + Metadata metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); assertThat(metadata.hasIndex(index), equalTo(true)); int numShards = Integer.valueOf(metadata.index(index).getSettings().get(SETTING_NUMBER_OF_SHARDS)); int numReplicas = 
Integer.valueOf(metadata.index(index).getSettings().get(SETTING_NUMBER_OF_REPLICAS)); @@ -2279,7 +2290,7 @@ protected NumShards getNumShards(String index) { */ public Set assertAllShardsOnNodes(String index, String... pattern) { Set nodes = new HashSet<>(); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { final IndexShardRoutingTable indexShard = indexRoutingTable.shard(shardId); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index f0eb33445db20..6eb8e0474225a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -94,7 +94,7 @@ protected void startNode(long seed) throws Exception { // we must wait for the node to actually be up and running. otherwise the node might have started, // elected itself master but might not yet have removed the // SERVICE_UNAVAILABLE/1/state not recovered / initialized block - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertFalse(clusterHealthResponse.isTimedOut()); indicesAdmin().preparePutTemplate("one_shard_index_template") .setPatterns(Collections.singletonList("*")) @@ -154,7 +154,7 @@ public void tearDown() throws Exception { var deleteComponentTemplateRequest = new TransportDeleteComponentTemplateAction.Request("*"); assertAcked(client().execute(TransportDeleteComponentTemplateAction.TYPE, deleteComponentTemplateRequest).actionGet()); assertAcked(indicesAdmin().prepareDelete("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN).get()); - Metadata metadata = clusterAdmin().prepareState().get().getState().getMetadata(); + Metadata metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getMetadata(); assertThat( "test leaves persistent cluster metadata behind: " + metadata.persistentSettings().keySet(), metadata.persistentSettings().size(), @@ -384,7 +384,9 @@ protected IndexService createIndex(String index, CreateIndexRequestBuilder creat // Wait for the index to be allocated so that cluster state updates don't override // changes that would have been done locally ClusterHealthResponse health = clusterAdmin().health( - new ClusterHealthRequest(index).waitForYellowStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true) + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, index).waitForYellowStatus() + .waitForEvents(Priority.LANGUID) + .waitForNoRelocatingShards(true) ).actionGet(); assertThat(health.getStatus(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW)); assertThat("Cluster must be a single node cluster", health.getNumberOfDataNodes(), equalTo(1)); @@ -428,7 +430,7 @@ public ClusterHealthStatus ensureGreen(String... indices) { */ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... 
indices) { ClusterHealthResponse actionGet = clusterAdmin().health( - new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, indices).masterNodeTimeout(timeout) .timeout(timeout) .waitForGreenStatus() .waitForEvents(Priority.LANGUID) @@ -437,7 +439,7 @@ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) { if (actionGet.isTimedOut()) { logger.info( "ensureGreen timed out, cluster state:\n{}\n{}", - clusterAdmin().prepareState().get().getState(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), ESIntegTestCase.getClusterPendingTasks(client()) ); assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false)); @@ -462,8 +464,9 @@ protected boolean forbidPrivateIndexSettings() { * inspired by {@link ESRestTestCase} */ protected void ensureNoInitializingShards() { - ClusterHealthResponse actionGet = clusterAdmin().health(new ClusterHealthRequest("_all").waitForNoInitializingShards(true)) - .actionGet(); + ClusterHealthResponse actionGet = clusterAdmin().health( + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "_all").waitForNoInitializingShards(true) + ).actionGet(); assertFalse("timed out waiting for shards to initialize", actionGet.isTimedOut()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 38b8dfecc0b5e..77762544c4718 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -142,6 +142,7 @@ import static org.elasticsearch.discovery.DiscoveryModule.MULTI_NODE_DISCOVERY_TYPE; import static org.elasticsearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_FILE; import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; +import static org.elasticsearch.test.ESTestCase.TEST_REQUEST_TIMEOUT; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.ESTestCase.runInParallel; @@ -1240,7 +1241,7 @@ public synchronized void validateClusterFormed() { assertFalse( client().admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(expectedNodes.size())) .get(TimeValue.timeValueSeconds(40)) @@ -1481,7 +1482,7 @@ public void assertSeqNos() throws Exception { */ public void assertSameDocIdsOnShards() throws Exception { assertBusy(() -> { - ClusterState state = client().admin().cluster().prepareState().get().getState(); + ClusterState state = client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (var indexRoutingTable : state.routingTable().indicesRouting().values()) { for (int i = 0; i < indexRoutingTable.size(); i++) { IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(i); @@ -1928,7 +1929,7 @@ private Set excludeMasters(Collection nodeAndClients) { try { client().execute( TransportAddVotingConfigExclusionsAction.TYPE, - new AddVotingConfigExclusionsRequest(excludedNodeNames.toArray(Strings.EMPTY_ARRAY)) + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, excludedNodeNames.toArray(Strings.EMPTY_ARRAY)) ).get(); } catch (InterruptedException | ExecutionException e) { ESTestCase.fail(e); @@ -1944,7 +1945,10 @@ private void removeExclusions(Set excludedNodeIds) { 
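The voting-configuration exclusion requests that `InternalTestCluster` issues when stopping master-eligible nodes pick up the same explicit timeout (the hunk continues directly below). A rough sketch of the add/clear round trip, assuming a `Client` obtained from the test cluster; the helper class and method names here are illustrative only:

```java
import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest;
import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest;
import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction;
import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction;
import org.elasticsearch.client.internal.Client;

import static org.elasticsearch.test.ESTestCase.TEST_REQUEST_TIMEOUT;

// Illustrative helper: exclude the named nodes from the voting configuration and
// then clear the exclusions, passing the explicit timeout in both request constructors.
final class VotingExclusionSketch {
    static void excludeAndClear(Client client, String... nodeNames) throws Exception {
        client.execute(
            TransportAddVotingConfigExclusionsAction.TYPE,
            new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, nodeNames)
        ).get();
        client.execute(
            TransportClearVotingConfigExclusionsAction.TYPE,
            new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT)
        ).get();
    }
}
```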
logger.info("removing voting config exclusions for {} after restart/shutdown", excludedNodeIds); try { Client client = getRandomNodeAndClient(node -> excludedNodeIds.contains(node.name) == false).client(); - client.execute(TransportClearVotingConfigExclusionsAction.TYPE, new ClearVotingConfigExclusionsRequest()).get(); + client.execute( + TransportClearVotingConfigExclusionsAction.TYPE, + new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT) + ).get(); } catch (InterruptedException | ExecutionException e) { ESTestCase.fail(e); } @@ -2000,7 +2004,7 @@ public String getMasterName() { public String getMasterName(@Nullable String viaNode) { try { Client client = viaNode != null ? client(viaNode) : client(); - return client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(); + return client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode().getName(); } catch (Exception e) { logger.warn("Can't fetch cluster state", e); throw new RuntimeException("Can't get master node " + e.getMessage(), e); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 67ff67ee6fe05..ea632599fedbf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -39,6 +39,7 @@ import java.util.Random; import java.util.Set; +import static org.elasticsearch.test.ESTestCase.TEST_REQUEST_TIMEOUT; import static org.elasticsearch.test.ESTestCase.safeAwait; /** @@ -84,7 +85,7 @@ public void wipe(Set excludeTemplates) { .newForked( l -> client().execute( DeleteDataStreamAction.INSTANCE, - new DeleteDataStreamAction.Request(ESTestCase.TEST_REQUEST_TIMEOUT, "*").indicesOptions( + new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*").indicesOptions( IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN ), l.delegateResponse((ll, e) -> { @@ -255,7 +256,7 @@ private void handleWipeIndicesFailure(Exception exception, boolean wipingAllIndi if (wipingAllIndices) { SubscribableListener - .newForked(l -> client().admin().cluster().prepareState().execute(l)) + .newForked(l -> client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).execute(l)) .andThen((l, clusterStateResponse) -> { ArrayList concreteIndices = new ArrayList<>(); for (IndexMetadata indexMetadata : clusterStateResponse.getState().metadata()) { @@ -336,7 +337,7 @@ private void wipeRepositories(ActionListener listener) { .newForked( l -> client().admin() .cluster() - .prepareDeleteRepository(ESTestCase.TEST_REQUEST_TIMEOUT, ESTestCase.TEST_REQUEST_TIMEOUT, "*") + .prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "*") .execute(l.delegateResponse((ll, e) -> { if (e instanceof RepositoryMissingException) { // ignore diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java index a70afedb6f221..e4ecbc015d7dc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java @@ -13,6 +13,7 @@ import java.util.Random; +import static org.elasticsearch.test.ESTestCase.TEST_REQUEST_TIMEOUT; import static org.junit.Assert.assertFalse; public abstract class SingleNodeDisruption implements ServiceDisruptionScheme { @@ -71,7 
+72,7 @@ protected void ensureNodeCount(InternalTestCluster testCluster) { testCluster.client() .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNodes(String.valueOf(testCluster.size())) .setWaitForNoRelocatingShards(true) .get() diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java index 56f957ff488d5..52ecc40c957b7 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java @@ -146,7 +146,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { */ protected void restartTaskNode(String id, String indexName) throws Exception { AsyncExecutionId searchId = AsyncExecutionId.decode(id); - final ClusterStateResponse clusterState = clusterAdmin().prepareState().clear().setNodes(true).get(); + final ClusterStateResponse clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setNodes(true).get(); DiscoveryNode node = clusterState.getState().nodes().get(searchId.getTaskId().getNodeId()); // Temporarily stop garbage collection, making sure to wait for any in-flight tasks to complete diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CCSUsageTelemetryAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CCSUsageTelemetryAsyncSearchIT.java index 4f8fa122047e1..9e17aa25154bb 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CCSUsageTelemetryAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CCSUsageTelemetryAsyncSearchIT.java @@ -349,7 +349,7 @@ private Map setupClusters() { assertFalse( client(clusterAlias).admin() .cluster() - .prepareHealth(remoteIndex) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 646ba1465c7c2..9d83f88a043e2 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -1742,7 +1742,7 @@ private Map setupTwoClusters() { assertFalse( client(REMOTE_CLUSTER).admin() .cluster() - .prepareHealth(remoteIndex) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingFileSettingsIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingFileSettingsIT.java index 4da3d5ef08e07..59144df93834b 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingFileSettingsIT.java +++ 
b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingFileSettingsIT.java @@ -134,7 +134,7 @@ private void assertPoliciesSaveOK(CountDownLatch savedClusterState, AtomicLong m assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).actionGet(); ReservedStateMetadata reservedState = clusterStateResponse.getState() diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java index 8d2a60773d29d..afb98f4c5e032 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java @@ -41,7 +41,7 @@ public void testDeletePolicy() { ); assertAcked(client().execute(DeleteAutoscalingPolicyAction.INSTANCE, deleteRequest).actionGet()); // now verify that the policy is not in the cluster state - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), not(hasKey(policy.name()))); diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionIT.java index 1a49211601c4b..18fdc7f6fd35f 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionIT.java @@ -32,7 +32,7 @@ public class TransportPutAutoscalingPolicyActionIT extends AutoscalingIntegTestC public void testAddPolicy() { final AutoscalingPolicy policy = putRandomAutoscalingPolicy(); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), hasKey(policy.name())); @@ -47,7 +47,7 @@ public void testUpdatePolicy() { mutateAutoscalingDeciders(policy.deciders()) ); putAutoscalingPolicy(updatedPolicy); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), hasKey(policy.name())); diff --git 
a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java index 154b5cb7f7999..df1022e45c5a5 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderIT.java @@ -116,7 +116,7 @@ public void testZeroToOne() throws Exception { assertMinimumCapacity(capacity().results().get("frozen").requiredCapacity().node()); assertThat( - clusterAdmin().prepareHealth().get().getStatus(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getStatus(), anyOf(equalTo(ClusterHealthStatus.YELLOW), equalTo(ClusterHealthStatus.GREEN)) ); diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java index 25bd08afcad72..076d5713d3ffc 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java @@ -378,7 +378,7 @@ public void testScaleWhileShrinking() throws Exception { equalTo(requiredSpaceForShrink + ReactiveStorageDeciderService.NODE_DISK_OVERHEAD) ); - assertThat(clusterAdmin().prepareHealth(shrinkName).get().getUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, shrinkName).get().getUnassignedShards(), equalTo(1)); // test that the required amount is enough. // Adjust the amount since autoscaling calculates a node size to stay below low watermark though the shard can be @@ -387,7 +387,7 @@ public void testScaleWhileShrinking() throws Exception { assert tooLittleSpaceForShrink <= requiredSpaceForShrink; setTotalSpace(dataNode1Name, tooLittleSpaceForShrink); ClusterRerouteUtils.reroute(client()); - assertThat(clusterAdmin().prepareHealth(shrinkName).get().getUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, shrinkName).get().getUnassignedShards(), equalTo(1)); setTotalSpace(dataNode1Name, tooLittleSpaceForShrink + 1); ClusterRerouteUtils.reroute(client()); ensureGreen(); @@ -488,7 +488,7 @@ public void testScaleDuringSplitOrClone() throws Exception { equalTo(requiredSpaceForClone + ReactiveStorageDeciderService.NODE_DISK_OVERHEAD) ); - assertThat(clusterAdmin().prepareHealth(cloneName).get().getUnassignedShards(), equalTo(resizedShardCount)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, cloneName).get().getUnassignedShards(), equalTo(resizedShardCount)); // test that the required amount is enough. 
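The CCR hunks further down repeat two more variants of the pattern: `ClusterUpdateSettingsRequest` is now constructed with two timeout arguments (the patch passes `TEST_REQUEST_TIMEOUT` for both), and `ClusterStateRequest` takes one. A short sketch of both, again assuming an `ESIntegTestCase` subclass; the test class name, the chosen setting, and the assertions are illustrative:

```java
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

// Illustrative only: the two-timeout settings-update constructor and the
// single-timeout cluster-state request used throughout the hunks below.
public class ExplicitTimeoutRequestsIT extends ESIntegTestCase {
    public void testSettingsAndStateRequestsCarryTimeouts() {
        // ClusterUpdateSettingsRequest takes two TimeValue arguments; the patch
        // passes TEST_REQUEST_TIMEOUT for both of them.
        ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
        settingsRequest.persistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "all"));
        assertAcked(clusterAdmin().updateSettings(settingsRequest).actionGet());

        // ClusterStateRequest takes the master-node timeout in its constructor.
        ClusterStateRequest stateRequest = new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().metadata(true);
        assertNotNull(clusterAdmin().state(stateRequest).actionGet().getState());
    }
}
```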
// Adjust the amount since autoscaling calculates a node size to stay below low watermark though the shard can be @@ -497,7 +497,7 @@ public void testScaleDuringSplitOrClone() throws Exception { assert tooLittleSpaceForClone <= requiredSpaceForClone; setTotalSpace(dataNode1Name, tooLittleSpaceForClone); ClusterRerouteUtils.reroute(client()); - assertThat(clusterAdmin().prepareHealth(cloneName).get().getUnassignedShards(), equalTo(resizedShardCount)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, cloneName).get().getUnassignedShards(), equalTo(resizedShardCount)); setTotalSpace(dataNode1Name, requiredSpaceForClone); ClusterRerouteUtils.reroute(client()); ensureGreen(); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 2d77d2c770845..5d1fc1686b35f 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -465,7 +465,7 @@ public void testPauseAndResumeAutoFollowPattern() throws Exception { assertThat( client.admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setIndices("copy-*") .setMetadata(true) @@ -564,7 +564,12 @@ public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception // check that all leader indices have been correctly auto followed List matchingPrefixes = Arrays.stream(prefixes).map(prefix -> prefix + "*").collect(Collectors.toList()); - for (IndexMetadata leaderIndexMetadata : leaderClient().admin().cluster().prepareState().get().getState().metadata()) { + for (IndexMetadata leaderIndexMetadata : leaderClient().admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata()) { final String leaderIndex = leaderIndexMetadata.getIndex().getName(); if (Regex.simpleMatch(matchingPrefixes, leaderIndex)) { String followingIndex = "copy-" + leaderIndex; @@ -690,7 +695,7 @@ public void testAutoFollowDatastreamWithClosingFollowerIndex() throws Exception assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(3L)); }); - final Metadata metadata = followerClient().admin().cluster().prepareState().get().getState().metadata(); + final Metadata metadata = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); final DataStream dataStream = metadata.dataStreams().get(datastream); assertTrue(dataStream.getIndices().stream().anyMatch(i -> i.getName().equals(indexInDatastream))); assertEquals(IndexMetadata.State.OPEN, metadata.index(indexInDatastream).getState()); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrAliasesIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrAliasesIT.java index e0aa7efa75126..45aac762c616a 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrAliasesIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrAliasesIT.java @@ -396,7 +396,7 @@ private AliasMetadata getAliasMetadata(final GetAliasesResponse response, final private CheckedRunnable assertShardFollowTask(final int numberOfPrimaryShards) { return () -> { - final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); + final ClusterState clusterState = 
followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final PersistentTasksCustomMetadata taskMetadata = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); assertNotNull("task metadata for follower should exist", taskMetadata); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 423d555de9eab..7ce55313aa771 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -173,14 +173,14 @@ public void testThatRepositoryRecoversEmptyIndexBasedOnLeaderSettings() throws I ClusterStateResponse leaderState = leaderClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(leaderIndex) .get(); ClusterStateResponse followerState = followerClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(followerIndex) @@ -402,7 +402,7 @@ public void testIndividualActionsTimeout() throws Exception { transportService.clearAllRules(); } - settingsRequest = new ClusterUpdateSettingsRequest(); + settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); TimeValue defaultValue = CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getDefault(Settings.EMPTY); settingsRequest.persistentSettings( Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(), defaultValue) @@ -417,7 +417,7 @@ public void testIndividualActionsTimeout() throws Exception { } private ClusterUpdateSettingsRequest newSettingsRequest() { - return new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); + return new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).masterNodeTimeout(TimeValue.MAX_VALUE); } public void testFollowerMappingIsUpdated() throws IOException { @@ -482,7 +482,7 @@ public void testFollowerMappingIsUpdated() throws IOException { assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); assertEquals(0, restoreInfo.failedShards()); - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(TEST_REQUEST_TIMEOUT); clusterStateRequest.clear(); clusterStateRequest.metadata(true); clusterStateRequest.indices(followerIndex); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index 716554eb3927c..696c16df31a25 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -118,7 +118,8 @@ private RestoreSnapshotRequest setUpRestoreSnapshotRequest( final String followerIndex, final int numberOfDocuments ) throws IOException { - final ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); + final ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .masterNodeTimeout(TimeValue.MAX_VALUE); final 
String chunkSize = new ByteSizeValue(randomFrom(4, 128, 1024), ByteSizeUnit.KB).getStringRep(); settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), chunkSize)); assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); @@ -227,7 +228,12 @@ public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { final CountDownLatch latch = new CountDownLatch(1); // block the recovery from completing; this ensures the background sync is still running - final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + final ClusterStateResponse followerClusterState = followerClient().admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get(); for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( TransportService.class, @@ -303,7 +309,7 @@ public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(leaderIndex) @@ -335,7 +341,7 @@ public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(followerIndex) @@ -378,7 +384,7 @@ public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(followerIndex) @@ -430,7 +436,12 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { randomSubsetOf(randomIntBetween(0, numberOfShards), IntStream.range(0, numberOfShards).boxed().collect(Collectors.toSet())) ); - final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + final ClusterStateResponse followerClusterState = followerClient().admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get(); try { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( @@ -523,7 +534,12 @@ public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { randomSubsetOf(randomIntBetween(1, numberOfShards), IntStream.range(0, numberOfShards).boxed().collect(Collectors.toSet())) ); - final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + final ClusterStateResponse followerClusterState = followerClient().admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get(); try { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) 
getFollowerCluster().getInstance( @@ -554,7 +570,7 @@ public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(followerIndex) @@ -563,7 +579,7 @@ public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(leaderIndex) @@ -712,7 +728,7 @@ public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws E final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(leaderIndex) @@ -743,7 +759,7 @@ public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws E assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(followerIndex) @@ -786,7 +802,7 @@ public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws E assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.values(), hasSize(1)); final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(followerIndex) @@ -847,7 +863,12 @@ public void testRetentionLeaseIsAddedIfItDisappearsWhileFollowing() throws Excep final CountDownLatch latch = new CountDownLatch(1); - final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + final ClusterStateResponse followerClusterState = followerClient().admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get(); try { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = (MockTransportService) getFollowerCluster().getInstance( @@ -956,7 +977,12 @@ public void testPeriodicRenewalDoesNotAddRetentionLeaseAfterUnfollow() throws Ex final CountDownLatch unfollowLatch = new CountDownLatch(1); final CountDownLatch responseLatch = new CountDownLatch(1); - final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + final ClusterStateResponse followerClusterState = followerClient().admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get(); try { for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { @@ -1054,7 +1080,7 @@ public void testForgetFollower() throws Exception { final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(followerIndex) @@ -1166,7 +1192,7 @@ private List getShardsStats(final IndicesStatsResponse stats) { private String getRetentionLeaseId(final String followerIndex, final String leaderIndex) { final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() - .prepareState() + 
.prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(followerIndex) @@ -1175,7 +1201,7 @@ private String getRetentionLeaseId(final String followerIndex, final String lead final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(leaderIndex) diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java index 9e84cdac34008..d7b4fbe468318 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java @@ -106,7 +106,7 @@ public void testCloseAndReopenFollowerIndex() throws Exception { AcknowledgedResponse response = followerClient().admin().indices().close(closeIndexRequest).get(); assertThat(response.isAcknowledged(), is(true)); - ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); + ClusterState clusterState = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().index("index2").getState(), is(IndexMetadata.State.CLOSE)); assertThat(clusterState.getBlocks().hasIndexBlock("index2", MetadataIndexStateService.INDEX_CLOSED_BLOCK), is(true)); @@ -117,7 +117,7 @@ public void testCloseAndReopenFollowerIndex() throws Exception { assertAcked(followerClient().admin().indices().open(new OpenIndexRequest("index2").masterNodeTimeout(TimeValue.MAX_VALUE)).get()); - clusterState = followerClient().admin().cluster().prepareState().get().getState(); + clusterState = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().index("index2").getState(), is(IndexMetadata.State.OPEN)); assertThat(clusterState.getBlocks().hasIndexBlockWithId("index2", MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID), is(false)); ensureFollowerGreen("index2"); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 6361b6f89605e..e8940b2f41756 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -146,7 +146,7 @@ public void testFollowIndex() throws Exception { followerClient().admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.MAX_VALUE) .setPersistentSettings( Settings.builder() @@ -187,7 +187,7 @@ public void testFollowIndex() throws Exception { assertTrue(response.isFollowIndexShardsAcked()); assertTrue(response.isIndexFollowingStarted()); - ClusterHealthRequest healthRequest = new ClusterHealthRequest("index2").waitForNoRelocatingShards(true); + ClusterHealthRequest healthRequest = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "index2").waitForNoRelocatingShards(true); ClusterIndexHealth indexHealth = followerClient().admin().cluster().health(healthRequest).get().getIndices().get("index2"); for (ClusterShardHealth shardHealth : indexHealth.getShards().values()) { if (waitOnAll) { @@ -999,7 
+999,10 @@ public void testUnknownClusterAlias() throws Exception { public void testLeaderIndexRed() throws Exception { try { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")); assertAcked(leaderClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); assertAcked( @@ -1025,7 +1028,10 @@ public void testLeaderIndexRed() throws Exception { assertThat(ESIntegTestCase.indexExists("index2", followerClient()), is(false)); } finally { // Always unset allocation enable setting to avoid other assertions from failing too when this test fails: - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.routing.allocation.enable", (String) null)); assertAcked(leaderClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); } @@ -1339,7 +1345,7 @@ public void testIndexFallBehind() throws Exception { // we have to remove the retention leases on the leader shards to ensure the follower falls behind final ClusterStateResponse followerIndexClusterState = followerClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices("index2") @@ -1347,7 +1353,7 @@ public void testIndexFallBehind() throws Exception { final String followerUUID = followerIndexClusterState.getState().metadata().index("index2").getIndexUUID(); final ClusterStateResponse leaderIndexClusterState = leaderClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices("index1") @@ -1356,7 +1362,7 @@ public void testIndexFallBehind() throws Exception { final RoutingTable leaderRoutingTable = leaderClient().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setIndices("index1") .setRoutingTable(true) @@ -1514,7 +1520,8 @@ public void testUpdateRemoteConfigsDuringFollowing() throws Exception { logger.info("Indexing [{}] docs while updating remote config", firstBatchNumDocs); try (BackgroundIndexer indexer = new BackgroundIndexer("index1", leaderClient(), firstBatchNumDocs, randomIntBetween(1, 5))) { - ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .masterNodeTimeout(TimeValue.MAX_VALUE); String address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); Setting compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace( "leader_cluster" @@ -1551,7 +1558,8 @@ public void testUpdateRemoteConfigsDuringFollowing() throws Exception { assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfPrimaryShards); } finally { - ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); + ClusterUpdateSettingsRequest settingsRequest = new 
ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .masterNodeTimeout(TimeValue.MAX_VALUE); String address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); Setting compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace( "leader_cluster" @@ -1650,7 +1658,7 @@ private BooleanSupplier hasFollowIndexBeenClosed(String indexName) { private CheckedRunnable assertTask(final int numberOfPrimaryShards, final Map numDocsPerShard) { return () -> { - final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); + final ClusterState clusterState = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final PersistentTasksCustomMetadata taskMetadata = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); assertNotNull(taskMetadata); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/PrimaryFollowerAllocationIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/PrimaryFollowerAllocationIT.java index 16a2de7bf5b0f..27e6a2ade60c2 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/PrimaryFollowerAllocationIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/PrimaryFollowerAllocationIT.java @@ -121,7 +121,7 @@ public void testAllocateFollowerPrimaryToNodesWithRemoteClusterClientRole() thro } // Empty follower primaries must be assigned to nodes with the remote cluster client role assertBusy(() -> { - final ClusterState state = getFollowerCluster().client().admin().cluster().prepareState().get().getState(); + final ClusterState state = getFollowerCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexRoutingTable indexRoutingTable = state.routingTable().index(followerIndex); for (int i = 0; i < indexRoutingTable.size(); i++) { IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); @@ -143,7 +143,7 @@ public void testAllocateFollowerPrimaryToNodesWithRemoteClusterClientRole() thro ) .get(); assertBusy(() -> { - final ClusterState state = getFollowerCluster().client().admin().cluster().prepareState().get().getState(); + final ClusterState state = getFollowerCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexRoutingTable indexRoutingTable = state.routingTable().index(followerIndex); for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId); @@ -160,7 +160,7 @@ public void testAllocateFollowerPrimaryToNodesWithRemoteClusterClientRole() thro getFollowerCluster().fullRestart(); ensureFollowerGreen(followerIndex); assertBusy(() -> { - final ClusterState state = getFollowerCluster().client().admin().cluster().prepareState().get().getState(); + final ClusterState state = getFollowerCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexRoutingTable indexRoutingTable = state.routingTable().index(followerIndex); for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java 
b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java index 18456b24d4618..3a169037c422d 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java @@ -135,7 +135,8 @@ public void testFollowIndex() throws Exception { private void setupRemoteCluster() throws Exception { var remoteMaxPendingConnectionListeners = getRemoteMaxPendingConnectionListeners(); - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .masterNodeTimeout(TimeValue.MAX_VALUE); String address = getLeaderCluster().getAnyMasterNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", address)); assertAcked(followerClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); @@ -161,7 +162,8 @@ private Integer getRemoteMaxPendingConnectionListeners() { } private void cleanRemoteCluster() throws Exception { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .masterNodeTimeout(TimeValue.MAX_VALUE); updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", (String) null)); assertAcked(followerClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 82af24d2293cc..c16c01431bd28 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -300,7 +300,7 @@ void getRemoteClusterState( CcrLicenseChecker.checkRemoteClusterLicenseAndFetchClusterState( client, remoteCluster, - new ClusterStateRequest().clear() + new ClusterStateRequest(waitForMetadataTimeOut).clear() .metadata(true) .routingTable(true) .local(true) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java index 77eea2a452bb7..a81542f3371e1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java @@ -34,7 +34,7 @@ public final class CcrRequests { private CcrRequests() {} public static ClusterStateRequest metadataRequest(String leaderIndex) { - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(TimeValue.MAX_VALUE); clusterStateRequest.clear(); clusterStateRequest.metadata(true); clusterStateRequest.indices(leaderIndex); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index b990b738e4bc9..49cff2bdde7ec 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -133,7 +133,7 @@ public ClusterState execute(ClusterState currentState) { CcrLicenseChecker.checkRemoteClusterLicenseAndFetchClusterState( client, request.getRemoteCluster(), - new ClusterStateRequest().clear().metadata(true), + new ClusterStateRequest(request.masterNodeTimeout()).clear().metadata(true), listener::onFailure, consumer ); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 97e3a409d590d..a19fde8196ea8 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -166,7 +166,7 @@ public CcrRepository(RepositoryMetadata metadata, Client client, Settings settin threadPool.getThreadContext(), l -> getRemoteClusterClient().execute( ClusterStateAction.REMOTE_TYPE, - new ClusterStateRequest().clear().metadata(true).nodes(true).masterNodeTimeout(TimeValue.MAX_VALUE), + new ClusterStateRequest(TimeValue.MAX_VALUE).clear().metadata(true).nodes(true), l.map(ClusterStateResponse::getState) ) ); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 677a82ddafa34..1b7875e4a36b4 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -409,7 +409,7 @@ private ClusterHealthStatus ensureColor( String color = clusterHealthStatus.name().toLowerCase(Locale.ROOT); String method = "ensure" + Strings.capitalize(color); - ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + ClusterHealthRequest healthRequest = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, indices).masterNodeTimeout(timeout) .timeout(timeout) .waitForStatus(clusterHealthStatus) .waitForEvents(Priority.LANGUID) @@ -431,9 +431,9 @@ private ClusterHealthStatus ensureColor( follower cluster tasks: {}""", method, - leaderClient().admin().cluster().prepareState().get().getState(), + leaderClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), ESIntegTestCase.getClusterPendingTasks(leaderClient()), - followerClient().admin().cluster().prepareState().get().getState(), + followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), ESIntegTestCase.getClusterPendingTasks(followerClient()) ); HotThreads.logLocalHotThreads(logger, Level.INFO, "hot threads at timeout", ReferenceDocs.LOGGING); @@ -502,7 +502,7 @@ protected void ensureNoCcrTasks() throws Exception { empty() ); - final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); + final ClusterState clusterState = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); PersistentTasksCustomMetadata tasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); Collection> ccrTasks = tasks.tasks() .stream() 
@@ -661,7 +661,7 @@ protected void assertIndexFullyReplicatedToFollower(String leaderIndex, String f } private Map> getDocIdAndSeqNos(InternalTestCluster cluster, String index) throws IOException { - final ClusterState state = cluster.client().admin().cluster().prepareState().get().getState(); + final ClusterState state = cluster.client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List shardRoutings = state.routingTable().allShards(index); Randomness.shuffle(shardRoutings); final Map> docs = new HashMap<>(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java index 6b69c172c0df3..fbb7418bc6e93 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java @@ -61,7 +61,7 @@ protected Collection> getPlugins() { @Before public void setupLocalRemote() throws Exception { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); String address = getInstanceFromNode(TransportService.class).boundAddress().publishAddress().toString(); updateSettingsRequest.transientSettings(Settings.builder().put("cluster.remote.local.seeds", address)); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); @@ -84,7 +84,7 @@ public void purgeCCRMetadata() throws Exception { @After public void removeLocalRemote() throws Exception { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); updateSettingsRequest.transientSettings(Settings.builder().put("cluster.remote.local.seeds", (String) null)); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java index 1202f828059f6..3193e3647f4ad 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java @@ -19,7 +19,10 @@ public class ComponentVersionsNodesInfoIT extends ESIntegTestCase { public void testNodesInfoComponentVersions() { final String node_1 = internalCluster().startNode(); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("1").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("1") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = getNodeId(node_1); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java index 5c02288e704f7..0fea3c0d3b74f 100644 --- 
a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java @@ -349,11 +349,23 @@ private IndexRequestBuilder[] snapshotAndRestore(final String sourceIdx, final b logger.info("--> delete index and stop the data node"); assertAcked(client().admin().indices().prepareDelete(sourceIdx).get()); internalCluster().stopRandomDataNode(); - assertFalse(clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("1").get().isTimedOut()); + assertFalse( + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setTimeout(TimeValue.timeValueSeconds(30)) + .setWaitForNodes("1") + .get() + .isTimedOut() + ); final String newDataNode = internalCluster().startDataOnlyNode(); logger.info("--> start a new data node " + newDataNode); - assertFalse(clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2").get().isTimedOut()); + assertFalse( + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setTimeout(TimeValue.timeValueSeconds(30)) + .setWaitForNodes("2") + .get() + .isTimedOut() + ); logger.info("--> restore the index and ensure all shards are allocated"); RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java index add11d373b401..ab3234f6d3d73 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java @@ -76,7 +76,10 @@ public void testDefaultIndexAllocateToContent() { assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_CONTENT)); // index should be red - assertThat(clusterAdmin().prepareHealth(index).get().getIndices().get(index).getStatus(), equalTo(ClusterHealthStatus.RED)); + assertThat( + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, index).get().getIndices().get(index).getStatus(), + equalTo(ClusterHealthStatus.RED) + ); if (randomBoolean()) { logger.info("--> starting content node"); @@ -518,7 +521,14 @@ private void updateDesiredNodes(DesiredNode... 
desiredNodes) { private void updateDesiredNodes(List desiredNodes) { assertThat(desiredNodes.size(), is(greaterThan(0))); - final var request = new UpdateDesiredNodesRequest(randomAlphaOfLength(10), 1, desiredNodes, false); + final var request = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + randomAlphaOfLength(10), + 1, + desiredNodes, + false + ); internalCluster().client().execute(UpdateDesiredNodesAction.INSTANCE, request).actionGet(); } @@ -533,7 +543,7 @@ private void assertPrimaryShardIsAllocatedInNode(int shard, DesiredNode expected } private DiscoveryNode getPrimaryShardAssignedNode(int shard) { - final var state = clusterAdmin().prepareState().get().getState(); + final var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final var routingTable = state.routingTable().index(index).shard(shard); final var primaryShard = routingTable.primaryShard(); final var discoveryNode = state.nodes().get(primaryShard.currentNodeId()); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java index 368946d79682c..9937efcc406ff 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java @@ -183,7 +183,7 @@ private String findNodeWithReplicaShard(String indexName, int shard) { } private String findNodeWithShard(final String indexName, final int shard, final boolean primary) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED); startedShards = startedShards.stream() .filter(shardRouting -> shardRouting.getIndexName().equals(indexName)) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java index 07be597c7024e..95753f02e396d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java @@ -72,8 +72,7 @@ public static void createAnnotationsIndexIfNecessaryAndWaitForYellow( ) { final ActionListener annotationsIndexCreatedListener = finalListener.delegateFailureAndWrap((delegate, success) -> { - final ClusterHealthRequest request = new ClusterHealthRequest(READ_ALIAS_NAME).waitForYellowStatus() - .masterNodeTimeout(masterNodeTimeout); + final ClusterHealthRequest request = new ClusterHealthRequest(masterNodeTimeout, READ_ALIAS_NAME).waitForYellowStatus(); executeAsyncWithOrigin( client, ML_ORIGIN, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java index d81541698e49b..0acc953c24039 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -103,9 +103,10 @@ public static void createStateIndexAndAliasIfNecessaryAndWaitForYellow( final ActionListener finalListener ) { final ActionListener stateIndexAndAliasCreated = finalListener.delegateFailureAndWrap((delegate, success) -> { - final ClusterHealthRequest request = new ClusterHealthRequest(AnomalyDetectorsIndex.jobStateIndexWriteAlias()) - .waitForYellowStatus() - .masterNodeTimeout(masterNodeTimeout); + final ClusterHealthRequest request = new ClusterHealthRequest( + masterNodeTimeout, + AnomalyDetectorsIndex.jobStateIndexWriteAlias() + ).waitForYellowStatus(); executeAsyncWithOrigin( client, ML_ORIGIN, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java index d4ec7563b868b..1603ad67718c3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java @@ -224,10 +224,9 @@ public static void createSystemIndexIfNecessary( } private static void waitForShardsReady(Client client, String index, TimeValue masterNodeTimeout, ActionListener listener) { - ClusterHealthRequest healthRequest = new ClusterHealthRequest(index).waitForYellowStatus() + ClusterHealthRequest healthRequest = new ClusterHealthRequest(masterNodeTimeout, index).waitForYellowStatus() .waitForNoRelocatingShards(true) - .waitForNoInitializingShards(true) - .masterNodeTimeout(masterNodeTimeout); + .waitForNoInitializingShards(true); executeAsyncWithOrigin( client.threadPool().getThreadContext(), ML_ORIGIN, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index df4f7828d1fed..100f7843713bd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -70,7 +70,7 @@ public void testExecuteAsyncWrapsListener() throws Exception { latch.countDown(); }); - final ClusterHealthRequest request = new ClusterHealthRequest(); + final ClusterHealthRequest request = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT); threadContext.putHeader(headerName, headerValue); ClientHelper.executeAsyncWithOrigin(threadContext, origin, request, listener, (req, listener1) -> { @@ -110,7 +110,13 @@ public void testExecuteWithClient() throws Exception { }).when(client).execute(any(), any(), any()); threadContext.putHeader(headerName, headerValue); - ClientHelper.executeAsyncWithOrigin(client, origin, TransportClusterHealthAction.TYPE, new ClusterHealthRequest(), listener); + ClientHelper.executeAsyncWithOrigin( + client, + origin, + TransportClusterHealthAction.TYPE, + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT), + listener + ); latch.await(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncSearchIndexServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncSearchIndexServiceTests.java index 0afc611f43d4d..4096b5f30f0fd 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncSearchIndexServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncSearchIndexServiceTests.java @@ -360,7 +360,10 @@ public void testMaxAsyncSearchResponseSize() throws Exception { // setting very small limit for the max size of async search response int limit = randomIntBetween(1, 125); - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); updateSettingsRequest.transientSettings(Settings.builder().put("search.max_async_search_response_size", limit + "b")); assertAcked(clusterAdmin().updateSettings(updateSettingsRequest).actionGet()); String expectedErrMsg = "Can't store an async search response larger than [" @@ -393,7 +396,10 @@ public void testMaxAsyncSearchResponseSize() throws Exception { assertEquals(expectedErrMsg, e2.getMessage()); } finally { // restoring limit - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); updateSettingsRequest.transientSettings(Settings.builder().put("search.max_async_search_response_size", (String) null)); assertAcked(clusterAdmin().updateSettings(updateSettingsRequest).actionGet()); } diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 3a696a196175f..6a615d648a850 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -1176,7 +1176,11 @@ private void assertDownsampleIndex(String sourceIndex, String downsampleIndex, D .map(mappingMetadata -> mappingMetadata.getValue().sourceAsMap()) .orElseThrow(() -> new IllegalArgumentException("No mapping found for downsample source index [" + sourceIndex + "]")); - final IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().getMetadata().index(sourceIndex); + final IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getMetadata() + .index(sourceIndex); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); final MapperService mapperService = indicesService.createIndexMapperServiceForValidation(indexMetadata); final CompressedXContent sourceIndexCompressedXContent = new CompressedXContent(sourceIndexMappings); diff --git a/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java b/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java index 62a5098f7a1e4..24669b694a33b 100644 --- a/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java +++ b/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichMultiNodeIT.java @@ -203,7 +203,7 @@ public void testExecutePolicyWithDedicatedMasterNodes() throws Exception { var getTaskRequest = new 
GetTaskRequest().setTaskId(executePolicyResponse.getTaskId()).setWaitForCompletion(true); clusterAdmin().getTask(getTaskRequest).actionGet(); - var discoNodes = clusterAdmin().state(new ClusterStateRequest()).actionGet().getState().nodes(); + var discoNodes = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet().getState().nodes(); assertThat(discoNodes.get(executePolicyResponse.getTaskId().getNodeId()).isMasterNode(), is(false)); } @@ -230,7 +230,7 @@ public void testExecutePolicyNeverOnElectedMaster() throws Exception { var getTaskRequest = new GetTaskRequest().setTaskId(executePolicyResponse.getTaskId()).setWaitForCompletion(true); clusterAdmin().getTask(getTaskRequest).actionGet(); - var discoNodes = clusterAdmin().state(new ClusterStateRequest()).actionGet().getState().nodes(); + var discoNodes = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet().getState().nodes(); assertThat(executePolicyResponse.getTaskId().getNodeId(), not(equalTo(discoNodes.getMasterNodeId()))); } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index cab115ddc4964..298a7c307efa5 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -51,6 +51,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.Mapper; @@ -684,7 +685,10 @@ private void setIndexReadOnly(ActionListener listener) { } private void waitForIndexGreen(ActionListener listener) { - ClusterHealthRequest request = new ClusterHealthRequest(enrichIndexName).waitForGreenStatus(); + ClusterHealthRequest request = new ClusterHealthRequest( + TimeValue.THIRTY_SECONDS /* TODO should this be longer/configurable? 
*/ , + enrichIndexName + ).waitForGreenStatus(); enrichOriginClient().admin().cluster().health(request, listener); } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java index 32f39b0de1ef4..568f1074d1a5f 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java @@ -136,7 +136,7 @@ public void testDeleteIsNotLocked() throws Exception { Settings settings = Settings.builder() .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), destructiveRequiresName) .build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setPersistentSettings(settings)); + assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(settings)); } createIndex(EnrichPolicy.getIndexName(name, 1001)); @@ -174,7 +174,7 @@ public void onFailure(final Exception e) { if (destructiveRequiresName) { Settings settings = Settings.builder().putNull(DestructiveOperations.REQUIRES_NAME_SETTING.getKey()).build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setPersistentSettings(settings)); + assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(settings)); } EnrichPolicyLocks enrichPolicyLocks = getInstanceFromNode(EnrichPolicyLocks.class); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java index 84738f733f86b..00efeb37a033b 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java @@ -118,14 +118,14 @@ protected Collection> nodePlugins() { protected void setRequestCircuitBreakerLimit(ByteSizeValue limit) { if (limit != null) { assertAcked( - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings( Settings.builder().put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), limit).build() ) ); } else { assertAcked( - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings( Settings.builder().putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()).build() ) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index 8d1d81795bf46..35c37eea10362 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -128,7 +128,7 @@ public void testMetadataIndex() { void waitForNoInitializingShards(Client client, TimeValue timeout, String... 
indices) { ClusterHealthResponse resp = client.admin() .cluster() - .prepareHealth(indices) + .prepareHealth(TEST_REQUEST_TIMEOUT, indices) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setWaitForNoInitializingShards(true) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 0ec2f0da2d2a6..e0bef22718d0d 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -1643,7 +1643,8 @@ public void testDefaultTruncationSizeSetting() { Settings settings = Settings.builder().put(EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getKey(), 1).build(); - ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest().persistentSettings(settings); + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .persistentSettings(settings); client.updateSettings(settingsRequest).actionGet(); try (EsqlQueryResponse results = run("from test")) { @@ -1659,7 +1660,8 @@ public void testMaxTruncationSizeSetting() { Settings settings = Settings.builder().put(EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getKey(), 10).build(); - ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest().persistentSettings(settings); + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .persistentSettings(settings); client.updateSettings(settingsRequest).actionGet(); try (EsqlQueryResponse results = run("from test | limit 40")) { @@ -1677,7 +1679,9 @@ private void clearPersistentSettings(Setting... 
settings) { clearedSettings.putNull(s.getKey()); } - var clearSettingsRequest = new ClusterUpdateSettingsRequest().persistentSettings(clearedSettings.build()); + var clearSettingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings( + clearedSettings.build() + ); admin().cluster().updateSettings(clearSettingsRequest).actionGet(); } diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index ad9900b5b0164..3f7607d9032e5 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -96,7 +96,7 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx final String excludeSetting = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(); updateIndexSettings(Settings.builder().put(excludeSetting, nodeNames.get(0)), "index"); ClusterRerouteUtils.reroute(client(), new CancelAllocationCommand("index", 0, nodeNames.get(0), true)); - assertThat(clusterAdmin().prepareHealth("index").get().getUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index").get().getUnassignedShards(), equalTo(1)); assertThat(client().prepareDelete("index", indexResponse.getId()).get().status(), equalTo(RestStatus.OK)); @@ -108,20 +108,20 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx ); assertThat( - clusterAdmin().prepareState().get().getState().metadata().index("index").getTimestampRange(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("index").getTimestampRange(), sameInstance(IndexLongFieldRange.EMPTY) ); internalCluster().stopNode(nodeNames.get(1)); - assertThat(clusterAdmin().prepareHealth("index").get().getUnassignedShards(), equalTo(2)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index").get().getUnassignedShards(), equalTo(2)); updateIndexSettings(Settings.builder().putNull(excludeSetting), "index"); - assertThat(clusterAdmin().prepareHealth("index").get().getUnassignedShards(), equalTo(2)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index").get().getUnassignedShards(), equalTo(2)); ClusterRerouteUtils.reroute(client(), new AllocateStalePrimaryAllocationCommand("index", 0, nodeNames.get(0), true)); ensureYellowAndNoInitializingShards("index"); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("index"); final IndexLongFieldRange timestampFieldRange = indexMetadata.getTimestampRange(); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); @@ -129,7 +129,7 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse(timestampVal).toEpochMilli())); assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse(timestampVal).toEpochMilli())); - IndexLongFieldRange eventIngestedFieldRange = clusterAdmin().prepareState() 
+ IndexLongFieldRange eventIngestedFieldRange = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .metadata() @@ -188,7 +188,7 @@ public void testTimestampAndEventIngestedFieldTypeExposedByAllIndicesServices() ) ); - final Index index = clusterAdmin().prepareState() + final Index index = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setIndices("index") .setMetadata(true) @@ -292,7 +292,7 @@ public void testTimestampOrEventIngestedFieldTypeExposedByAllIndicesServices() t ) ); - final Index index = clusterAdmin().prepareState() + final Index index = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setIndices("index") .setMetadata(true) diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index ccb917c9dbda5..8ba88865e361a 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -317,7 +317,7 @@ public void testUnfreezeClosedIndices() { .indicesOptions(IndicesOptions.strictExpand()) ) ); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertEquals(IndexMetadata.State.CLOSE, stateResponse.getState().getMetadata().index("idx-closed").getState()); assertEquals(IndexMetadata.State.OPEN, stateResponse.getState().getMetadata().index("idx").getState()); assertHitCount(client().prepareSearch(), 1L); @@ -516,7 +516,10 @@ public void testIgnoreUnavailable() { ) ); assertIndexFrozen("idx"); - assertEquals(IndexMetadata.State.CLOSE, clusterAdmin().prepareState().get().getState().metadata().index("idx-close").getState()); + assertEquals( + IndexMetadata.State.CLOSE, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("idx-close").getState() + ); } public void testUnfreezeClosedIndex() { @@ -525,7 +528,10 @@ public void testUnfreezeClosedIndex() { client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "idx")).actionGet() ); assertAcked(indicesAdmin().prepareClose("idx")); - assertEquals(IndexMetadata.State.CLOSE, clusterAdmin().prepareState().get().getState().metadata().index("idx").getState()); + assertEquals( + IndexMetadata.State.CLOSE, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("idx").getState() + ); expectThrows( IndexNotFoundException.class, client().execute( @@ -545,7 +551,10 @@ public void testUnfreezeClosedIndex() { new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "idx").setFreeze(false) ).actionGet() ); - assertEquals(IndexMetadata.State.OPEN, clusterAdmin().prepareState().get().getState().metadata().index("idx").getState()); + assertEquals( + IndexMetadata.State.OPEN, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("idx").getState() + ); } public void testFreezeIndexIncreasesIndexSettingsVersion() { @@ -553,14 +562,19 @@ public void testFreezeIndexIncreasesIndexSettingsVersion() { createIndex(index, indexSettings(1, 0).build()); prepareIndex(index).setSource("field", "value").get(); - final long settingsVersion = 
clusterAdmin().prepareState().get().getState().metadata().index(index).getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index(index) + .getSettingsVersion(); assertAcked( client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, index)).actionGet() ); assertIndexFrozen(index); assertThat( - clusterAdmin().prepareState().get().getState().metadata().index(index).getSettingsVersion(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index(index).getSettingsVersion(), greaterThan(settingsVersion) ); } @@ -577,7 +591,7 @@ public void testFreezeEmptyIndexWithTranslogOps() throws Exception { final IndicesService indicesService = getInstanceFromNode(IndicesService.class); assertBusy(() -> { - final Index index = clusterAdmin().prepareState().get().getState().metadata().index(indexName).getIndex(); + final Index index = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index(indexName).getIndex(); final IndexService indexService = indicesService.indexService(index); assertThat(indexService.hasShard(0), is(true)); assertThat(indexService.getShard(0).getLastKnownGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); @@ -606,7 +620,7 @@ public void testRecoveryState() { ); assertIndexFrozen(indexName); - final IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index(indexName); + final IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index(indexName); final IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(indexMetadata.getIndex()); for (int i = 0; i < indexMetadata.getNumberOfShards(); i++) { final IndexShard indexShard = indexService.getShardOrNull(i); @@ -674,7 +688,7 @@ public void testComputesTimestampRangeFromMilliseconds() { client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() ); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("index"); final IndexLongFieldRange timestampFieldRange = indexMetadata.getTimestampRange(); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); @@ -706,7 +720,7 @@ public void testComputesTimestampRangeFromNanoseconds() throws IOException { client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() ); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("index"); final IndexLongFieldRange timestampFieldRange = indexMetadata.getTimestampRange(); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); @@ -728,7 +742,7 @@ public void testComputesEventIngestedRangeFromMilliseconds() { client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() ); - IndexMetadata indexMetadata = 
clusterAdmin().prepareState().get().getState().metadata().index("index"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("index"); final IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); @@ -760,7 +774,7 @@ public void testComputesEventIngestedRangeFromNanoseconds() throws IOException { client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() ); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("index"); final IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); @@ -784,7 +798,7 @@ public void testComputesEventIngestedAndTimestampRangesWhenBothPresent() { client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() ); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("index"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("index"); final IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); assertThat(eventIngestedRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index 30d1d6f7c914b..d06a9f9cc19b1 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -180,7 +180,7 @@ public void testSingleNodeCluster() throws Exception { logger.info("Creating index [test]"); CreateIndexResponse createIndexResponse = indicesAdmin().create(new CreateIndexRequest("test").settings(settings)).actionGet(); assertAcked(createIndexResponse); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1)); assertBusy(() -> { assertTrue(indexExists("test")); }); @@ -188,7 +188,7 @@ public void testSingleNodeCluster() throws Exception { assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); assertNotNull(indexLifecycleService.getScheduledJob()); assertBusy(() -> { - LifecycleExecutionState lifecycleState = clusterAdmin().prepareState() + LifecycleExecutionState lifecycleState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getMetadata() @@ -425,13 +425,13 @@ public void testMasterDedicatedDataDedicated() throws Exception { CreateIndexResponse createIndexResponse = indicesAdmin().create(new 
CreateIndexRequest("test").settings(settings)).actionGet(); assertAcked(createIndexResponse); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node2); assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(1)); assertBusy(() -> assertTrue(indexExists("test"))); assertBusy(() -> { - LifecycleExecutionState lifecycleState = clusterAdmin().prepareState() + LifecycleExecutionState lifecycleState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getMetadata() diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java index 623b676e0e0ee..5287d149fae3d 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java @@ -91,7 +91,7 @@ public void testLookbackOnly() throws Exception { indexDocs(logger, "data-1", numDocs, twoWeeksAgo, oneWeekAgo); client().admin().indices().prepareCreate("data-2").setMapping("time", "type=date").get(); - clusterAdmin().prepareHealth("data-1", "data-2").setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "data-1", "data-2").setWaitForYellowStatus().get(); long numDocs2 = randomIntBetween(32, 2048); indexDocs(logger, "data-2", numDocs2, oneWeekAgo, now); @@ -137,7 +137,7 @@ public void testLookbackOnlyDataStream() throws Exception { long twoWeeksAgo = oneWeekAgo - 604800000; indexDocs(logger, "datafeed_data_stream", numDocs, twoWeeksAgo, oneWeekAgo); - clusterAdmin().prepareHealth("datafeed_data_stream").setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "datafeed_data_stream").setWaitForYellowStatus().get(); Job.Builder job = createScheduledJob("lookback-data-stream-job"); PutJobAction.Response putJobResponse = putJob(job); @@ -320,7 +320,7 @@ public void testStopAndRestartCompositeDatafeed() throws Exception { Intervals.alignToCeil(oneWeekAgo, intervalMillis), Intervals.alignToFloor(now, intervalMillis) ); - clusterAdmin().prepareHealth(indexName).setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).setWaitForYellowStatus().get(); String scrollJobId = "stop-restart-scroll"; Job.Builder scrollJob = createScheduledJob(scrollJobId); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java index 90f9c721d25a9..e8acc37e0e153 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java @@ -281,7 +281,7 @@ private void assertInferenceModelPersisted(String jobId, Matcher> analyticsTaskList() { - ClusterState masterClusterState = 
clusterAdmin().prepareState().all().get().getState(); + ClusterState masterClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); PersistentTasksCustomMetadata persistentTasks = masterClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); return persistentTasks != null ? persistentTasks.findTasks(MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, task -> true) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index ca5ecd80a83bb..3b705e63a145f 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -279,7 +279,7 @@ protected void setUpgradeModeTo(boolean enabled) { } protected boolean upgradeMode() { - ClusterState masterClusterState = clusterAdmin().prepareState().all().get().getState(); + ClusterState masterClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); MlMetadata mlMetadata = MlMetadata.getMlMetadata(masterClusterState); return mlMetadata.isUpgradeMode(); } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java index 1233004552023..1f38fb05cb6ef 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java @@ -62,7 +62,7 @@ public void testEnableUpgradeMode() throws Exception { hasSize(2) ); - ClusterState masterClusterState = clusterAdmin().prepareState().all().get().getState(); + ClusterState masterClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); PersistentTasksCustomMetadata persistentTasks = masterClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); assertThat(persistentTasks.findTasks(MlTasks.DATAFEED_TASK_NAME, task -> true), hasSize(1)); @@ -71,7 +71,7 @@ public void testEnableUpgradeMode() throws Exception { // Set the upgrade mode setting setUpgradeModeTo(true); - masterClusterState = clusterAdmin().prepareState().all().get().getState(); + masterClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); // Assert state for tasks still exists and that the upgrade setting is set persistentTasks = masterClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); @@ -99,7 +99,7 @@ public void testEnableUpgradeMode() throws Exception { // Disable the setting setUpgradeModeTo(false); - masterClusterState = clusterAdmin().prepareState().all().get().getState(); + masterClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); persistentTasks = masterClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); assertThat(persistentTasks.findTasks(MlTasks.DATAFEED_TASK_NAME, task -> true), hasSize(1)); diff --git 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java index 1b1ad986bc8a1..da86bcf01b406 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java @@ -133,7 +133,9 @@ public void testMLFeatureReset() throws Exception { client().execute(DeletePipelineTransportAction.TYPE, new DeletePipelineRequest("feature_reset_inference_pipeline")).actionGet(); createdPipelines.remove("feature_reset_inference_pipeline"); - assertBusy(() -> assertThat(countInferenceProcessors(clusterAdmin().prepareState().get().getState()), equalTo(0))); + assertBusy( + () -> assertThat(countInferenceProcessors(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState()), equalTo(0)) + ); client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertBusy(() -> { List indices = Arrays.asList(client().admin().indices().prepareGetIndex().addIndices(".ml*").get().indices()); @@ -224,7 +226,7 @@ void createModelDeployment() { } private boolean isResetMode() { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); return MlMetadata.getMlMetadata(state).isResetMode(); } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java index 592f42e13e301..98ad515680734 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java @@ -251,7 +251,7 @@ public void testAutoCloseJobWithDatafeed() throws Exception { DatafeedState datafeedState = getDatafeedStats(datafeedId).getDatafeedState(); assertEquals(DatafeedState.STOPPED, datafeedState); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(state, RELATED_TASKS); assertEquals(0, tasks.size()); }); @@ -276,7 +276,7 @@ public void testAutoCloseJobWithDatafeed() throws Exception { DatafeedState datafeedState = getDatafeedStats(datafeedId).getDatafeedState(); assertEquals(DatafeedState.STARTED, datafeedState); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(state, RELATED_TASKS); assertEquals(2, tasks.size()); }); @@ -296,7 +296,7 @@ public void testAutoCloseJobWithDatafeed() throws Exception { DatafeedState datafeedState = getDatafeedStats(datafeedId).getDatafeedState(); assertEquals(DatafeedState.STOPPED, datafeedState); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(state, RELATED_TASKS); assertEquals(0, tasks.size()); }); @@ -336,7 +336,7 @@ public void 
testMachineLearningStartDatafeedActionRestricted() throws Exception assertBusy(() -> { JobState jobState = getJobStats(jobId).getState(); assertEquals(JobState.CLOSED, jobState); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(state, RELATED_TASKS); assertEquals(0, tasks.size()); }); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index 6dbec53994b2e..a6eecfeb94c32 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -230,7 +230,7 @@ public void testDedicatedMlNode() throws Exception { OpenJobAction.Request openJobRequest = new OpenJobAction.Request(job.getId()); client().execute(OpenJobAction.INSTANCE, openJobRequest).actionGet(); assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); PersistentTask task = tasks.getTask(MlTasks.jobTaskId(jobId)); @@ -328,7 +328,7 @@ public void testMaxConcurrentJobAllocations() throws Exception { } ensureStableCluster(1, nonMlNode); assertBusy(() -> { - ClusterState state = client(nonMlNode).admin().cluster().prepareState().get().getState(); + ClusterState state = client(nonMlNode).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(state, MlTasks.JOB_TASK_NAME); assertEquals(numJobs, tasks.size()); for (PersistentTask task : tasks) { @@ -411,7 +411,7 @@ public void testMlStateAndResultsIndicesNotAvailable() throws Exception { CloseJobAction.Request closeJobRequest = new CloseJobAction.Request(jobId); client().execute(CloseJobAction.INSTANCE, closeJobRequest).actionGet(); assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(clusterState, MlTasks.JOB_TASK_NAME); assertEquals(0, tasks.size()); }); @@ -502,7 +502,7 @@ public void testCloseUnassignedLazyJobAndDatafeed() { } private void assertJobTask(String jobId, JobState expectedState, boolean hasExecutorNode) { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(clusterState, MlTasks.JOB_TASK_NAME); assertEquals(1, tasks.size()); PersistentTask task = tasks.get(0); @@ -524,7 +524,7 @@ private void assertJobTask(String jobId, JobState expectedState, boolean hasExec private CheckedRunnable checkAllJobsAreAssignedAndOpened(int numJobs) { return () -> { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(state, MlTasks.JOB_TASK_NAME); assertEquals(numJobs, tasks.size()); for (PersistentTask task : tasks) { diff --git 
a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java index b71ecd4858533..8fddfa47c377c 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java @@ -244,7 +244,7 @@ private void setupJobAndDatafeed(String jobId, String datafeedId, Long endTimeMs private void setSkipUnavailable(boolean skip) { client(LOCAL_CLUSTER).admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put("cluster.remote." + REMOTE_CLUSTER + ".skip_unavailable", skip).build()) .get(); } @@ -252,7 +252,7 @@ private void setSkipUnavailable(boolean skip) { private void clearSkipUnavailable() { client(LOCAL_CLUSTER).admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull("cluster.remote." + REMOTE_CLUSTER + ".skip_unavailable").build()) .get(); } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 33fd7c108863b..17fe20c5115ff 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -135,7 +135,12 @@ public void testLoseDedicatedMasterNode() throws Exception { Settings masterDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName()); internalCluster().stopCurrentMasterNode(); assertBusy(() -> { - ClusterState state = client(mlAndDataNode).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = client(mlAndDataNode).admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertNull(state.nodes().getMasterNodeId()); }); logger.info("Restarting dedicated master node"); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java index b62a524245d88..f6a58002bbac5 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java @@ -63,7 +63,7 @@ public void testCloseFailedJob() throws Exception { new GetJobsStatsAction.Request("close-failed-job-2") ).actionGet(); assertEquals(statsResponse.getResponse().results().get(0).getState(), JobState.CLOSED); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List> tasks = findTasks(state, MlTasks.JOB_TASK_NAME); assertEquals(1, tasks.size()); // now just double check that the first job is still opened: @@ -147,7 +147,10 @@ private void verifyMaxNumberOfJobsLimit(int numNodes, int maxNumberOfJobsPerNode boolean expectMemoryLimitBeforeCountLimit = 
maxJobsPerNodeDueToMemoryLimit < maxNumberOfJobsPerNode; for (int i = 1; i <= (clusterWideMaxNumberOfJobs + 1); i++) { if (i == 2 && testDynamicChange) { - ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest().persistentSettings( + ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ).persistentSettings( Settings.builder().put(MachineLearning.MAX_OPEN_JOBS_PER_NODE.getKey(), maxNumberOfJobsPerNode).build() ); client().execute(ClusterUpdateSettingsAction.INSTANCE, clusterUpdateSettingsRequest).actionGet(); @@ -215,7 +218,7 @@ private void verifyMaxNumberOfJobsLimit(int numNodes, int maxNumberOfJobsPerNode for (Client client : clients()) { PersistentTasksCustomMetadata tasks = client.admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getMetadata() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java index 33e5582ec992a..ebc92eee61ea2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java @@ -74,7 +74,7 @@ public MockClientBuilder addClusterStatusYellowResponse() { PlainActionFuture actionFuture = mock(PlainActionFuture.class); ClusterHealthRequestBuilder clusterHealthRequestBuilder = mock(ClusterHealthRequestBuilder.class); - when(clusterAdminClient.prepareHealth()).thenReturn(clusterHealthRequestBuilder); + when(clusterAdminClient.prepareHealth(any())).thenReturn(clusterHealthRequestBuilder); when(clusterHealthRequestBuilder.setWaitForYellowStatus()).thenReturn(clusterHealthRequestBuilder); when(clusterHealthRequestBuilder.execute()).thenReturn(actionFuture); when(actionFuture.actionGet()).thenReturn(mock(ClusterHealthResponse.class)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 5aaaa3ff958fd..4ac028ec3af21 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -513,7 +513,7 @@ protected String awaitJobOpenedAndAssigned(String jobId, String queryNode) throw } protected void assertRecentLastTaskStateChangeTime(String taskId, Duration howRecent, String queryNode) { - ClusterStateRequest csRequest = new ClusterStateRequest().clear().metadata(true); + ClusterStateRequest csRequest = new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().metadata(true); ClusterStateResponse csResponse = client(queryNode).execute(ClusterStateAction.INSTANCE, csRequest).actionGet(); PersistentTasksCustomMetadata tasks = csResponse.getState().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); assertNotNull(tasks); diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java index 3b61b0496c64d..cfeb55feba2c6 100644 --- 
a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java @@ -160,7 +160,7 @@ private Exporter getExporter(String name) { } private ActionFuture setVerificationMode(String name, SslVerificationMode mode) { - final ClusterUpdateSettingsRequest updateSettings = new ClusterUpdateSettingsRequest(); + final ClusterUpdateSettingsRequest updateSettings = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); final String verificationModeName = randomBoolean() ? mode.name() : mode.name().toLowerCase(Locale.ROOT); final Settings settings = Settings.builder() .put("xpack.monitoring.exporters." + name + ".type", HttpExporter.TYPE) diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index 350d91048ac97..daea70abd29e3 100644 --- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -207,7 +207,7 @@ public void testMonitoringService() throws Exception { ); final Settings settings = Settings.builder().put("cluster.metadata.display_name", "my cluster").build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(settings)); + assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings)); whenExportersAreReady(() -> { assertBusy(() -> { @@ -373,7 +373,7 @@ public void enableMonitoring() throws Exception { .put("xpack.monitoring.exporters._local.enabled", true) .build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(settings)); + assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings)); assertBusy(() -> assertThat("[_local] exporter not enabled yet", getMonitoringUsageExportersDefined(), is(true))); @@ -401,7 +401,7 @@ public void disableMonitoring() throws Exception { .putNull("cluster.metadata.display_name") .build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(settings)); + assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings)); assertBusy(() -> assertThat("Exporters are not yet stopped", getMonitoringUsageExportersDefined(), is(false))); assertBusy(() -> { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java index bf2825155530c..182930d47065b 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java @@ -98,7 +98,11 @@ protected void masterOperation( Settings.Builder decommissionAlertSetting = Settings.builder().put(Monitoring.MIGRATION_DECOMMISSION_ALERTS.getKey(), true); client.admin() .cluster() - 
.prepareUpdateSettings() + .prepareUpdateSettings( + request.masterNodeTimeout(), + /* TODO expose separate ack timeout? use masterNodeTimeout() for now */ + request.masterNodeTimeout() + ) .setPersistentSettings(decommissionAlertSetting) .execute(completeOnManagementThread(listener)); } catch (Exception e) { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java index 3c085b9bb2820..fbe2e957fc73b 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java @@ -67,7 +67,7 @@ public void testMultipleNodes() throws Exception { final int nbNodes = nodes; assertBusy(() -> { assertThat(cluster().size(), equalTo(nbNodes)); - assertNoTimeout(clusterAdmin().prepareHealth().setWaitForNodes(Integer.toString(nbNodes)).get()); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes(Integer.toString(nbNodes)).get()); }); enableMonitoringCollection(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index 69ac9d4ddd876..93e055b58ddc3 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -246,7 +246,7 @@ private void checkMonitoringTemplates() { * fields and belongs to the right data or timestamped index. 
*/ private void checkMonitoringDocs() { - ClusterStateResponse response = clusterAdmin().prepareState().get(); + ClusterStateResponse response = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); String customTimeFormat = response.getState() .getMetadata() .persistentSettings() diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java index 79227f3dd2cee..a9f4ce806cdc8 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java +++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java @@ -119,7 +119,7 @@ public void testShardAllocationOnInvalidLicense() throws Exception { assertBusy( () -> assertEquals( ClusterHealthStatus.RED, - clusterAdmin().prepareHealth(indexName).get().getIndices().get(indexName).getStatus() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getIndices().get(indexName).getStatus() ) ); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/ProfilingTestCase.java index 67825f6ce8570..f6c55d9894c3a 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/action/ProfilingTestCase.java @@ -95,13 +95,13 @@ protected boolean requiresDataSetup() { protected void waitForIndices(Collection indices) throws Exception { assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertTrue("Timed out waiting for indices to be created", state.metadata().indices().keySet().containsAll(indices)); }); } protected void updateProfilingTemplatesEnabled(boolean newValue) { - ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); request.persistentSettings(Settings.builder().put(ProfilingPlugin.PROFILING_TEMPLATES_ENABLED.getKey(), newValue).build()); ClusterUpdateSettingsResponse response = clusterAdmin().updateSettings(request).actionGet(); assertTrue("Update of profiling templates enabled setting is not acknowledged", response.isAcknowledged()); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java index f73890e50a3a2..3ee49cce85a8a 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java @@ -342,7 +342,7 @@ protected void assertRecoveryStats(String indexName, boolean preWarmEnabled) thr } protected 
DiscoveryNodes getDiscoveryNodes() { - return clusterAdmin().prepareState().clear().setNodes(true).get().getState().nodes(); + return clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setNodes(true).get().getState().nodes(); } protected void assertExecutorIsIdle(String executorName) throws Exception { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index 7ce1da3a07917..67d9d7a82acf3 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -147,7 +147,7 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { assertShardFolders(indexName, false); assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(indexName) @@ -311,7 +311,7 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { assertBusy(() -> assertShardFolders(restoredIndexName, true), 30, TimeUnit.SECONDS); assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(restoredIndexName) @@ -365,7 +365,9 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); - final DiscoveryNode dataNode = randomFrom(clusterAdmin().prepareState().get().getState().nodes().getDataNodes().values()); + final DiscoveryNode dataNode = randomFrom( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes().values() + ); updateIndexSettings( Settings.builder() @@ -378,7 +380,7 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { ); assertFalse( - clusterAdmin().prepareHealth(restoredIndexName) + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, restoredIndexName) .setWaitForNoRelocatingShards(true) .setWaitForEvents(Priority.LANGUID) .get() diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/PrevalidateNodeRemovalWithSearchableSnapshotIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/PrevalidateNodeRemovalWithSearchableSnapshotIntegTests.java index 37e2427ae6891..96f4ae7fc891e 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/PrevalidateNodeRemovalWithSearchableSnapshotIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/PrevalidateNodeRemovalWithSearchableSnapshotIntegTests.java @@ -52,7 +52,7 @@ public void testNodeRemovalFromClusterWihRedSearchableSnapshotIndex() throws Exc // Make sure the searchable snapshot index is red internalCluster().stopNode(node1); assertBusy(() -> { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(restoredIndexName) + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, restoredIndexName) 
.setWaitForStatus(ClusterHealthStatus.RED) .setWaitForEvents(Priority.LANGUID) .execute() @@ -67,7 +67,8 @@ public void testNodeRemovalFromClusterWihRedSearchableSnapshotIndex() throws Exc case 2 -> req.setExternalIds(internalCluster().clusterService(node2).localNode().getExternalId()); default -> throw new IllegalStateException("Unexpected value"); } - PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req.build()).get(); + PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req.build(TEST_REQUEST_TIMEOUT)) + .get(); assertTrue(resp.getPrevalidation().isSafe()); assertThat(resp.getPrevalidation().message(), equalTo("all red indices are searchable snapshot indices")); assertThat(resp.getPrevalidation().nodes().size(), equalTo(1)); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index d5e87558d1ced..faf41e7e655a8 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -1067,7 +1067,14 @@ private void indexDocumentsWithTimestampAndEventIngestedDates(String indexName, } private IndexMetadata getIndexMetadata(String indexName) { - return clusterAdmin().prepareState().clear().setMetadata(true).setIndices(indexName).get().getState().metadata().index(indexName); + return clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .setIndices(indexName) + .get() + .getState() + .metadata() + .index(indexName); } private void waitUntilRecoveryIsDone(String index) throws Exception { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index c99f2be0a6cad..84a632a419ead 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -145,7 +145,7 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { assertShardFolders(indexName, false); - IndexMetadata indexMetadata = clusterAdmin().prepareState() + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(indexName) @@ -251,7 +251,7 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { ensureGreen(restoredIndexName); assertBusy(() -> assertShardFolders(restoredIndexName, true), 30, TimeUnit.SECONDS); - indexMetadata = clusterAdmin().prepareState() + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(restoredIndexName) @@ -285,7 +285,9 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { 
internalCluster().ensureAtLeastNumDataNodes(2); - final DiscoveryNode dataNode = randomFrom(clusterAdmin().prepareState().get().getState().nodes().getDataNodes().values()); + final DiscoveryNode dataNode = randomFrom( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes().values() + ); updateIndexSettings( Settings.builder() @@ -298,7 +300,7 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { ); assertFalse( - clusterAdmin().prepareHealth(restoredIndexName) + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, restoredIndexName) .setWaitForNoRelocatingShards(true) .setWaitForEvents(Priority.LANGUID) .get() @@ -630,7 +632,7 @@ public void testMountedSnapshotHasNoReplicasByDefault() throws Exception { assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); ensureGreen(restoredIndexName); - final ClusterState state = clusterAdmin().prepareState().clear().setRoutingTable(true).get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setRoutingTable(true).get().getState(); assertThat( state.toString(), state.routingTable().index(restoredIndexName).shard(0).size(), @@ -751,7 +753,7 @@ public void testSnapshotMountedIndexWithTimestampsRecordsTimestampRangeInIndexMe mountSnapshot(repositoryName, snapshotOne.getName(), indexName, indexName, Settings.EMPTY); ensureGreen(indexName); - final IndexMetadata indexMetadata = clusterAdmin().prepareState() + final IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices(indexName) @@ -1044,7 +1046,7 @@ public void testSnapshotOfSearchableSnapshotCanBeRestoredBeforeRepositoryRegiste assertBusy(() -> { final RestoreInProgress restoreInProgress = RestoreInProgress.get( - clusterAdmin().prepareState().clear().setCustoms(true).get().getState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setCustoms(true).get().getState() ); assertTrue(Strings.toString(restoreInProgress, true, true), restoreInProgress.isEmpty()); }); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java index 73f12f98f8ca0..a7e83325bef87 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java @@ -148,7 +148,7 @@ public void testShardAllocationOnInvalidLicense() throws Exception { assertBusy( () -> assertEquals( ClusterHealthStatus.RED, - clusterAdmin().prepareHealth(indexName).get().getIndices().get(indexName).getStatus() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getIndices().get(indexName).getStatus() ) ); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocationIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocationIntegTests.java index 6feccfc5ed117..6c6993b9a4ef4 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocationIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocationIntegTests.java @@ -50,7 +50,7 @@ public void testAllocatesToBestAvailableNodeOnRestart() throws Exception { setAllocation(EnableAllocationDecider.Allocation.ALL); ensureGreen(restoredIndex); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals( state.nodes().resolveNode(firstDataNode).getId(), state.routingTable().index(restoredIndex).shard(0).primaryShard().currentNodeId() @@ -88,7 +88,7 @@ public void testAllocatesReplicaToBestAvailableNodeOnRestart() throws Exception setAllocation(EnableAllocationDecider.Allocation.ALL); ensureGreen(restoredIndex); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final Set nodesWithCache = Set.of( state.nodes().resolveNode(firstDataNode).getId(), state.nodes().resolveNode(secondDataNode).getId() diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java index a34bcd16c375b..730118ceb6eea 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java @@ -241,7 +241,7 @@ public void testHighWatermarkCanNotBeExceededOnColdNode() throws Exception { // The cold/frozen data node has enough disk space to hold all the shards assertBusy(() -> { - var state = clusterAdmin().prepareState().setRoutingTable(true).get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setRoutingTable(true).get().getState(); assertThat( state.routingTable() .allShards() @@ -257,7 +257,7 @@ public void testHighWatermarkCanNotBeExceededOnColdNode() throws Exception { mountIndices(indicesStoresSizes.keySet(), "extra-", repositoryName, snapshot, storage); assertBusy(() -> { - var state = clusterAdmin().prepareState().setRoutingTable(true).get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setRoutingTable(true).get().getState(); assertThat( state.routingTable() .allShards() @@ -294,7 +294,7 @@ public void testHighWatermarkCanNotBeExceededWithInitializingSearchableSnapshots ); ensureStableCluster(3); - String coldNodeId = clusterAdmin().prepareState().get().getState().nodes().resolveNode(coldNodeName).getId(); + String coldNodeId = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().resolveNode(coldNodeName).getId(); logger.info("--> reducing disk size of node [{}/{}] so that all shards except one can fit on the node", coldNodeName, coldNodeId); String indexToSkip = randomFrom(indicesStoresSizes.keySet()); Map indicesToBeMounted = indicesStoresSizes.entrySet() @@ -316,7 +316,7 @@ 
public void testHighWatermarkCanNotBeExceededWithInitializingSearchableSnapshots mountIndices(indicesToBeMounted.keySet(), prefix, repositoryName, snapshotName, FULL_COPY); assertBusy(() -> { - var state = clusterAdmin().prepareState().setRoutingTable(true).get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setRoutingTable(true).get().getState(); assertThat( state.routingTable() .allShards() @@ -333,7 +333,7 @@ public void testHighWatermarkCanNotBeExceededWithInitializingSearchableSnapshots mountIndices(List.of(indexToSkip), prefix, repositoryName, snapshotName, FULL_COPY); assertBusy(() -> { - var state = clusterAdmin().prepareState().setRoutingTable(true).get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setRoutingTable(true).get().getState(); assertThat(state.routingTable().index(prefix + indexToSkip).shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); }); @@ -343,7 +343,7 @@ public void testHighWatermarkCanNotBeExceededWithInitializingSearchableSnapshots mockRepository.unlockRestore(); assertBusy(() -> { - var state = clusterAdmin().prepareState().setRoutingTable(true).get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setRoutingTable(true).get().getState(); assertThat(state.routingTable().index(prefix + indexToSkip).shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat( state.routingTable() diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java index d6291225f0c1a..9dadb75e87cef 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java @@ -39,7 +39,8 @@ public void testAllocationDisabled() throws Exception { internalCluster().restartNode(indexNode); } - ClusterHealthResponse response = clusterAdmin().health(new ClusterHealthRequest(restoredIndexName)).actionGet(); + ClusterHealthResponse response = clusterAdmin().health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, restoredIndexName)) + .actionGet(); assertThat(response.getUnassignedShards(), Matchers.equalTo(numPrimaries)); setAllocateOnRollingRestart(true); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotShutdownIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotShutdownIntegTests.java index 6a1de58e97039..1fd30b5e5b9a4 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotShutdownIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotShutdownIntegTests.java @@ -58,7 +58,12 @@ public void testAllocationDisabledDuringShutdown() throws Exception { final Set indexNodes = 
restoredIndexNames.stream() .flatMap(index -> internalCluster().nodesInclude(index).stream()) .collect(Collectors.toSet()); - final ClusterState state = clusterAdmin().prepareState().clear().setRoutingTable(true).setNodes(true).get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setRoutingTable(true) + .setNodes(true) + .get() + .getState(); final Map nodeNameToId = state.getNodes() .stream() .collect(Collectors.toMap(DiscoveryNode::getName, DiscoveryNode::getId)); @@ -82,8 +87,9 @@ public void testAllocationDisabledDuringShutdown() throws Exception { @Override public Settings onNodeStopped(String nodeName) throws Exception { assertBusy(() -> { - ClusterHealthResponse response = clusterAdmin().health(new ClusterHealthRequest(restoredIndexNamesArray)) - .actionGet(); + ClusterHealthResponse response = clusterAdmin().health( + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, restoredIndexNamesArray) + ).actionGet(); assertThat(response.getUnassignedShards(), Matchers.equalTo(shards)); }); return super.onNodeStopped(nodeName); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotsRelocationIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotsRelocationIntegTests.java index a45d6b5c72a6f..434ed6822e2de 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotsRelocationIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotsRelocationIntegTests.java @@ -75,10 +75,15 @@ public void testRelocationWaitsForPreWarm() throws Exception { }); assertBusy(() -> assertSame(RecoveryState.Stage.FINALIZE, getRelocations(restoredIndex).get(0).getStage())); - final Index restoredIdx = clusterAdmin().prepareState().get().getState().metadata().index(restoredIndex).getIndex(); + final Index restoredIdx = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index(restoredIndex) + .getIndex(); final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, secondDataNode); assertEquals(1, indicesService.indexService(restoredIdx).getShard(0).outstandingCleanFilesConditions()); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final String primaryNodeId = state.routingTable().index(restoredIndex).shard(0).primaryShard().currentNodeId(); final DiscoveryNode primaryNode = state.nodes().resolveNode(primaryNodeId); assertEquals(firstDataNode, primaryNode.getName()); @@ -87,7 +92,7 @@ public void testRelocationWaitsForPreWarm() throws Exception { latch.countDown(); assertFalse( - clusterAdmin().prepareHealth(restoredIndex) + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, restoredIndex) .setWaitForNoRelocatingShards(true) .setWaitForEvents(Priority.LANGUID) .get() diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPersistentCacheIntegTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPersistentCacheIntegTests.java index 93d60f161e653..ca2e683736be6 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPersistentCacheIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPersistentCacheIntegTests.java @@ -79,7 +79,12 @@ public void testCacheSurviveRestart() throws Exception { assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); assertAcked(client().admin().indices().prepareDelete(indexName)); - final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState().clear().setNodes(true).get().getState().nodes(); + final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get() + .getState() + .nodes(); final String dataNode = randomFrom(discoveryNodes.getDataNodes().values()).getName(); mountSnapshot( @@ -94,7 +99,7 @@ public void testCacheSurviveRestart() throws Exception { assertExecutorIsIdle(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); assertExecutorIsIdle(SearchableSnapshots.CACHE_PREWARMING_THREAD_POOL_NAME); - final Index restoredIndex = clusterAdmin().prepareState() + final Index restoredIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .get() @@ -208,7 +213,11 @@ public void testPersistentCacheCleanUpAfterRelocation() throws Exception { .allMatch(recoveryState -> recoveryState.getStage() == RecoveryState.Stage.DONE) ); - final ClusterStateResponse state = clusterAdmin().prepareState().clear().setMetadata(true).setIndices(mountedIndexName).get(); + final ClusterStateResponse state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .setIndices(mountedIndexName) + .get(); final Index mountedIndex = state.getState().metadata().index(mountedIndexName).getIndex(); final Set dataNodes = new HashSet<>(); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java index b260f6cf2a891..e847dc16f2dea 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java @@ -124,7 +124,7 @@ public void testNodesCachesStats() throws Exception { assertThat(clearCacheResponse.getSuccessfulShards(), greaterThan(0)); assertThat(clearCacheResponse.getFailedShards(), equalTo(0)); - final String[] dataNodesWithFrozenShards = clusterAdmin().prepareState() + final String[] dataNodesWithFrozenShards = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .routingTable() diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java index 7b372ddc53d80..3896a57027a62 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/PartiallyCachedShardAllocationIntegTests.java @@ -107,7 +107,7 @@ public void testPartialSearchableSnapshotNotAllocatedToNodesWithoutCache() throw final MountSearchableSnapshotRequest req = prepareMountRequest(); final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().clear().setRoutingTable(true).get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setRoutingTable(true).get().getState(); assertTrue(state.toString(), state.routingTable().index(req.mountedIndexName()).allPrimaryShardsUnassigned()); final var explanation = getClusterAllocationExplanation(client(), req.mountedIndexName(), 0, true); @@ -139,7 +139,12 @@ public void testPartialSearchableSnapshotAllocatedToNodesWithCache() throws Exce assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); ensureGreen(req.mountedIndexName()); - final ClusterState state = clusterAdmin().prepareState().clear().setNodes(true).setRoutingTable(true).get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .setRoutingTable(true) + .get() + .getState(); final Set newNodeIds = newNodeNames.stream().map(n -> state.nodes().resolveNode(n).getId()).collect(Collectors.toSet()); for (ShardRouting shardRouting : state.routingTable().index(req.mountedIndexName()).shardsWithState(ShardRoutingState.STARTED)) { assertThat(state.toString(), newNodeIds, hasItem(shardRouting.currentNodeId())); @@ -160,7 +165,7 @@ public void testOnlyPartialSearchableSnapshotAllocatedToDedicatedFrozenNodes() t createIndex("other-index", Settings.builder().putNull(TIER_PREFERENCE).build()); ensureGreen("other-index"); - final RoutingNodes routingNodes = clusterAdmin().prepareState() + final RoutingNodes routingNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .setNodes(true) @@ -177,7 +182,12 @@ public void testOnlyPartialSearchableSnapshotAllocatedToDedicatedFrozenNodes() t assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); ensureGreen(req.mountedIndexName()); - final ClusterState state = clusterAdmin().prepareState().clear().setNodes(true).setRoutingTable(true).get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .setRoutingTable(true) + .get() + .getState(); final Set newNodeIds = newNodeNames.stream().map(n -> state.nodes().resolveNode(n).getId()).collect(Collectors.toSet()); for (ShardRouting shardRouting : state.routingTable().index(req.mountedIndexName()).shardsWithState(ShardRoutingState.STARTED)) { assertThat(state.toString(), newNodeIds, hasItem(shardRouting.currentNodeId())); @@ -272,7 +282,12 @@ public void testPartialSearchableSnapshotDelaysAllocationUntilNodeCacheStatesKno 
assertFalse("should have failed before success", failurePermits.tryAcquire()); final Map shardCountsByNodeName = new HashMap<>(); - final ClusterState state = clusterAdmin().prepareState().clear().setRoutingTable(true).setNodes(true).get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setRoutingTable(true) + .setNodes(true) + .get() + .getState(); for (RoutingNode routingNode : state.getRoutingNodes()) { shardCountsByNodeName.put( routingNode.node().getName(), diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryStateIntegrationTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryStateIntegrationTests.java index 4e5e6ab976330..9257fc747b752 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryStateIntegrationTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryStateIntegrationTests.java @@ -83,7 +83,7 @@ public void testRecoveryStateRecoveredBytesMatchPhysicalCacheState() throws Exce mountSnapshot(fsRepoName, snapshotName, indexName, restoredIndexName, Settings.EMPTY); ensureGreen(restoredIndexName); - final Index restoredIndex = clusterAdmin().prepareState() + final Index restoredIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .get() @@ -145,7 +145,7 @@ public void testFilesStoredInThePersistentCacheAreMarkedAsReusedInRecoveryState( internalCluster().restartRandomDataNode(); ensureGreen(restoredIndexName); - final Index restoredIndex = clusterAdmin().prepareState() + final Index restoredIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .get() diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java index f17a0552f5834..bd32fd75793c7 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java @@ -260,7 +260,7 @@ private void waitForSnapshotToFinish(String repo, String snapshot) throws Except // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for // it to disappear from the cluster state as well SnapshotsInProgress snapshotsInProgress = SnapshotsInProgress.get( - clusterAdmin().state(new ClusterStateRequest()).get().getState() + clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).get().getState() ); assertTrue(snapshotsInProgress.isEmpty()); }); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndexPrivilegeIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndexPrivilegeIntegTests.java index 0baafc14d9cae..b1ddc0fa5bde0 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndexPrivilegeIntegTests.java +++ 
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/IndexPrivilegeIntegTests.java @@ -559,7 +559,7 @@ private void assertUserExecutes(String user, String action, String index, boolea assertAccessIsAllowed(user, "DELETE", "/" + index); assertUserIsAllowed(user, "create_index", index); // wait until index ready, but as admin - assertNoTimeout(clusterAdmin().prepareHealth(index).setWaitForGreenStatus().get()); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, index).setWaitForGreenStatus().get()); assertAccessIsAllowed(user, "POST", "/" + index + "/_refresh"); assertAccessIsAllowed(user, "GET", "/" + index + "/_analyze", "{ \"text\" : \"test\" }"); assertAccessIsAllowed(user, "POST", "/" + index + "/_flush"); @@ -569,7 +569,7 @@ private void assertUserExecutes(String user, String action, String index, boolea assertAccessIsAllowed(user, "POST", "/" + index + "/_cache/clear"); // indexing a document to have the mapping available, and wait for green state to make sure index is created assertAccessIsAllowed("admin", "PUT", "/" + index + "/_doc/1", jsonDoc); - assertNoTimeout(clusterAdmin().prepareHealth(index).setWaitForGreenStatus().get()); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, index).setWaitForGreenStatus().get()); assertAccessIsAllowed(user, "GET", "/" + index + "/_mapping/field/name"); assertAccessIsAllowed(user, "GET", "/" + index + "/_settings"); assertAccessIsAllowed(user, randomFrom("GET", "POST"), "/" + index + "/_field_caps?fields=*"); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index 21a5b53e89af7..38dd7116acce4 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -229,7 +229,7 @@ private void assertRoleMappingsSaveOK(CountDownLatch savedClusterState, AtomicLo assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).get(); ReservedStateMetadata reservedState = clusterStateResponse.getState() @@ -251,7 +251,7 @@ private void assertRoleMappingsSaveOK(CountDownLatch savedClusterState, AtomicLo equalTo("50mb") ); - ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings( Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb") ); assertEquals( @@ -300,7 +300,7 @@ public void testRoleMappingsApplied() throws Exception { assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(savedClusterState.v2().get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(savedClusterState.v2().get()) ).get(); assertNull( @@ -384,7 +384,7 @@ public void testErrorSaved() throws Exception { assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new 
ClusterStateRequest().waitForMetadataVersion(savedClusterState.v2().get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(savedClusterState.v2().get()) ).get(); assertNull( @@ -440,8 +440,9 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { assertFalse(response.hasMappings()); // cluster state settings are also applied - var clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().waitForMetadataVersion(savedClusterState.v2().get())) - .get(); + var clusterStateResponse = clusterAdmin().state( + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(savedClusterState.v2().get()) + ).get(); assertThat( clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("50mb") diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java index 2ca799e94874c..90ed62c971d9c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java @@ -175,7 +175,7 @@ private void waitForSnapshotToFinish(String repo, String snapshot) throws Except // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for // it to disappear from the cluster state as well SnapshotsInProgress snapshotsInProgress = SnapshotsInProgress.get( - clusterAdmin().state(new ClusterStateRequest()).get().getState() + clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).get().getState() ); assertTrue(snapshotsInProgress.isEmpty()); }); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java index e5a1ff867302c..05dc4325a302a 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java @@ -34,7 +34,7 @@ public void testShrinkIndex() throws Exception { prepareIndex("bigindex").setSource("foo", "bar").get(); } - Map dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); final String mergeNode = discoveryNodes[0].getName(); ensureGreen(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java index 27b85888864c9..42b807b5f045b 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java @@ -158,7 +158,7 @@ public void testEnableDisableBehaviour() throws Exception { assertElasticsearchSecurityException(() -> 
client.admin().indices().prepareStats().get()); assertElasticsearchSecurityException(() -> client.admin().cluster().prepareClusterStats().get()); - assertElasticsearchSecurityException(() -> client.admin().cluster().prepareHealth().get()); + assertElasticsearchSecurityException(() -> client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get()); assertElasticsearchSecurityException(() -> client.admin().cluster().prepareNodesStats().get()); enableLicensing(randomFrom(License.OperationMode.values())); @@ -172,7 +172,7 @@ public void testEnableDisableBehaviour() throws Exception { assertThat(indices, notNullValue()); assertThat(indices.getIndexCount(), greaterThanOrEqualTo(2)); - ClusterHealthResponse clusterIndexHealth = client.admin().cluster().prepareHealth().get(); + ClusterHealthResponse clusterIndexHealth = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(clusterIndexHealth, notNullValue()); NodesStatsResponse nodeStats = client.admin().cluster().prepareNodesStats().get(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java index 032cab446df2a..c0f82adc88784 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java @@ -88,7 +88,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - var clusterState = clusterAdmin().state(new ClusterStateRequest()).actionGet().getState(); + var clusterState = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet().getState(); assertRoleMappingReservedMetadata(clusterState, "everyone_kibana_alone", "everyone_fleet_alone"); List roleMappings = new ArrayList<>(RoleMappingMetadata.getFromClusterState(clusterState).getRoleMappings()); assertThat( @@ -118,7 +118,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { ensureGreen(); // assert role mappings are recovered from "disk" - clusterState = clusterAdmin().state(new ClusterStateRequest()).actionGet().getState(); + clusterState = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet().getState(); assertRoleMappingReservedMetadata(clusterState, "everyone_kibana_alone", "everyone_fleet_alone"); roleMappings = new ArrayList<>(RoleMappingMetadata.getFromClusterState(clusterState).getRoleMappings()); assertThat( @@ -151,7 +151,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - clusterState = clusterAdmin().state(new ClusterStateRequest()).actionGet().getState(); + clusterState = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet().getState(); assertRoleMappingReservedMetadata(clusterState); roleMappings = new ArrayList<>(RoleMappingMetadata.getFromClusterState(clusterState).getRoleMappings()); assertThat(roleMappings, emptyIterable()); @@ -162,7 +162,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { ensureGreen(); // assert empty role mappings are recovered from "disk" - clusterState = 
clusterAdmin().state(new ClusterStateRequest()).actionGet().getState(); + clusterState = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet().getState(); assertRoleMappingReservedMetadata(clusterState); roleMappings = new ArrayList<>(RoleMappingMetadata.getFromClusterState(clusterState).getRoleMappings()); assertThat(roleMappings, emptyIterable()); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java index 84a7f752d56a4..0e2e6c909fbd7 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java @@ -103,7 +103,9 @@ public void testInvalidFilterSettings() throws Exception { settingsBuilder.put(randomFrom(allSettingsKeys), invalidLuceneRegex); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(settingsBuilder.build()).get() + () -> clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settingsBuilder.build()) + .get() ); assertThat(e.getMessage(), containsString("invalid pattern [/invalid]")); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 076ac01f1c8f3..f164189c6f047 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -305,7 +305,7 @@ public void testCreateApiKey() throws Exception { () -> client().filterWithHeader(authorizationHeaders) .admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), true)) .get() ); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index 3025e3f061fcd..66ea1235800ed 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -796,7 +796,7 @@ public void wipeSecurityIndex() throws Exception { } public void testMetadataIsNotSentToClient() { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setCustoms(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setCustoms(true).get(); assertFalse(clusterStateResponse.getState().customs().containsKey(TokenMetadata.TYPE)); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java 
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java index 433f6aac1840e..ccdf7704f221a 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/apikey/ApiKeySingleNodeTests.java @@ -315,7 +315,7 @@ public void testGrantApiKeyForUserWithRunAs() throws IOException { assertThat(apiKey.getRealmType(), equalTo("native")); final Client clientWithGrantedKey = client().filterWithHeader(Map.of("Authorization", "ApiKey " + base64ApiKeyKeyValue)); // The API key has privileges (inherited from user2) to check cluster health - clientWithGrantedKey.execute(TransportClusterHealthAction.TYPE, new ClusterHealthRequest()).actionGet(); + clientWithGrantedKey.execute(TransportClusterHealthAction.TYPE, new ClusterHealthRequest(TEST_REQUEST_TIMEOUT)).actionGet(); // If the API key is granted with limiting descriptors, it should not be able to read pipeline if (grantApiKeyRequest.getApiKeyRequest().getRoleDescriptors().isEmpty()) { clientWithGrantedKey.execute(GetPipelineAction.INSTANCE, new GetPipelineRequest()).actionGet(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 6a1fd9a15cd21..78146e58e91e2 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -440,7 +440,7 @@ public void testCreateAndUpdateRole() { ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get(); assertFalse(response.isTimedOut()); preparePutRole("test_role").cluster("none") @@ -495,12 +495,20 @@ public void testCreateAndUpdateRole() { private void assertClusterHealthOnlyAuthorizesWhenAnonymousRoleActive(String token) { if (anonymousEnabled && roleExists) { assertNoTimeout( - client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get() + client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT) + .get() ); } else { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get() + () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT) + .get() ); assertThat(e.status(), is(RestStatus.FORBIDDEN)); } @@ -607,7 +615,7 @@ public void testAuthenticateWithDeletedRole() { ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get(); assertFalse(response.isTimedOut()); new DeleteRoleRequestBuilder(client()).name("test_role").get(); @@ -641,7 +649,7 @@ public void testPutUserWithoutPassword() { ClusterHealthResponse response = 
client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get(); assertFalse(response.isTimedOut()); @@ -657,12 +665,20 @@ public void testPutUserWithoutPassword() { // test that role change took effect if anonymous is disabled as anonymous grants monitoring permissions... if (anonymousEnabled && roleExists) { assertNoTimeout( - client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get() + client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT) + .get() ); } else { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get() + () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT) + .get() ); assertThat(e.status(), is(RestStatus.FORBIDDEN)); assertThat(e.getMessage(), containsString("authorized")); @@ -680,7 +696,11 @@ public void testPutUserWithoutPassword() { // validate that joe cannot auth with the old token try { - client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get(); + client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT) + .get(); fail("should not authenticate with old password"); } catch (ElasticsearchSecurityException e) { assertThat(e.getMessage(), containsString("authenticate")); @@ -689,7 +709,7 @@ public void testPutUserWithoutPassword() { // test with new password and role response = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue("joe", new SecureString(secondPassword.toCharArray()))) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertFalse(response.isTimedOut()); } @@ -800,7 +820,7 @@ public void testCreateAndChangePassword() throws Exception { ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get(); assertThat(response.isTimedOut(), is(false)); @@ -811,13 +831,17 @@ public void testCreateAndChangePassword() throws Exception { ElasticsearchSecurityException expected = expectThrows( ElasticsearchSecurityException.class, - () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get() + () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT) + .get() ); assertThat(expected.status(), is(RestStatus.UNAUTHORIZED)); response = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue("joe", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(response.isTimedOut(), is(false)); } @@ -942,7 +966,7 @@ public void testSetEnabled() throws Exception { ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin() .cluster() - .prepareHealth() + 
.prepareHealth(TEST_REQUEST_TIMEOUT) .get(); assertThat(response.isTimedOut(), is(false)); @@ -950,13 +974,21 @@ public void testSetEnabled() throws Exception { ElasticsearchSecurityException expected = expectThrows( ElasticsearchSecurityException.class, - () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get() + () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT) + .get() ); assertThat(expected.status(), is(RestStatus.UNAUTHORIZED)); new SetEnabledRequestBuilder(client()).username("joe").enabled(true).get(); - response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get(); + response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin() + .cluster() + .prepareHealth(TEST_REQUEST_TIMEOUT) + .get(); assertThat(response.isTimedOut(), is(false)); IllegalArgumentException e = expectThrows( @@ -974,14 +1006,14 @@ public void testNegativeLookupsThenCreateRole() throws Exception { if (anonymousEnabled && roleExists) { ClusterHealthResponse response = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue("joe", new SecureString("s3krit-password"))) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertNoTimeout(response); } else { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, () -> client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue("joe", new SecureString("s3krit-password"))) - ).admin().cluster().prepareHealth().get() + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get() ); assertThat(e.status(), is(RestStatus.FORBIDDEN)); } @@ -990,7 +1022,7 @@ public void testNegativeLookupsThenCreateRole() throws Exception { preparePutRole("unknown_role").cluster("all").get(); ClusterHealthResponse response = client().filterWithHeader( Collections.singletonMap("Authorization", basicAuthHeaderValue("joe", new SecureString("s3krit-password"))) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertNoTimeout(response); } @@ -1017,7 +1049,7 @@ public void testConcurrentRunAs() throws Exception { try { latch.await(); for (int j = 0; j < numberOfIterations; j++) { - ClusterHealthResponse response = client.admin().cluster().prepareHealth().get(); + ClusterHealthResponse response = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertNoTimeout(response); } } catch (InterruptedException e) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmElasticAutoconfigIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmElasticAutoconfigIntegTests.java index ae48d7563494f..4d3fa73c8e248 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmElasticAutoconfigIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmElasticAutoconfigIntegTests.java @@ -92,7 +92,10 @@ private void awaitSecurityMigrationRanOnce() { public void testAutoconfigFailedPasswordPromotion() { try { // prevents the .security index from being 
created automatically (after elastic user authentication) - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); updateSettingsRequest.transientSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true)); assertAcked(clusterAdmin().updateSettings(updateSettingsRequest).actionGet()); @@ -146,7 +149,10 @@ public void testAutoconfigFailedPasswordPromotion() { exception = expectThrows(ResponseException.class, () -> getRestClient().performRequest(restRequest2)); assertThat(exception.getResponse().getStatusLine().getStatusCode(), is(RestStatus.UNAUTHORIZED.getStatus())); } finally { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); updateSettingsRequest.transientSettings( Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), (String) null) ); @@ -168,7 +174,10 @@ public void testAutoconfigSucceedsAfterPromotionFailure() throws Exception { awaitSecurityMigrationRanOnce(); // but then make the cluster read-only - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); updateSettingsRequest.transientSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true)); assertAcked(clusterAdmin().updateSettings(updateSettingsRequest).actionGet()); @@ -190,7 +199,7 @@ public void testAutoconfigSucceedsAfterPromotionFailure() throws Exception { ResponseException exception = expectThrows(ResponseException.class, () -> getRestClient().performRequest(restRequest)); assertThat(exception.getResponse().getStatusLine().getStatusCode(), is(RestStatus.SERVICE_UNAVAILABLE.getStatus())); // clear cluster-wide write block - updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); updateSettingsRequest.transientSettings( Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), (String) null) ); @@ -232,7 +241,10 @@ public void testAutoconfigSucceedsAfterPromotionFailure() throws Exception { restRequest3.setOptions(options); assertThat(getRestClient().performRequest(restRequest3).getStatusLine().getStatusCode(), is(RestStatus.OK.getStatus())); } finally { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); updateSettingsRequest.transientSettings( Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), (String) null) ); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java index d9e76195d7363..6abb45e62050b 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java +++ 
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java @@ -64,7 +64,7 @@ public void testAuthenticate() { for (String username : usernames) { ClusterHealthResponse response = client().filterWithHeader( singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword())) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getClusterName(), is(cluster().getClusterName())); } @@ -89,7 +89,7 @@ public void testAuthenticateAfterEnablingUser() throws IOException { ClusterHealthResponse response = client().filterWithHeader( singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword())) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getClusterName(), is(cluster().getClusterName())); } @@ -110,7 +110,7 @@ public void testChangingPassword() throws IOException { if (randomBoolean()) { ClusterHealthResponse response = client().filterWithHeader( singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword())) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getClusterName(), is(cluster().getClusterName())); } @@ -121,14 +121,14 @@ public void testChangingPassword() throws IOException { () -> client().filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get() ); assertThat(elasticsearchSecurityException.getMessage(), containsString("authenticate")); ClusterHealthResponse healthResponse = client().filterWithHeader( singletonMap("Authorization", basicAuthHeaderValue(username, new SecureString(newPassword))) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(healthResponse.getClusterName(), is(cluster().getClusterName())); } @@ -136,7 +136,7 @@ public void testDisablingUser() throws Exception { // validate the user works ClusterHealthResponse response = client().filterWithHeader( singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword())) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getClusterName(), is(cluster().getClusterName())); // disable user @@ -146,7 +146,7 @@ public void testDisablingUser() throws Exception { () -> client().filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword()))) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get() ); assertThat(elasticsearchSecurityException.getMessage(), containsString("authenticate")); @@ -156,7 +156,7 @@ public void testDisablingUser() throws Exception { response = client().filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword()))) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get(); assertThat(response.getClusterName(), is(cluster().getClusterName())); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java 
b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java index d11ca70744b7b..dddc4e7ba1787 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreCacheTests.java @@ -303,14 +303,14 @@ public void testRolesCacheIsClearedWhenPrivilegesIsChanged() { "Basic " + Base64.getEncoder().encodeToString((testRoleCacheUser + ":longerpassword").getBytes(StandardCharsets.UTF_8)) ) ); - new ClusterHealthRequestBuilder(testRoleCacheUserClient).get(); + new ClusterHealthRequestBuilder(testRoleCacheUserClient, TEST_REQUEST_TIMEOUT).get(); // Directly deleted the role document final DeleteResponse deleteResponse = client.prepareDelete(SECURITY_MAIN_ALIAS, "role-" + testRole).get(); assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); // The cluster health action can still success since the role is cached - new ClusterHealthRequestBuilder(testRoleCacheUserClient).get(); + new ClusterHealthRequestBuilder(testRoleCacheUserClient, TEST_REQUEST_TIMEOUT).get(); // Change an application privilege which triggers role cache invalidation as well if (randomBoolean()) { @@ -319,7 +319,10 @@ public void testRolesCacheIsClearedWhenPrivilegesIsChanged() { addApplicationPrivilege("app-3", "read", "r:q:r:s"); } // Since role cache is cleared, the cluster health action is no longer authorized - expectThrows(ElasticsearchSecurityException.class, () -> new ClusterHealthRequestBuilder(testRoleCacheUserClient).get()); + expectThrows( + ElasticsearchSecurityException.class, + () -> new ClusterHealthRequestBuilder(testRoleCacheUserClient, TEST_REQUEST_TIMEOUT).get() + ); } private HasPrivilegesResponse checkPrivilege(String applicationName, String privilegeName) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreSingleNodeTests.java index 2503f2fc17d20..1e4682ae15301 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreSingleNodeTests.java @@ -77,7 +77,7 @@ public void configureApplicationPrivileges() { public void testResolvePrivilegesWorkWhenExpensiveQueriesAreDisabled() throws IOException { // Disable expensive query - new ClusterUpdateSettingsRequestBuilder(client()).setTransientSettings( + new ClusterUpdateSettingsRequestBuilder(client(), TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings( Settings.builder().put(ALLOW_EXPENSIVE_QUERIES.getKey(), false) ).get(); @@ -184,7 +184,7 @@ public void testResolvePrivilegesWorkWhenExpensiveQueriesAreDisabled() throws IO assertThat(authenticateResponse.authentication().getEffectiveSubject().getUser().principal(), equalTo("app_user")); } finally { // Reset setting since test suite expects things in a clean slate - new ClusterUpdateSettingsRequestBuilder(client()).setTransientSettings( + new ClusterUpdateSettingsRequestBuilder(client(), TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings( 
Settings.builder().putNull(ALLOW_EXPENSIVE_QUERIES.getKey()) ).get(); } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesSingleNodeTests.java index 9593bfa5ab723..30cd1f254d00d 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/operator/OperatorPrivilegesSingleNodeTests.java @@ -66,7 +66,9 @@ protected Settings nodeSettings() { } public void testNormalSuperuserWillFailToCallOperatorOnlyAction() { - final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); + final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT + ); final ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, () -> client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearVotingConfigExclusionsRequest).actionGet() @@ -76,7 +78,10 @@ public void testNormalSuperuserWillFailToCallOperatorOnlyAction() { public void testNormalSuperuserWillFailToSetOperatorOnlySettings() { final Settings settings = Settings.builder().put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), "null").build(); - final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest(); + final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); if (randomBoolean()) { clusterUpdateSettingsRequest.transientSettings(settings); } else { @@ -91,13 +96,18 @@ public void testNormalSuperuserWillFailToSetOperatorOnlySettings() { public void testOperatorUserWillSucceedToCallOperatorOnlyAction() { final Client client = createOperatorClient(); - final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); + final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest( + TEST_REQUEST_TIMEOUT + ); client.execute(TransportClearVotingConfigExclusionsAction.TYPE, clearVotingConfigExclusionsRequest).actionGet(); } public void testOperatorUserWillSucceedToSetOperatorOnlySettings() { final Client client = createOperatorClient(); - final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest(); + final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); final Settings settings = Settings.builder().put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), false).build(); final boolean useTransientSetting = randomBoolean(); try { @@ -108,7 +118,10 @@ public void testOperatorUserWillSucceedToSetOperatorOnlySettings() { } client.execute(ClusterUpdateSettingsAction.INSTANCE, clusterUpdateSettingsRequest).actionGet(); } finally { - final ClusterUpdateSettingsRequest clearSettingsRequest = new ClusterUpdateSettingsRequest(); + final ClusterUpdateSettingsRequest clearSettingsRequest = new ClusterUpdateSettingsRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); final Settings clearSettings = 
Settings.builder().putNull(IPFilter.IP_FILTER_ENABLED_SETTING.getKey()).build(); if (useTransientSetting) { clearSettingsRequest.transientSettings(clearSettings); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java index f43275c2d8b70..6e95e9e3a74d6 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/SecurityDomainIntegTests.java @@ -336,7 +336,7 @@ public void testDomainCaptureForApiKey() throws IOException { (createApiKeyResponse.getId() + ":" + createApiKeyResponse.getKey()).getBytes(StandardCharsets.UTF_8) ) ) - ).admin().cluster().prepareHealth().get(); + ).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); } public void testDomainCaptureForServiceToken() throws IOException { @@ -364,7 +364,7 @@ public void testDomainCaptureForServiceToken() throws IOException { client().filterWithHeader(Map.of("Authorization", "Bearer " + createServiceTokenResponse.getValue())) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get(); } @@ -372,7 +372,7 @@ private void assertAccessToken(CreateTokenResponse createTokenResponse) { client().filterWithHeader(Map.of("Authorization", "Bearer " + createTokenResponse.getTokenString())) .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .get(); assertResponse(prepareSearch(SecuritySystemIndices.SECURITY_TOKENS_ALIAS), searchResponse -> { final String encodedAuthentication; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java index 0b1d33cb35c97..4f04f77ef024e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java @@ -87,7 +87,7 @@ public void testThatIpFilterConfigurationCanBeChangedDynamically() throws Except assertConnectionRejected("client", "127.0.0.8"); // check that all is in cluster state - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().settings().get("xpack.security.transport.filter.allow"), is("127.0.0.1")); assertThat(clusterState.metadata().settings().get("xpack.security.transport.filter.deny"), is("127.0.0.8")); assertEquals(Arrays.asList("127.0.0.1"), clusterState.metadata().settings().getAsList("xpack.security.http.filter.allow")); @@ -105,7 +105,7 @@ public void testThatIpFilterConfigurationCanBeChangedDynamically() throws Except assertConnectionAccepted("client", "127.0.0.8"); // disabling should not have any effect on the cluster state settings - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().settings().get("xpack.security.transport.filter.allow"), 
is("127.0.0.1")); assertThat(clusterState.metadata().settings().get("xpack.security.transport.filter.deny"), is("127.0.0.8")); assertEquals(Arrays.asList("127.0.0.1"), clusterState.metadata().settings().getAsList("xpack.security.http.filter.allow")); @@ -148,7 +148,9 @@ public void testThatInvalidDynamicIpFilterConfigurationIsRejected() { expectThrows( IllegalArgumentException.class, settingName, - () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(settings).get() + () -> clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settings) + .get() ).getMessage(), allOf(containsString("invalid IP filter"), containsString(invalidValue)) ); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 1ed371229d074..aabfa0593e5ff 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -387,7 +387,7 @@ public void assertSecurityIndexActive() throws Exception { public void assertSecurityIndexActive(TestCluster testCluster) throws Exception { for (Client client : testCluster.getClients()) { assertBusy(() -> { - ClusterState clusterState = client.admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertFalse(clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)); Index securityIndex = resolveSecurityIndex(clusterState.metadata()); if (securityIndex != null) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 3be0a17d19253..bb46beae86577 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -1612,7 +1612,7 @@ public void testDenialErrorMessagesForClusterHealthAction() { AuditUtil.getOrGenerateRequestId(threadContext); - TransportRequest request = new ClusterHealthRequest(); + TransportRequest request = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT); ElasticsearchSecurityException securityException = expectThrows( ElasticsearchSecurityException.class, @@ -2143,7 +2143,10 @@ public void testGrantAllRestrictedUserCannotExecuteOperationAgainstSecurityIndic } // we should allow waiting for the health of the index or any index if the user has this permission - ClusterHealthRequest request = new ClusterHealthRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)); + ClusterHealthRequest request = new ClusterHealthRequest( + TEST_REQUEST_TIMEOUT, + randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7) + ); authorize(authentication, TransportClusterHealthAction.NAME, request); verify(auditTrail).accessGranted( eq(requestId), @@ -2154,7 +2157,7 @@ public void testGrantAllRestrictedUserCannotExecuteOperationAgainstSecurityIndic ); // multiple indices - request = new ClusterHealthRequest(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7, "foo", "bar"); + request = new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, SECURITY_MAIN_ALIAS, 
INTERNAL_SECURITY_MAIN_INDEX_7, "foo", "bar"); authorize(authentication, TransportClusterHealthAction.NAME, request); verify(auditTrail).accessGranted( eq(requestId), @@ -2283,13 +2286,18 @@ public void testSuperusersCanExecuteReadOperationAgainstSecurityIndex() { requests.add( new Tuple<>( TransportClusterHealthAction.NAME, - new ClusterHealthRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)) + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)) ) ); requests.add( new Tuple<>( TransportClusterHealthAction.NAME, - new ClusterHealthRequest(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7), "foo", "bar") + new ClusterHealthRequest( + TEST_REQUEST_TIMEOUT, + randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7), + "foo", + "bar" + ) ) ); diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownDelayedAllocationIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownDelayedAllocationIT.java index 7eac3d9c7fd9f..8809a799fecec 100644 --- a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownDelayedAllocationIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownDelayedAllocationIT.java @@ -64,7 +64,9 @@ public void testShardAllocationIsDelayedForRestartingNode() throws Exception { internalCluster().restartNode(nodeToRestartName, new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) throws Exception { - assertBusy(() -> assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1))); + assertBusy( + () -> assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)) + ); return super.onNodeStopped(nodeName); } }); @@ -135,7 +137,9 @@ public void testIndexLevelAllocationDelayWillBeUsedIfLongerThanShutdownDelay() t internalCluster().restartNode(nodeToRestartName, new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) throws Exception { - assertBusy(() -> { assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); }); + assertBusy( + () -> { assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)); } + ); return super.onNodeStopped(nodeName); } }); @@ -218,7 +222,9 @@ private String setupLongTimeoutTestCase() throws Exception { internalCluster().stopNode(nodeToRestartName); // Verify that the shard's allocation is delayed - assertBusy(() -> { assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); }); + assertBusy( + () -> { assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)); } + ); return nodeToRestartId; } @@ -233,13 +239,13 @@ private void indexRandomData() throws Exception { } private String findIdOfNodeWithShard() { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED); return randomFrom(startedShards).currentNodeId(); } private String findNodeNameFromId(String id) { - ClusterState state = clusterAdmin().prepareState().get().getState(); 
+ ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); return state.nodes().get(id).getName(); } } diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java index 1594f78e04140..d12d093dd5b8d 100644 --- a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownShardsIT.java @@ -426,7 +426,7 @@ private void indexRandomData(String index) throws Exception { } private String findIdOfNodeWithPrimaryShard(String indexName) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED); return startedShards.stream() .filter(ShardRouting::primary) @@ -465,7 +465,7 @@ private void assertNodeShutdownStatus(String nodeId, SingleNodeShutdownMetadata. } private void assertIndexPrimaryShardsAreAllocatedOnNode(String indexName, String nodeId) { - var state = clusterAdmin().prepareState().clear().setRoutingTable(true).get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setRoutingTable(true).get().getState(); var indexRoutingTable = state.routingTable().index(indexName); for (int p = 0; p < indexRoutingTable.size(); p++) { var primaryShard = indexRoutingTable.shard(p).primaryShard(); @@ -485,7 +485,7 @@ private void assertIndexPrimaryShardsAreAllocatedOnNode(String indexName, String } private void assertIndexReplicaShardsAreNotAllocated(String indexName) { - var state = clusterAdmin().prepareState().clear().setRoutingTable(true).get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setRoutingTable(true).get().getState(); var indexRoutingTable = state.routingTable().index(indexName); for (int p = 0; p < indexRoutingTable.size(); p++) { for (ShardRouting replicaShard : indexRoutingTable.shard(p).replicaShards()) { diff --git a/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java b/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index d42c8ec9655ef..6ee35086baf76 100644 --- a/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java +++ b/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -432,7 +432,7 @@ public void testBasicTimeBasedRetention() throws Exception { }, 60, TimeUnit.SECONDS); // Run retention every second - ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); req.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, "*/1 * * * * ?")); try (XContentBuilder builder = jsonBuilder()) { req.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -474,7 +474,7 @@ public void testBasicTimeBasedRetention() throws Exception { } finally { // Unset retention - ClusterUpdateSettingsRequest unsetRequest = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest 
unsetRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); unsetRequest.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, (String) null)); try (XContentBuilder builder = jsonBuilder()) { unsetRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -895,7 +895,7 @@ private void createSnapshotPolicy( } private void disableSLMMinimumIntervalValidation() throws IOException { - ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); req.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_MINIMUM_INTERVAL, "0s")); try (XContentBuilder builder = jsonBuilder()) { req.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java index d93f40f7c0a82..30dcd339b43b7 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java @@ -150,7 +150,10 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { } private void assertMasterNode(Client client, String node) { - assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } private void writeJSONFile(String node, String json) throws Exception { @@ -192,7 +195,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).get(); var reservedState = clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); @@ -205,7 +208,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo equalTo("50mb") ); - ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings( Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb") ); assertEquals( @@ -306,7 +309,7 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).actionGet(); assertThat(clusterStateResponse.getState().metadata().persistentSettings().get("search.allow_expensive_queries"), nullValue()); diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java index 1d797095c1f69..d42d45e430627 100644 --- 
a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java @@ -355,7 +355,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex logger.info("--> stopping random data node, which should cause shards to go missing"); internalCluster().stopRandomDataNode(); assertBusy( - () -> assertEquals(ClusterHealthStatus.RED, clusterAdmin().prepareHealth().get().getStatus()), + () -> assertEquals(ClusterHealthStatus.RED, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getStatus()), 30, TimeUnit.SECONDS ); diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java index d8b1f36c25e54..14e40a9fe0e52 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java @@ -578,13 +578,19 @@ private void assertMetadata(String policyName, long taken, long failure, long in } private SnapshotLifecycleMetadata getSnapshotLifecycleMetadata() { - final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = client().admin() + .cluster() + .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)) + .actionGet(); ClusterState state = clusterStateResponse.getState(); return state.metadata().custom(SnapshotLifecycleMetadata.TYPE); } private RegisteredPolicySnapshots getRegisteredSnapshots() { - final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = client().admin() + .cluster() + .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)) + .actionGet(); ClusterState state = clusterStateResponse.getState(); return state.metadata().custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY); } diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index d2e5896a4cf77..d1eaff1bef1b2 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -566,7 +566,7 @@ public void testRecoveryIsCancelledAfterDeletingTheIndex() throws Exception { final String targetNode; if (seqNoRecovery) { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index(indexName).shard(0); String primaryNodeName = clusterState.nodes().resolveNode(shardRoutingTable.primaryShard().currentNodeId()).getName(); String replicaNodeName = clusterState.nodes() @@ -597,7 +597,7 @@ 
public void testRecoveryIsCancelledAfterDeletingTheIndex() throws Exception { ); if (seqNoRecovery) { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndexShardRoutingTable shardRoutingTable = clusterState.routingTable().index(indexName).shard(0); String primaryNodeName = clusterState.nodes().resolveNode(shardRoutingTable.primaryShard().currentNodeId()).getName(); @@ -996,7 +996,7 @@ public void testSeqNoBasedRecoveryIsUsedAfterPrimaryFailOver() throws Exception createRepo(repoName, repoType); createSnapshot(repoName, "snap", Collections.singletonList(indexName)); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String primaryNodeId = clusterState.routingTable().index(indexName).shard(0).primaryShard().currentNodeId(); String primaryNodeName = clusterState.nodes().resolveNode(primaryNodeId).getName(); @@ -1006,7 +1006,7 @@ public void testSeqNoBasedRecoveryIsUsedAfterPrimaryFailOver() throws Exception ensureGreen(indexName); - ClusterState clusterStateAfterPrimaryFailOver = clusterAdmin().prepareState().get().getState(); + ClusterState clusterStateAfterPrimaryFailOver = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndexShardRoutingTable shardRoutingTableAfterFailOver = clusterStateAfterPrimaryFailOver.routingTable().index(indexName).shard(0); String primaryNodeIdAfterFailOver = shardRoutingTableAfterFailOver.primaryShard().currentNodeId(); @@ -1316,7 +1316,7 @@ public void testNodeDisconnectsDoNotOverAccountRecoveredBytes() throws Exception createRepo(repoName, TestRepositoryPlugin.FILTER_TYPE); createSnapshot(repoName, "snap", Collections.singletonList(indexName)); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String primaryNodeId = clusterState.routingTable().index(indexName).shard(0).primaryShard().currentNodeId(); String primaryNodeName = clusterState.nodes().resolveNode(primaryNodeId).getName(); String replicaNodeId = clusterState.routingTable().index(indexName).shard(0).replicaShards().get(0).currentNodeId(); @@ -1527,7 +1527,7 @@ private void assertPeerRecoveryDidNotUseSnapshots(String indexName, String sourc } private Store.MetadataSnapshot getMetadataSnapshot(String nodeName, String indexName) throws IOException { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName); IndexService indexService = indicesService.indexService(clusterState.metadata().index(indexName).getIndex()); IndexShard shard = indexService.getShard(0); @@ -1561,7 +1561,7 @@ private void indexDocs(String indexName, int docIdOffset, int docCount) throws E // Ensure that the safe commit == latest commit assertBusy(() -> { - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); var indexShardRoutingTable = clusterState.routingTable().index(indexName).shard(0); assertThat(indexShardRoutingTable, is(notNullValue())); diff --git 
a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java index ae890cb8321dc..e2f9af2c676ce 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.xcontent.XContentBuilder; @@ -389,7 +390,10 @@ protected static boolean allPrimaryShardsActiveForLatestVersionedIndex(ClusterSt } private static void waitForLatestVersionedIndexShardsActive(Client client, ActionListener listener) { - ClusterHealthRequest request = new ClusterHealthRequest(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME) + ClusterHealthRequest request = new ClusterHealthRequest( + TimeValue.THIRTY_SECONDS /* TODO should this be longer/configurable? */, + TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME + ) // cluster health does not wait for active shards per default .waitForActiveShards(ActiveShardCount.ONE); ActionListener innerListener = ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure); diff --git a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java index 58e6e736b1207..b1df355417ac9 100644 --- a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java +++ b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java @@ -76,7 +76,10 @@ public void testVotingOnlyNodeStats() throws Exception { internalCluster().startNodes(2); internalCluster().startNode(addRoles(Set.of(DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE))); assertBusy( - () -> assertThat(clusterAdmin().prepareState().get().getState().getLastCommittedConfiguration().getNodeIds(), hasSize(3)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getLastCommittedConfiguration().getNodeIds(), + hasSize(3) + ) ); assertThat( clusterAdmin().prepareClusterStats() @@ -98,15 +101,20 @@ public void testPreferFullMasterOverVotingOnlyNodes() throws Exception { internalCluster().startNode(addRoles(Set.of(DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE))); internalCluster().startDataOnlyNodes(randomInt(2)); assertBusy( - () -> assertThat(clusterAdmin().prepareState().get().getState().getLastCommittedConfiguration().getNodeIds().size(), equalTo(3)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getLastCommittedConfiguration().getNodeIds().size(), + equalTo(3) + ) ); final String originalMaster = internalCluster().getMasterName(); internalCluster().stopCurrentMasterNode(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + 
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get(); assertNotEquals(originalMaster, internalCluster().getMasterName()); assertThat( - VotingOnlyNodePlugin.isVotingOnlyNode(clusterAdmin().prepareState().get().getState().nodes().getMasterNode()), + VotingOnlyNodePlugin.isVotingOnlyNode( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode() + ), equalTo(false) ); } @@ -115,10 +123,15 @@ public void testBootstrapOnlyVotingOnlyNodes() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().startNodes(addRoles(Set.of(DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE)), Settings.EMPTY, Settings.EMPTY); assertBusy( - () -> assertThat(clusterAdmin().prepareState().get().getState().getLastCommittedConfiguration().getNodeIds().size(), equalTo(3)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getLastCommittedConfiguration().getNodeIds().size(), + equalTo(3) + ) ); assertThat( - VotingOnlyNodePlugin.isVotingOnlyNode(clusterAdmin().prepareState().get().getState().nodes().getMasterNode()), + VotingOnlyNodePlugin.isVotingOnlyNode( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode() + ), equalTo(false) ); } @@ -132,9 +145,11 @@ public void testBootstrapOnlySingleVotingOnlyNode() throws Exception { .build() ); internalCluster().startNode(); - assertBusy(() -> assertThat(clusterAdmin().prepareState().get().getState().getNodes().getSize(), equalTo(2))); + assertBusy(() -> assertThat(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getSize(), equalTo(2))); assertThat( - VotingOnlyNodePlugin.isVotingOnlyNode(clusterAdmin().prepareState().get().getState().nodes().getMasterNode()), + VotingOnlyNodePlugin.isVotingOnlyNode( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode() + ), equalTo(false) ); } @@ -145,16 +160,19 @@ public void testVotingOnlyNodesCannotBeMasterWithoutFullMasterNodes() throws Exc internalCluster().startNodes(2, addRoles(Set.of(DiscoveryNodeRole.VOTING_ONLY_NODE_ROLE))); internalCluster().startDataOnlyNodes(randomInt(2)); assertBusy( - () -> assertThat(clusterAdmin().prepareState().get().getState().getLastCommittedConfiguration().getNodeIds().size(), equalTo(3)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getLastCommittedConfiguration().getNodeIds().size(), + equalTo(3) + ) ); - final String oldMasterId = clusterAdmin().prepareState().get().getState().nodes().getMasterNodeId(); + final String oldMasterId = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNodeId(); internalCluster().stopCurrentMasterNode(); expectThrows( MasterNotDiscoveredException.class, () -> assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueMillis(100)) .get() .getState() @@ -167,7 +185,7 @@ public void testVotingOnlyNodesCannotBeMasterWithoutFullMasterNodes() throws Exc // start a fresh full master node, which will be brought into the cluster as master by the voting-only nodes final String newMaster = internalCluster().startNode(); assertEquals(newMaster, internalCluster().getMasterName()); - final String newMasterId = clusterAdmin().prepareState().get().getState().nodes().getMasterNodeId(); + final String newMasterId = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNodeId(); assertNotEquals(oldMasterId, newMasterId); } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 3b9ea0bd18d47..5dc537fc259d9 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -353,7 +353,7 @@ protected void assertWatchWithMinimumPerformedActionsCount( final AtomicReference lastResponse = new AtomicReference<>(); try { assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String[] watchHistoryIndices = indexNameExpressionResolver().concreteIndexNames( state, IndicesOptions.lenientExpandOpen(), @@ -429,7 +429,7 @@ protected void assertWatchWithNoActionNeeded(final String watchName, final long assertBusy(() -> { // The watch_history index gets created in the background when the first watch is triggered // so we to check first is this index is created and shards are started - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String[] watchHistoryIndices = indexNameExpressionResolver().concreteIndexNames( state, IndicesOptions.lenientExpandOpen(), @@ -476,7 +476,7 @@ protected void assertWatchWithNoActionNeeded(final String watchName, final long protected void assertWatchWithMinimumActionsCount(final String watchName, final ExecutionState recordState, final long recordCount) throws Exception { assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String[] watchHistoryIndices = indexNameExpressionResolver().concreteIndexNames( state, IndicesOptions.lenientExpandOpen(), diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java index 265b252082c68..7ff293ed9b150 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java @@ -46,7 +46,7 @@ protected boolean timeWarped() { // the watch should be executed properly, despite the index being created and the cluster state listener being reloaded public void testThatLoadingWithNonExistingIndexWorks() throws Exception { stopWatcher(); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); IndexMetadata metadata = WatchStoreUtils.getConcreteIndex(Watch.INDEX, clusterStateResponse.getState().metadata()); String watchIndexName = metadata.getIndex().getName(); assertAcked(indicesAdmin().prepareDelete(watchIndexName)); diff --git 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java index 050e7ebe58e42..8d0001c1cc24d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; @@ -62,7 +63,7 @@ public class WatcherExecutorServiceBenchmark { protected static void start() throws Exception { Node node = new MockNode(Settings.builder().put(SETTINGS).put("node.data", false).build(), Arrays.asList(BenchmarkWatcher.class)); client = node.client(); - client.admin().cluster().prepareHealth("*").setWaitForGreenStatus().get(); + client.admin().cluster().prepareHealth(TimeValue.THIRTY_SECONDS, "*").setWaitForGreenStatus().get(); Thread.sleep(5000); scheduler = node.injector().getInstance(ScheduleTriggerEngineMock.class); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java index 53ca2fb3b3a35..1691a464d8061 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.InternalSettingsPreparer; @@ -109,7 +110,7 @@ public static void main(String[] args) throws Exception { ).start() ) { final Client client = node.client(); - ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); + ClusterHealthResponse response = client.admin().cluster().prepareHealth(TimeValue.THIRTY_SECONDS).setWaitForNodes("2").get(); if (response.getNumberOfNodes() != 2 && response.getNumberOfDataNodes() != 1) { throw new IllegalStateException("This benchmark needs one extra data only node running outside this benchmark"); } @@ -161,9 +162,9 @@ public static void main(String[] args) throws Exception { .build(); try (Node node = new MockNode(settings, Arrays.asList(LocalStateWatcher.class))) { final Client client = node.client(); - client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); + client.admin().cluster().prepareHealth(TimeValue.THIRTY_SECONDS).setWaitForNodes("2").get(); client.admin().indices().prepareDelete(HistoryStoreField.DATA_STREAM + "*").get(); - client.admin().cluster().prepareHealth(Watch.INDEX, "test").setWaitForYellowStatus().get(); + client.admin().cluster().prepareHealth(TimeValue.THIRTY_SECONDS, Watch.INDEX, "test").setWaitForYellowStatus().get(); Clock clock = 
node.injector().getInstance(Clock.class); while (new WatcherStatsRequestBuilder(client).get() From d6daef0d7daf41267d53650815fe16e749a347d3 Mon Sep 17 00:00:00 2001 From: Krishna Chaitanya Reddy Burri Date: Tue, 10 Sep 2024 12:53:57 +0530 Subject: [PATCH 16/31] [Cloud Security] Add privileges required for AWS SecurityHub related to CDR misconfiguration features (#112574) Update `kibana_system` privileges to include the ones required for the Security Solution CDR Misconfiguration latest transform to work on AWS SecurityHub integration: - to read from the source AWS SecurityHub Findings data stream, one of the data streams providing data for Cloud Detection & Response (CDR) features in Kibana. The destination and alias index privileges were already added in #112456. --- docs/changelog/112574.yaml | 5 +++++ .../authz/store/KibanaOwnedReservedRoleDescriptors.java | 2 +- .../core/security/authz/store/ReservedRolesStoreTests.java | 3 ++- 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/112574.yaml diff --git a/docs/changelog/112574.yaml b/docs/changelog/112574.yaml new file mode 100644 index 0000000000000..3111697a8b97f --- /dev/null +++ b/docs/changelog/112574.yaml @@ -0,0 +1,5 @@ +pr: 112574 +summary: Add privileges required for CDR misconfiguration features to work on AWS SecurityHub integration +area: Authorization +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 6529d4d18fa5d..6177329089bd3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -420,7 +420,7 @@ static RoleDescriptor kibanaSystem(String name) { // For source indices of the Cloud Detection & Response (CDR) packages that ships a // transform RoleDescriptor.IndicesPrivileges.builder() - .indices("logs-wiz.vulnerability-*", "logs-wiz.cloud_configuration_finding-*") + .indices("logs-wiz.vulnerability-*", "logs-wiz.cloud_configuration_finding-*", "logs-aws.securityhub_findings-*") .privileges("read", "view_index_metadata") .build(), // For alias indices of the Cloud Detection & Response (CDR) packages that ships a diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index be4042ae77838..54a5678579ce4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1611,7 +1611,8 @@ public void testKibanaSystemRole() { Arrays.asList( "logs-wiz.vulnerability-" + randomAlphaOfLength(randomIntBetween(0, 13)), - "logs-wiz.cloud_configuration_finding-" + randomAlphaOfLength(randomIntBetween(0, 13)) + "logs-wiz.cloud_configuration_finding-" + randomAlphaOfLength(randomIntBetween(0, 13)), + "logs-aws.securityhub_findings-" + randomAlphaOfLength(randomIntBetween(0, 13)) ).forEach(indexName -> { final IndexAbstraction indexAbstraction = mockIndexAbstraction(indexName);
assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); From 02859bad331199d0a1a838d0c8ba526bda3441bd Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Tue, 10 Sep 2024 17:29:25 +1000 Subject: [PATCH 17/31] Re-check node cache stats before failing (#112688) Closes #112384 --- muted-tests.yml | 3 -- .../shared/NodesCachesStatsIntegTests.java | 52 ++++++++++--------- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 23ad0588aa561..a6e3731fa8921 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -132,9 +132,6 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testMultiIndexDelete issue: https://github.com/elastic/elasticsearch/issues/112381 -- class: org.elasticsearch.xpack.searchablesnapshots.cache.shared.NodesCachesStatsIntegTests - method: testNodesCachesStats - issue: https://github.com/elastic/elasticsearch/issues/112384 - class: org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshotTests method: testToXContent issue: https://github.com/elastic/elasticsearch/issues/112325 diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java index e847dc16f2dea..15d03c20af792 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java @@ -136,30 +136,34 @@ public void testNodesCachesStats() throws Exception { .collect(toSet()) .toArray(String[]::new); - final NodesCachesStatsResponse response = client().execute( - TransportSearchableSnapshotsNodeCachesStatsAction.TYPE, - new NodesRequest(dataNodesWithFrozenShards) - ).actionGet(); - assertThat( - response.getNodes().stream().map(r -> r.getNode().getId()).collect(Collectors.toList()), - containsInAnyOrder(dataNodesWithFrozenShards) - ); - assertThat(response.hasFailures(), equalTo(false)); - - for (NodeCachesStatsResponse nodeCachesStats : response.getNodes()) { - if (nodeCachesStats.getNumRegions() > 0) { - assertThat(nodeCachesStats.getWrites(), greaterThan(0L)); - assertThat(nodeCachesStats.getBytesWritten(), greaterThan(0L)); - assertThat(nodeCachesStats.getReads(), greaterThan(0L)); - assertThat(nodeCachesStats.getBytesRead(), greaterThan(0L)); - assertThat(nodeCachesStats.getEvictions(), greaterThan(0L)); - } else { - assertThat(nodeCachesStats.getWrites(), equalTo(0L)); - assertThat(nodeCachesStats.getBytesWritten(), equalTo(0L)); - assertThat(nodeCachesStats.getReads(), equalTo(0L)); - assertThat(nodeCachesStats.getBytesRead(), equalTo(0L)); - assertThat(nodeCachesStats.getEvictions(), equalTo(0L)); + // We've seen `getWrites` inexplicably return zero. `assertBusy` to test the theory of it being due + // to contention on the `LongAdder` at `SharedBlobCacheService#writeCount`. 
+ assertBusy(() -> { + final NodesCachesStatsResponse response = client().execute( + TransportSearchableSnapshotsNodeCachesStatsAction.TYPE, + new NodesRequest(dataNodesWithFrozenShards) + ).actionGet(); + assertThat( + response.getNodes().stream().map(r -> r.getNode().getId()).collect(Collectors.toList()), + containsInAnyOrder(dataNodesWithFrozenShards) + ); + assertThat(response.hasFailures(), equalTo(false)); + + for (NodeCachesStatsResponse nodeCachesStats : response.getNodes()) { + if (nodeCachesStats.getNumRegions() > 0) { + assertThat(nodeCachesStats.getWrites(), greaterThan(0L)); + assertThat(nodeCachesStats.getBytesWritten(), greaterThan(0L)); + assertThat(nodeCachesStats.getReads(), greaterThan(0L)); + assertThat(nodeCachesStats.getBytesRead(), greaterThan(0L)); + assertThat(nodeCachesStats.getEvictions(), greaterThan(0L)); + } else { + assertThat(nodeCachesStats.getWrites(), equalTo(0L)); + assertThat(nodeCachesStats.getBytesWritten(), equalTo(0L)); + assertThat(nodeCachesStats.getReads(), equalTo(0L)); + assertThat(nodeCachesStats.getBytesRead(), equalTo(0L)); + assertThat(nodeCachesStats.getEvictions(), equalTo(0L)); + } } - } + }); } } From 6da37658adf08a8fec43afb4957072c6c3dccc14 Mon Sep 17 00:00:00 2001 From: kosabogi <105062005+kosabogi@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:08:53 +0200 Subject: [PATCH 18/31] #101472 Updates default index.translog.flush_threshold_size value (#112052) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * #101472 Updates default index.translog.flush_threshold_size value * Update docs/reference/index-modules/translog.asciidoc Co-authored-by: István Zoltán Szabó * Updates the description --------- Co-authored-by: István Zoltán Szabó --- docs/reference/index-modules/translog.asciidoc | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index 52631bc0956b8..0032c7b46bfb2 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -19,7 +19,8 @@ An {es} <> is the process of performing a Lucene commit and starting a new translog generation. Flushes are performed automatically in the background in order to make sure the translog does not grow too large, which would make replaying its operations take a considerable amount of time during -recovery. The ability to perform a flush manually is also exposed through an +recovery. The translog size will never exceed `1%` of the disk size. +The ability to perform a flush manually is also exposed through an API, although this is rarely needed. [discrete] @@ -71,7 +72,8 @@ update, or bulk request. This setting accepts the following parameters: The translog stores all operations that are not yet safely persisted in Lucene (i.e., are not part of a Lucene commit point). Although these operations are available for reads, they will need to be replayed if the shard was stopped - and had to be recovered. This setting controls the maximum total size of these - operations, to prevent recoveries from taking too long. Once the maximum size - has been reached a flush will happen, generating a new Lucene commit point. - Defaults to `512mb`. + and had to be recovered. + This setting controls the maximum total size of these operations to prevent + recoveries from taking too long. Once the maximum size has been reached, a flush + will happen, generating a new Lucene commit point. Defaults to `10 GB`. 
+ From c2d45432505a0d53ec1f80f9a54830feef6e0333 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:17:10 +0100 Subject: [PATCH 19/31] [DOCS][101] Refine mappings + documents/indices overviews (#112545) --- docs/reference/intro.asciidoc | 42 ++++++++++++++++----------------- docs/reference/mapping.asciidoc | 41 ++++++++++++++++++++++---------- 2 files changed, 50 insertions(+), 33 deletions(-) diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index 3ad5a9bd71c08..f80856368af2b 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -14,12 +14,12 @@ Use {es} to search, index, store, and analyze data of all shapes and sizes in ne {es} is used for a wide and growing range of use cases. Here are a few examples: -* *Monitor log and event data*. Store logs, metrics, and event data for observability and security information and event management (SIEM). -* *Build search applications*. Add search capabilities to apps or websites, or build enterprise search engines over your organization's internal data sources. -* *Vector database*. Store and search vectorized data, and create vector embeddings with built-in and third-party natural language processing (NLP) models. -* *Retrieval augmented generation (RAG)*. Use {es} as a retrieval engine to augment Generative AI models. -* *Application and security monitoring*. Monitor and analyze application performance and security data effectively. -* *Machine learning*. Use {ml} to automatically model the behavior of your data in real-time. +* *Monitor log and event data*: Store logs, metrics, and event data for observability and security information and event management (SIEM). +* *Build search applications*: Add search capabilities to apps or websites, or build search engines over internal data. +* *Vector database*: Store and search vectorized data, and create vector embeddings with built-in and third-party natural language processing (NLP) models. +* *Retrieval augmented generation (RAG)*: Use {es} as a retrieval engine to augment generative AI models. +* *Application and security monitoring*: Monitor and analyze application performance and security data. +* *Machine learning*: Use {ml} to automatically model the behavior of your data in real-time. This is just a sample of search, observability, and security use cases enabled by {es}. Refer to our https://www.elastic.co/customers/success-stories[customer success stories] for concrete examples across a range of industries. @@ -41,25 +41,25 @@ https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its To use {es}, you need a running instance of the {es} service. You can deploy {es} in various ways: -* <>. Get started quickly with a minimal local Docker setup. -* {cloud}/ec-getting-started-trial.html[*Elastic Cloud*]. {es} is available as part of our hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14 day free trial]. -* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless* (technical preview)]. Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14 day free trial]. +* <>: Get started quickly with a minimal local Docker setup. 
+* {cloud}/ec-getting-started-trial.html[*Elastic Cloud*]: {es} is available as part of our hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14-day free trial]. +* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless* (technical preview)]: Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14-day free trial]. **Advanced deployment options** -* <>. Install, configure, and run {es} on your own premises. -* {ece-ref}/Elastic-Cloud-Enterprise-overview.html[*Elastic Cloud Enterprise*]. Deploy Elastic Cloud on public or private clouds, virtual machines, or your own premises. -* {eck-ref}/k8s-overview.html[*Elastic Cloud on Kubernetes*]. Deploy Elastic Cloud on Kubernetes. +* <>: Install, configure, and run {es} on your own premises. +* {ece-ref}/Elastic-Cloud-Enterprise-overview.html[*Elastic Cloud Enterprise*]: Deploy Elastic Cloud on public or private clouds, virtual machines, or your own premises. +* {eck-ref}/k8s-overview.html[*Elastic Cloud on Kubernetes*]: Deploy Elastic Cloud on Kubernetes. [discrete] [[elasticsearch-next-steps]] === Learn more -Some resources to help you get started: +Here are some resources to help you get started: -* <>. A beginner's guide to deploying your first {es} instance, indexing data, and running queries. -* https://elastic.co/webinars/getting-started-elasticsearch[Webinar: Introduction to {es}]. Register for our live webinars to learn directly from {es} experts. -* https://www.elastic.co/search-labs[Elastic Search Labs]. Tutorials and blogs that explore AI-powered search using the latest {es} features. +* <>: A beginner's guide to deploying your first {es} instance, indexing data, and running queries. +* https://elastic.co/webinars/getting-started-elasticsearch[Webinar: Introduction to {es}]: Register for our live webinars to learn directly from {es} experts. +* https://www.elastic.co/search-labs[Elastic Search Labs]: Tutorials and blogs that explore AI-powered search using the latest {es} features. ** Follow our tutorial https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[to build a hybrid search solution in Python]. ** Check out the https://github.com/elastic/elasticsearch-labs?tab=readme-ov-file#elasticsearch-examples--apps[`elasticsearch-labs` repository] for a range of Python notebooks and apps for various use cases. @@ -133,9 +133,9 @@ In {es}, metadata fields are prefixed with an underscore. The most important metadata fields are: -* `_source`. Contains the original JSON document. -* `_index`. The name of the index where the document is stored. -* `_id`. The document's ID. IDs must be unique per index. +* `_source`: Contains the original JSON document. +* `_index`: The name of the index where the document is stored. +* `_id`: The document's ID. IDs must be unique per index. [discrete] [[elasticsearch-intro-documents-fields-mappings]] @@ -146,8 +146,8 @@ A mapping defines the <> for each field, how the field and how it should be stored. When adding documents to {es}, you have two options for mappings: -* <>. Let {es} automatically detect the data types and create the mappings for you. This is great for getting started quickly. -* <>. Define the mappings up front by specifying data types for each field. Recommended for production use cases. +* <>: Let {es} automatically detect the data types and create the mappings for you. 
This is great for getting started quickly, but can lead to unexpected results for complex data. +* <>: Define the mappings up front by specifying data types for each field. Recommended for production use cases, because you have much more control over how your data is indexed. [TIP] ==== diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index 192f581f28d76..239614345d782 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -33,10 +33,13 @@ mapping values by overriding values in the mapping during the search request. [discrete] [[mapping-dynamic]] == Dynamic mapping -<> allows you to experiment with -and explore data when you’re just getting started. {es} adds new fields -automatically, just by indexing a document. You can add fields to the top-level -mapping, and to inner <> and <> fields. + +When you use <>, {es} automatically +attempts to detect the data type of fields in your documents. This allows +you to get started quickly by just adding data to an index. If you index +additional documents with new fields, {es} will add these fields automatically. +You can add fields to the top-level mapping, and to inner <> +and <> fields. Use <> to define custom mappings that are applied to dynamically added fields based on the matching condition. @@ -44,14 +47,28 @@ applied to dynamically added fields based on the matching condition. [discrete] [[mapping-explicit]] == Explicit mapping -<> allows you to precisely choose how to -define the mapping definition, such as: - -* Which string fields should be treated as full text fields. -* Which fields contain numbers, dates, or geolocations. -* The <> of date values. -* Custom rules to control the mapping for - <>. + +Use <> to define exactly how data types +are mapped to fields, customized to your specific use case. + +Defining your own mappings enables you to: + +* Define which string fields should be treated as full-text fields. +* Define which fields contain numbers, dates, or geolocations. +* Use data types that cannot be automatically detected (such as `geo_point` and `geo_shape`.) +* Choose date value <>, including custom date formats. +* Create custom rules to control the mapping for <>. +* Optimize fields for partial matching. +* Perform language-specific text analysis. + +[TIP] +==== +It’s often useful to index the same field in different ways for different purposes. +For example, you might want to index a string field as both a text field for full-text +search and as a keyword field for sorting or aggregating your data. Or, you might +choose to use more than one language analyzer to process the contents of a string field +that contains user input. +==== Use <> to make schema changes without reindexing. 
You can use runtime fields in conjunction with indexed fields to From 3636797cfe1c2580d5a16f99840666d94bba8fb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Tue, 10 Sep 2024 12:43:08 +0200 Subject: [PATCH 20/31] [DOCS] Adds path params and available task types to the PUT inference page (#112696) Co-authored-by: Liam Thompson <32779855+leemthompo@users.noreply.github.com> --- .../inference/put-inference.asciidoc | 49 +++++++++++++------ 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index ba26a563541fc..b106e2c4a08fc 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -31,27 +31,46 @@ use the <>. * Requires the `manage_inference` <> (the built-in `inference_admin` role grants this privilege) +[discrete] +[[put-inference-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=inference-id] + +``:: +(Required, string) +include::inference-shared.asciidoc[tag=task-type] ++ +-- +Refer to the service list in the <> for the available task types. +-- + + [discrete] [[put-inference-api-desc]] ==== {api-description-title} The create {infer} API enables you to create an {infer} endpoint and configure a {ml} model to perform a specific {infer} task. -The following services are available through the {infer} API, click the links to review the configuration details of the services: - -* <> -* <> -* <> -* <> -* <> -* <> -* <> (for built-in models and models uploaded through Eland) -* <> -* <> -* <> -* <> -* <> -* <> +The following services are available through the {infer} API. +You can find the available task types next to the service name. +Click the links to review the configuration details of the services: + +* <> (`rerank`, `sparse_embedding`, `text_embedding`) +* <> (`completion`, `text_embedding`) +* <> (`completion`) +* <> (`completion`, `text_embedding`) +* <> (`completion`, `text_embedding`) +* <> (`completion`, `rerank`, `text_embedding`) +* <> (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) +* <> (`sparse_embedding`) +* <> (`completion`, `text_embedding`) +* <> (`rerank`, `text_embedding`) +* <> (`text_embedding`) +* <> (`text_embedding`) +* <> (`completion`, `text_embedding`) The {es} and ELSER services run on a {ml} node in your {es} cluster. The rest of the services connect to external providers. 
\ No newline at end of file From 35ae0a431be261aaddfb643201e4c61c7ebf519e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Tue, 10 Sep 2024 11:13:39 +0000 Subject: [PATCH 21/31] Bump versions after 7.17.24 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 6 +++--- .buildkite/pipelines/periodic.yml | 10 +++++----- .ci/bwcVersions | 2 +- .ci/snapshotBwcVersions | 2 +- server/src/main/java/org/elasticsearch/Version.java | 1 + .../resources/org/elasticsearch/TransportVersions.csv | 1 + .../org/elasticsearch/index/IndexVersions.csv | 1 + 8 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index beb45107bc313..f698f722d977e 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] + BWC_VERSION: ["7.17.25", "8.15.2", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index cd0bc8449f89e..3c98dd4b30e74 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -322,8 +322,8 @@ steps: env: BWC_VERSION: 7.16.3 - - label: "{{matrix.image}} / 7.17.24 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.24 + - label: "{{matrix.image}} / 7.17.25 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.25 timeout_in_minutes: 300 matrix: setup: @@ -337,7 +337,7 @@ steps: buildDirectory: /dev/shm/bk diskSizeGb: 250 env: - BWC_VERSION: 7.17.24 + BWC_VERSION: 7.17.25 - label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 8f25a0fb11065..4f862911a2d8c 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -342,8 +342,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 7.17.24 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.24#bwcTest + - label: 7.17.25 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.25#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -353,7 +353,7 @@ steps: preemptible: true diskSizeGb: 250 env: - BWC_VERSION: 7.17.24 + BWC_VERSION: 7.17.25 retry: automatic: - exit_status: "-1" @@ -771,7 +771,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] + BWC_VERSION: ["7.17.25", "8.15.2", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -821,7 +821,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] + BWC_VERSION: ["7.17.25", "8.15.2", "8.16.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index b80309cdb3f8e..6c5aaa38717ef 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -16,7 +16,7 @@ BWC_VERSION: - "7.14.2" - "7.15.2" - "7.16.3" - - "7.17.24" + - "7.17.25" - "8.0.1" - "8.1.3" - "8.2.3" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index e41bbac68f1ec..f00be923db67c 100644 --- a/.ci/snapshotBwcVersions +++ 
b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - - "7.17.24" + - "7.17.25" - "8.15.2" - "8.16.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 54b6b1ef9c8c8..0164c6b80fa6b 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -124,6 +124,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_22 = new Version(7_17_22_99); public static final Version V_7_17_23 = new Version(7_17_23_99); public static final Version V_7_17_24 = new Version(7_17_24_99); + public static final Version V_7_17_25 = new Version(7_17_25_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index 88bf3232a2b17..19528a9719e22 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -71,6 +71,7 @@ 7.17.21,7172199 7.17.22,7172299 7.17.23,7172399 +7.17.24,7172499 8.0.0,8000099 8.0.1,8000199 8.1.0,8010099 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index f89bbb5712634..2e684719688f9 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -71,6 +71,7 @@ 7.17.21,7172199 7.17.22,7172299 7.17.23,7172399 +7.17.24,7172499 8.0.0,8000099 8.0.1,8000199 8.1.0,8010099 From dcb7ed2c2099ac2e5e34a12b6a94b105c4337551 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 10 Sep 2024 21:19:23 +1000 Subject: [PATCH 22/31] Mute org.elasticsearch.xpack.ml.integration.MlJobIT testDelete_multipleRequest #112701 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a6e3731fa8921..de97b81294d7c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -210,6 +210,9 @@ tests: - class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT method: test {case-functions.testUcaseInline3} issue: https://github.com/elastic/elasticsearch/issues/112643 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testDelete_multipleRequest + issue: https://github.com/elastic/elasticsearch/issues/112701 # Examples: # From ecd887d65107e1254a5b787a6263a85491afb03c Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Sep 2024 12:55:34 +0100 Subject: [PATCH 23/31] Remove unused compat shims from `o.e.a.datastreams` (#112697) Relates #111474 Relates #107984 --- .../datastreams/IngestFailureStoreMetricsIT.java | 4 ++-- .../action/datastreams/CreateDataStreamAction.java | 6 ------ .../action/datastreams/DeleteDataStreamAction.java | 5 ----- .../action/datastreams/GetDataStreamAction.java | 6 ------ 4 files changed, 2 insertions(+), 19 deletions(-) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index a52016e8c7f0b..18ba5f4bc1213 100644 --- 
a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -237,7 +237,7 @@ public void testRerouteSuccessfulCorrectName() throws IOException { createDataStream(); String destination = dataStream + "-destination"; - final var createDataStreamRequest = new CreateDataStreamAction.Request(destination); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, destination); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); createReroutePipeline(destination); @@ -306,7 +306,7 @@ private void putComposableIndexTemplate(boolean failureStore) throws IOException } private void createDataStream() { - final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStream); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java index 7c788b10405fc..30c6699ac902e 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,11 +48,6 @@ public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name, l this.startTime = startTime; } - @Deprecated(forRemoval = true) // temporary compatibility shim - public Request(String name) { - this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, name); - } - public String getName() { return name; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java index 1a62e347012fe..d95a8bbc4b461 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java @@ -53,11 +53,6 @@ public Request(TimeValue masterNodeTimeout, String... names) { this.wildcardExpressionsOriginallySpecified = Arrays.stream(names).anyMatch(Regex::isSimpleMatchPattern); } - @Deprecated(forRemoval = true) // temporary compatibility shim - public Request(String... 
names) { - this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, names); - } public String[] getNames() { return names; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 2bcd824dfea3c..2f9ba9220fbce 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.DataStream; @@ -71,11 +70,6 @@ public Request(TimeValue masterNodeTimeout, String[] names, boolean includeDefau this.includeDefaults = includeDefaults; } - @Deprecated(forRemoval = true) // temporary compatibility shim - public Request(String[] names) { - this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, names); - } - public String[] getNames() { return names; } From c2de4b7e57f7c6259bbd5f3b8a8bb41e58fd7eae Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Tue, 10 Sep 2024 15:11:06 +0200 Subject: [PATCH 25/31] Allowlist `tracestate` header on remote server port (#112649) The [`tracestate` header](https://www.elastic.co/guide/en/apm/agent/rum-js/current/distributed-tracing-guide.html#enable-tracestate) is an HTTP header used for distributed tracing; it's a valid header to persist in cross-cluster requests and should therefore be allowlisted in the remote server port header check. Note: due to implementation details, `tracestate` today may be set on the fulfilling cluster (instead of arriving across the wire) _before_ the header check. Not allowing the header can therefore lead to failures to connect clusters (https://github.com/elastic/elasticsearch/issues/112552). This PR allowlists the header to allow tracing with RCS 2.0. As a separate follow-up, we may also change the behavior around sending the header from the query cluster to the fulfilling cluster (which we don't today). This is pending further discussion.
Closes: https://github.com/elastic/elasticsearch/issues/112552 --- docs/changelog/112649.yaml | 5 + .../remotecluster/ConsumingTestServer.java | 104 +++++++++ ...teClusterSecurityWithApmTracingRestIT.java | 201 ++++++++++++++++++ ...ossClusterAccessServerTransportFilter.java | 1 + 4 files changed, 311 insertions(+) create mode 100644 docs/changelog/112649.yaml create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/ConsumingTestServer.java create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityWithApmTracingRestIT.java diff --git a/docs/changelog/112649.yaml b/docs/changelog/112649.yaml new file mode 100644 index 0000000000000..e3cf1e8e34881 --- /dev/null +++ b/docs/changelog/112649.yaml @@ -0,0 +1,5 @@ +pr: 112649 +summary: Allowlist `tracestate` header on remote server port +area: Security +type: bug +issues: [] diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/ConsumingTestServer.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/ConsumingTestServer.java new file mode 100644 index 0000000000000..07fb84cf56000 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/ConsumingTestServer.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.xpack.remotecluster; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpServer; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.SuppressForbidden; +import org.junit.rules.ExternalResource; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +@SuppressForbidden(reason = "Uses an HTTP server for testing") +class ConsumingTestServer extends ExternalResource { + private static final Logger logger = LogManager.getLogger(ConsumingTestServer.class); + final ArrayBlockingQueue received = new ArrayBlockingQueue<>(1000); + + private static HttpServer server; + private final Thread messageConsumerThread = consumerThread(); + private volatile Consumer consumer; + private volatile boolean consumerRunning = true; + + @Override + protected void before() throws Throwable { + server = HttpServer.create(); + server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + server.createContext("/", this::handle); + server.start(); + + messageConsumerThread.start(); + } + + private Thread consumerThread() { + return new Thread(() -> { + while (consumerRunning) { + if (consumer != null) { + try { + String msg = received.poll(1L, TimeUnit.SECONDS); + if (msg != null && msg.isEmpty() == false) { + consumer.accept(msg); + } + + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + }); + } + + @Override + protected void after() { + server.stop(1); + consumerRunning = false; + } + + private void handle(HttpExchange exchange) throws IOException { + try (exchange) { + try { + try (InputStream requestBody = exchange.getRequestBody()) { + if (requestBody != null) { + var read = readJsonMessages(requestBody); + received.addAll(read); + } + } + + } catch (RuntimeException e) { + logger.warn("failed to parse request", e); + } + exchange.sendResponseHeaders(201, 0); + } + } + + private List readJsonMessages(InputStream input) { + // parse NDJSON + return new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8)).lines().toList(); + } + + public int getPort() { + return server.getAddress().getPort(); + } + + public void addMessageConsumer(Consumer messageConsumer) { + this.consumer = messageConsumer; + } +} diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityWithApmTracingRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityWithApmTracingRestIT.java new file mode 100644 index 0000000000000..f4d8177685b33 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityWithApmTracingRestIT.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.spi.XContentProvider; +import org.hamcrest.Matcher; +import org.hamcrest.StringDescription; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class RemoteClusterSecurityWithApmTracingRestIT extends AbstractRemoteClusterSecurityTestCase { + private static final AtomicReference> API_KEY_MAP_REF = new AtomicReference<>(); + private static final XContentProvider.FormatProvider XCONTENT = XContentProvider.provider().getJsonXContent(); + final String traceIdValue = "0af7651916cd43dd8448eb211c80319c"; + final String traceParentValue = "00-" + traceIdValue + "-b7ad6b7169203331-01"; + + private static final ConsumingTestServer mockApmServer = new ConsumingTestServer(); + + static { + fulfillingCluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .name("fulfilling-cluster") + .apply(commonClusterConfig) + .setting("telemetry.metrics.enabled", "false") + .setting("telemetry.tracing.enabled", "true") + .setting("telemetry.agent.metrics_interval", "1s") + .setting("telemetry.agent.server_url", () -> "http://127.0.0.1:" + mockApmServer.getPort()) + // to ensure tracestate header is always set to cover RCS 2.0 handling of the tracestate header + .setting("telemetry.agent.transaction_sample_rate", "1.0") + .setting("remote_cluster_server.enabled", "true") + .setting("remote_cluster.port", "0") + .setting("xpack.security.remote_cluster_server.ssl.enabled", "true") + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + .rolesFile(Resource.fromClasspath("roles.yml")) + .build(); + + queryCluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .name("query-cluster") + .apply(commonClusterConfig) + .setting("telemetry.metrics.enabled", "false") + .setting("telemetry.tracing.enabled", "true") + // to ensure tracestate header is always set to cover RCS 2.0 handling of the tracestate header + .setting("telemetry.agent.transaction_sample_rate", "1.0") + .setting("telemetry.agent.metrics_interval", "1s") + .setting("telemetry.agent.server_url", () -> "http://127.0.0.1:" + mockApmServer.getPort()) + .setting("xpack.security.remote_cluster_client.ssl.enabled", "true") + 
.setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt") + .keystore("cluster.remote.my_remote_cluster.credentials", () -> { + if (API_KEY_MAP_REF.get() == null) { + final Map apiKeyMap = createCrossClusterAccessApiKey(""" + { + "search": [ + { + "names": ["*"] + } + ] + }"""); + API_KEY_MAP_REF.set(apiKeyMap); + } + return (String) API_KEY_MAP_REF.get().get("encoded"); + }) + .rolesFile(Resource.fromClasspath("roles.yml")) + .user(REMOTE_METRIC_USER, PASS.toString(), "read_remote_shared_metrics", false) + .build(); + } + + @ClassRule + // Use a RuleChain to ensure that fulfilling cluster is started before query cluster + public static TestRule clusterRule = RuleChain.outerRule(mockApmServer).around(fulfillingCluster).around(queryCluster); + + @SuppressWarnings("unchecked") + public void testTracingCrossCluster() throws Exception { + configureRemoteCluster(); + Set>> assertions = new HashSet<>( + Set.of( + // REST action on query cluster + allTrue( + transactionValue("name", equalTo("GET /_resolve/cluster/{name}")), + transactionValue("trace_id", equalTo(traceIdValue)) + ), + // transport action on fulfilling cluster + allTrue( + transactionValue("name", equalTo("indices:admin/resolve/cluster")), + transactionValue("trace_id", equalTo(traceIdValue)) + ) + ) + ); + + CountDownLatch finished = new CountDownLatch(1); + + // a consumer that will remove the assertions from a map once it matched + Consumer messageConsumer = (String message) -> { + var apmMessage = parseMap(message); + if (isTransactionTraceMessage(apmMessage)) { + logger.info("Apm transaction message received: {}", message); + assertions.removeIf(e -> e.test(apmMessage)); + } + + if (assertions.isEmpty()) { + finished.countDown(); + } + }; + + mockApmServer.addMessageConsumer(messageConsumer); + + // Trigger an action that we know will cross clusters -- doesn't much matter which one + final Request resolveRequest = new Request("GET", "/_resolve/cluster/my_remote_cluster:*"); + resolveRequest.setOptions( + RequestOptions.DEFAULT.toBuilder() + .addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_METRIC_USER, PASS)) + .addHeader(Task.TRACE_PARENT_HTTP_HEADER, traceParentValue) + ); + final Response response = client().performRequest(resolveRequest); + assertOK(response); + + finished.await(30, TimeUnit.SECONDS); + assertThat(assertions, equalTo(Collections.emptySet())); + } + + private boolean isTransactionTraceMessage(Map apmMessage) { + return apmMessage.containsKey("transaction"); + } + + @SuppressWarnings("unchecked") + private Predicate> allTrue(Predicate>... 
predicates) { + var allTrueTest = Arrays.stream(predicates).reduce(v -> true, Predicate::and); + return new Predicate<>() { + @Override + public boolean test(Map map) { + return allTrueTest.test(map); + } + + @Override + public String toString() { + return Arrays.stream(predicates).map(Object::toString).collect(Collectors.joining(" and ")); + } + }; + } + + @SuppressWarnings("unchecked") + private Predicate> transactionValue(String path, Matcher expected) { + return new Predicate<>() { + @Override + public boolean test(Map map) { + var transaction = (Map) map.get("transaction"); + var value = XContentMapValues.extractValue(path, transaction); + return expected.matches((T) value); + } + + @Override + public String toString() { + StringDescription matcherDescription = new StringDescription(); + expected.describeTo(matcherDescription); + return path + " " + matcherDescription; + } + }; + } + + private Map parseMap(String message) { + try (XContentParser parser = XCONTENT.XContent().createParser(XContentParserConfiguration.EMPTY, message)) { + return parser.map(); + } catch (IOException e) { + fail(e); + return Collections.emptyMap(); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java index 9809127080dc5..e3cd1d2f123d6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java @@ -41,6 +41,7 @@ final class CrossClusterAccessServerTransportFilter extends ServerTransportFilte Set.of(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY, CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY) ); allowedHeaders.add(AuditUtil.AUDIT_REQUEST_ID); + allowedHeaders.add(Task.TRACE_STATE); allowedHeaders.addAll(Task.HEADERS_TO_COPY); ALLOWED_TRANSPORT_HEADERS = Set.copyOf(allowedHeaders); } From 66303ab5e4ed175c61614b08562a50e74bf00498 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 10 Sep 2024 08:54:46 -0500 Subject: [PATCH 26/31] Fixing a simulate ingest yaml rest test (#112686) --- muted-tests.yml | 3 --- .../rest-api-spec/test/ingest/80_ingest_simulate.yml | 8 +++++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 12183e64bc53d..cf4e519d78a17 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -154,9 +154,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112471 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 -- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT - method: test {yaml=ingest/80_ingest_simulate/Test ingest simulate with reroute and mapping validation from templates} - issue: https://github.com/elastic/elasticsearch/issues/112575 - class: org.elasticsearch.script.mustache.LangMustacheClientYamlTestSuiteIT method: test {yaml=lang_mustache/50_multi_search_template/Multi-search template with errors} issue: https://github.com/elastic/elasticsearch/issues/112580 diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml 
b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 1a77019914283..35ec9979c3250 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -217,7 +217,9 @@ setup: "Test ingest simulate with reroute and mapping validation from templates": - skip: - features: headers + features: + - headers + - allowed_warnings - requires: cluster_features: ["simulate.mapping.validation.templates"] @@ -241,6 +243,8 @@ setup: - match: { acknowledged: true } - do: + allowed_warnings: + - "index template [first-index-template] has index patterns [first-index*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [first-index-template] will take precedence during new index creation" indices.put_index_template: name: first-index-template body: @@ -255,6 +259,8 @@ setup: type: text - do: + allowed_warnings: + - "index template [second-index-template] has index patterns [second-index*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [second-index-template] will take precedence during new index creation" indices.put_index_template: name: second-index-template body: From 7784d4f3537a6d53211ed21f4956656cce280c98 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 10 Sep 2024 15:43:30 +0100 Subject: [PATCH 27/31] Add warning when using a textual date field specifier with the COMPAT locale provider (#112548) Some textual field specifiers change between COMPAT and CLDR, depending on locale --- .../common/time/DateFormatters.java | 2 ++ .../elasticsearch/common/time/DateUtils.java | 17 +++++++++++++++++ .../license/ClusterStateLicenseService.java | 2 +- .../org/elasticsearch/license/LicenseUtils.java | 14 +++++++++++--- 4 files changed, 31 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index ae8f8cb28da11..c1081f3d62850 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -2323,6 +2323,8 @@ static DateFormatter forPattern(String input) { } else if (FormatNames.STRICT_YEAR_MONTH_DAY.matches(input)) { return STRICT_YEAR_MONTH_DAY; } else { + DateUtils.checkTextualDateFormats(input); + try { return newDateFormatter( input, diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java index 8e98adc183369..e312ce78ea157 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java @@ -10,6 +10,9 @@ import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.core.Predicates; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.logging.LogManager; import java.time.Clock; import java.time.Duration; @@ -19,6 +22,8 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Predicate; +import java.util.regex.Pattern; import static java.util.Map.entry; import static 
org.elasticsearch.common.time.DateUtilsRounding.getMonthOfYear; @@ -382,4 +387,16 @@ public static ZonedDateTime nowWithMillisResolution(Clock clock) { Clock millisResolutionClock = Clock.tick(clock, Duration.ofMillis(1)); return ZonedDateTime.now(millisResolutionClock); } + + // check for all textual fields, and localized zone offset + private static final Predicate CONTAINS_CHANGING_TEXT_SPECIFIERS = System.getProperty("java.locale.providers", "") + .contains("COMPAT") ? Pattern.compile("[EcGaO]|MMM|LLL|eee|ccc|QQQ|ZZZZ").asPredicate() : Predicates.never(); + + @UpdateForV9 // this can be removed, we will only use CLDR on v9 + static void checkTextualDateFormats(String format) { + if (CONTAINS_CHANGING_TEXT_SPECIFIERS.test(format)) { + LogManager.getLogger(DateFormatter.class) + .warn("Date format [{}] contains textual field specifiers that could change in JDK 23", format); + } + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java index f5123b9352fe3..a38170d87f9a1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java @@ -140,7 +140,7 @@ CharSequence buildExpirationMessage(long expirationMillis, boolean expired) { License [{}] on [{}]. # If you have a new license, please update it. Otherwise, please reach out to # your support contact. - #\s""", expiredMsg, LicenseUtils.DATE_FORMATTER.formatMillis(expirationMillis)); + #\s""", expiredMsg, LicenseUtils.formatMillis(expirationMillis)); if (expired) { general = general.toUpperCase(Locale.ROOT); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java index b27c1bb9d449c..7e67ee892043d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -8,7 +8,6 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.license.License.LicenseType; import org.elasticsearch.license.internal.XPackLicenseStatus; import org.elasticsearch.protocol.xpack.license.LicenseStatus; @@ -16,6 +15,9 @@ import java.nio.charset.StandardCharsets; import java.time.Clock; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -25,7 +27,13 @@ public class LicenseUtils { public static final String EXPIRED_FEATURE_METADATA = "es.license.expired.feature"; - public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("EEEE, MMMM dd, yyyy").withLocale(Locale.ENGLISH); + + public static String formatMillis(long millis) { + // DateFormatters logs a warning about the pattern on COMPAT + // this will be confusing to users, so call DateTimeFormatter directly instead + return DateTimeFormatter.ofPattern("EEEE, MMMM dd, yyyy", Locale.ENGLISH) + .format(Instant.ofEpochMilli(millis).atOffset(ZoneOffset.UTC)); + } /** * Exception to be thrown when a feature action requires a valid license, but license @@ -155,7 +163,7 @@ public static String getExpiryWarning(long licenseExpiryDate, long currentTime) ? 
"expires today" : (diff > 0 ? String.format(Locale.ROOT, "will expire in [%d] days", days) - : String.format(Locale.ROOT, "expired on [%s]", LicenseUtils.DATE_FORMATTER.formatMillis(licenseExpiryDate))); + : String.format(Locale.ROOT, "expired on [%s]", formatMillis(licenseExpiryDate))); return "Your license " + expiryMessage + ". " From 9081a951d5018595c5305a53d2865d75b10aeb87 Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Tue, 10 Sep 2024 09:31:06 -0600 Subject: [PATCH 28/31] Implement CCS telemetry export as part of _cluster/stats (#112310) * Implement CCS telemetry export as part of _cluster/stats --- docs/reference/cluster/stats.asciidoc | 173 +++++++++++++++++- .../org/elasticsearch/TransportVersions.java | 1 + .../cluster/stats/CCSTelemetrySnapshot.java | 4 +- .../cluster/stats/CCSUsageTelemetry.java | 2 +- .../stats/ClusterStatsNodeResponse.java | 18 +- .../cluster/stats/ClusterStatsResponse.java | 16 ++ .../stats/TransportClusterStatsAction.java | 6 +- .../action/search/TransportSearchAction.java | 2 +- .../cluster/stats/VersionStatsTests.java | 3 +- .../ClusterStatsMonitoringDocTests.java | 33 +++- 10 files changed, 248 insertions(+), 10 deletions(-) diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index c39bc0dcd2878..575a6457804a6 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1307,6 +1307,142 @@ Each repository type may also include other statistics about the repositories of ==== +`ccs`:: +(object) Contains information relating to <> settings and activity in the cluster. ++ +.Properties of `ccs` +[%collapsible%open] +===== + + +`_search`::: +(object) Contains the telemetry information about the <> usage in the cluster. ++ +.Properties of `_search` +[%collapsible%open] +====== +`total`::: +(integer) The total number of {ccs} requests that have been executed by the cluster. + +`success`::: +(integer) The total number of {ccs} requests that have been successfully executed by the cluster. + +`skipped`::: +(integer) The total number of {ccs} requests (successful or failed) that had at least one remote cluster skipped. + +`took`::: +(object) Contains statistics about the time taken to execute {ccs} requests. ++ +.Properties of `took` +[%collapsible%open] +======= +`max`::: +(integer) The maximum time taken to execute a {ccs} request, in milliseconds. + +`avg`::: +(integer) The median time taken to execute a {ccs} request, in milliseconds. + +`p90`::: +(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds. +======= + +`took_mrt_true`:: +(object) Contains statistics about the time taken to execute {ccs} requests for which the +<> setting was set to `true`. ++ +.Properties of `took_mrt_true` +[%collapsible%open] +======= +`max`::: +(integer) The maximum time taken to execute a {ccs} request, in milliseconds. + +`avg`::: +(integer) The median time taken to execute a {ccs} request, in milliseconds. + +`p90`::: +(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds. +======= + +`took_mrt_false`:: +(object) Contains statistics about the time taken to execute {ccs} requests for which the +<> setting was set to `false`. ++ +.Properties of `took_mrt_false` +[%collapsible%open] +======= +`max`::: +(integer) The maximum time taken to execute a {ccs} request, in milliseconds. + +`avg`::: +(integer) The median time taken to execute a {ccs} request, in milliseconds. 
+ +`p90`::: +(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds. +======= + +`remotes_per_search_max`:: +(integer) The maximum number of remote clusters that were queried in a single {ccs} request. + +`remotes_per_search_avg`:: +(float) The average number of remote clusters that were queried in a single {ccs} request. + +`failure_reasons`:: +(object) Contains statistics about the reasons for {ccs} request failures. +The keys are the failure reason names and the values are the number of requests that failed for that reason. + +`features`:: +(object) Contains statistics about the features used in {ccs} requests. The keys are the names of the search feature, +and the values are the number of requests that used that feature. Single request can use more than one feature +(e.g. both `async` and `wildcard`). Known features are: + +* `async` - <> + +* `mrt` - <> setting was set to `true`. + +* `wildcard` - <> for indices with wildcards was used in the search request. + +`clients`:: +(object) Contains statistics about the clients that executed {ccs} requests. +The keys are the names of the clients, and the values are the number of requests that were executed by that client. +Only known clients (such as `kibana` or `elasticsearch`) are counted. + +`clusters`:: +(object) Contains statistics about the clusters that were queried in {ccs} requests. +The keys are cluster names, and the values are per-cluster telemetry data. +This also includes the local cluster itself, which uses the name `(local)`. ++ +.Properties of per-cluster data: +[%collapsible%open] +======= +`total`::: +(integer) The total number of successful (not skipped) {ccs} requests that were executed against this cluster. +This may include requests where partial results were returned, but not requests in which the cluster has been skipped entirely. + +`skipped`::: +(integer) The total number of {ccs} requests for which this cluster was skipped. + +`took`::: +(object) Contains statistics about the time taken to execute requests against this cluster. ++ +.Properties of `took` +[%collapsible%open] +======== +`max`::: +(integer) The maximum time taken to execute a {ccs} request, in milliseconds. + +`avg`::: +(integer) The median time taken to execute a {ccs} request, in milliseconds. + +`p90`::: +(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds. +======== + +======= + +====== + +===== + [[cluster-stats-api-example]] ==== {api-examples-title} @@ -1607,7 +1743,35 @@ The API returns the following response: }, "repositories": { ... - } + }, + "ccs": { + "_search": { + "total": 7, + "success": 7, + "skipped": 0, + "took": { + "max": 36, + "avg": 20, + "p90": 33 + }, + "took_mrt_true": { + "max": 33, + "avg": 15, + "p90": 33 + }, + "took_mrt_false": { + "max": 36, + "avg": 26, + "p90": 36 + }, + "remotes_per_search_max": 3, + "remotes_per_search_avg": 2.0, + "failure_reasons": { ... }, + "features": { ... }, + "clients": { ... }, + "clusters": { ... 
} + } + } } -------------------------------------------------- // TESTRESPONSE[s/"plugins": \[[^\]]*\]/"plugins": $body.$_path/] @@ -1618,10 +1782,15 @@ The API returns the following response: // TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/] // TESTRESPONSE[s/"snapshots": \{[^\}]*\}/"snapshots": $body.$_path/] // TESTRESPONSE[s/"repositories": \{[^\}]*\}/"repositories": $body.$_path/] +// TESTRESPONSE[s/"clusters": \{[^\}]*\}/"clusters": $body.$_path/] +// TESTRESPONSE[s/"features": \{[^\}]*\}/"features": $body.$_path/] +// TESTRESPONSE[s/"clients": \{[^\}]*\}/"clients": $body.$_path/] +// TESTRESPONSE[s/"failure_reasons": \{[^\}]*\}/"failure_reasons": $body.$_path/] // TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/] // TESTRESPONSE[s/"runtime_field_types": \[[^\]]*\]/"runtime_field_types": $body.$_path/] // TESTRESPONSE[s/"search": \{[^\}]*\}/"search": $body.$_path/] -// TESTRESPONSE[s/: true|false/: $body.$_path/] +// TESTRESPONSE[s/"remotes_per_search_avg": [.0-9]+/"remotes_per_search_avg": $body.$_path/] +// TESTRESPONSE[s/: (true|false)/: $body.$_path/] // TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] // TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] // These replacements do a few things: diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 6640b8b5eac8f..2bd1d79afd52d 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -207,6 +207,7 @@ static TransportVersion def(int id) { public static final TransportVersion UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH = def(8_737_00_0); public static final TransportVersion ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS = def(8_738_00_0); + public static final TransportVersion CCS_TELEMETRY_STATS = def(8_739_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java index fe1da86dd54c7..68fd4c2a1529a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java @@ -277,7 +277,7 @@ public int hashCode() { */ public void add(CCSTelemetrySnapshot stats) { // This should be called in ClusterStatsResponse ctor only, so we don't need to worry about concurrency - if (stats.totalCount == 0) { + if (stats == null || stats.totalCount == 0) { // Just ignore the empty stats. // This could happen if the node is brand new or if the stats are not available, e.g. because it runs an old version. 
return; @@ -315,7 +315,7 @@ public void add(CCSTelemetrySnapshot stats) { * "p90": 2570 * } */ - public static void publishLatency(XContentBuilder builder, String name, LongMetricValue took) throws IOException { + private static void publishLatency(XContentBuilder builder, String name, LongMetricValue took) throws IOException { builder.startObject(name); { builder.field("max", took.max()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java index 60766bd4068e3..6016378aa8867 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java @@ -175,7 +175,7 @@ public static class PerClusterCCSTelemetry { // The number of successful (not skipped) requests to this cluster. private final LongAdder count; private final LongAdder skippedCount; - // This is only over the successful requetss, skipped ones do not count here. + // This is only over the successful requests, skipped ones do not count here. private final LongMetric took; PerClusterCCSTelemetry(String clusterAlias) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index b48295dc8b3eb..732eb2ec2dcc2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -30,6 +30,7 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { private final ClusterHealthStatus clusterStatus; private final SearchUsageStats searchUsageStats; private final RepositoryUsageStats repositoryUsageStats; + private final CCSTelemetrySnapshot ccsMetrics; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); @@ -47,6 +48,11 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } else { repositoryUsageStats = RepositoryUsageStats.EMPTY; } + if (in.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) { + ccsMetrics = new CCSTelemetrySnapshot(in); + } else { + ccsMetrics = new CCSTelemetrySnapshot(); + } } public ClusterStatsNodeResponse( @@ -56,7 +62,8 @@ public ClusterStatsNodeResponse( NodeStats nodeStats, ShardStats[] shardsStats, SearchUsageStats searchUsageStats, - RepositoryUsageStats repositoryUsageStats + RepositoryUsageStats repositoryUsageStats, + CCSTelemetrySnapshot ccsTelemetrySnapshot ) { super(node); this.nodeInfo = nodeInfo; @@ -65,6 +72,7 @@ public ClusterStatsNodeResponse( this.clusterStatus = clusterStatus; this.searchUsageStats = Objects.requireNonNull(searchUsageStats); this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats); + this.ccsMetrics = ccsTelemetrySnapshot; } public NodeInfo nodeInfo() { @@ -95,6 +103,10 @@ public RepositoryUsageStats repositoryUsageStats() { return repositoryUsageStats; } + public CCSTelemetrySnapshot getCcsMetrics() { + return ccsMetrics; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -108,5 +120,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { repositoryUsageStats.writeTo(out); } // else just drop these 
stats, ok for bwc + if (out.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) { + ccsMetrics.writeTo(out); + } } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index b6dd40e8c8b79..267db92496f76 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.Locale; +import static org.elasticsearch.action.search.TransportSearchAction.CCS_TELEMETRY_FEATURE_FLAG; + public class ClusterStatsResponse extends BaseNodesResponse implements ToXContentFragment { final ClusterStatsNodes nodesStats; @@ -31,6 +33,8 @@ public class ClusterStatsResponse extends BaseNodesResponse ccsMetrics.add(node.getCcsMetrics())); this.status = status; this.clusterSnapshotStats = clusterSnapshotStats; @@ -90,6 +96,10 @@ public ClusterStatsIndices getIndicesStats() { return indicesStats; } + public CCSTelemetrySnapshot getCcsMetrics() { + return ccsMetrics; + } + @Override public void writeTo(StreamOutput out) throws IOException { TransportAction.localOnly(); @@ -125,6 +135,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("repositories"); repositoryUsageStats.toXContent(builder, params); + if (CCS_TELEMETRY_FEATURE_FLAG.isEnabled()) { + builder.startObject("ccs"); + ccsMetrics.toXContent(builder, params); + builder.endObject(); + } + return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 1912de3cfa4d2..66cf627ce066e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -81,6 +81,7 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final IndicesService indicesService; private final RepositoriesService repositoriesService; private final SearchUsageHolder searchUsageHolder; + private final CCSUsageTelemetry ccsUsageHolder; private final MetadataStatsCache mappingStatsCache; private final MetadataStatsCache analysisStatsCache; @@ -108,6 +109,7 @@ public TransportClusterStatsAction( this.indicesService = indicesService; this.repositoriesService = repositoriesService; this.searchUsageHolder = usageService.getSearchUsageHolder(); + this.ccsUsageHolder = usageService.getCcsUsageHolder(); this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); } @@ -249,6 +251,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq final SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats(); + final CCSTelemetrySnapshot ccsTelemetry = ccsUsageHolder.getCCSTelemetrySnapshot(); return new ClusterStatsNodeResponse( nodeInfo.getNode(), @@ -257,7 +260,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq nodeStats, shardsStats.toArray(new 
ShardStats[shardsStats.size()]), searchUsageStats, - repositoryUsageStats + repositoryUsageStats, + ccsTelemetry ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 23ff692da4887..30faae9c1a5fb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -127,7 +127,7 @@ public class TransportSearchAction extends HandledTransportAction SHARD_COUNT_LIMIT_SETTING = Setting.longSetting( diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index 20eae9833e4b0..b6f1ac46b4250 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java @@ -128,7 +128,8 @@ public void testCreation() { null, new ShardStats[] { shardStats }, new SearchUsageStats(), - RepositoryUsageStats.EMPTY + RepositoryUsageStats.EMPTY, + null ); stats = VersionStats.of(metadata, Collections.singletonList(nodeResponse)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index 4a695f7c51e4c..279fec8cc99af 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -754,7 +754,38 @@ public void testToXContent() throws IOException { }, "repositories": {} }, - "repositories": {} + "repositories": {}, + "ccs": { + "_search": { + "total": 0, + "success": 0, + "skipped": 0, + "took": { + "max": 0, + "avg": 0, + "p90": 0 + }, + "took_mrt_true": { + "max": 0, + "avg": 0, + "p90": 0 + }, + "took_mrt_false": { + "max": 0, + "avg": 0, + "p90": 0 + }, + "remotes_per_search_max": 0, + "remotes_per_search_avg": 0.0, + "failure_reasons": { + }, + "features": { + }, + "clients": { + }, + "clusters": {} + } + } }, "cluster_state": { "nodes_hash": 1314980060, From 8c088d50730e4d833343ab0e6b3f9e4c709aace7 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Tue, 10 Sep 2024 18:08:56 +0200 Subject: [PATCH 29/31] [test] Track index commits internally acquired by the commits listener in CombinedDeletionPolicy (#112507) After finishing an integration test, we run some checks against the test cluster, among others we assert there are no leaky acquired index commits left in `InternalTestCluster#beforeIndexDeletion`. The issue is that while we check the test cluster before we shut it down, we can't guarantee that there wouldn't be a new commit triggered by a background merge which will acquire an index commit. But we actually don't care about these commits acquired internally as part of `CombinedDeletionPolicy#commitsListener` callbacks. We just want to make sure that all index commits that have been acquired explicitly are also released. So, we make an explicit distinction between external and internal index commits that are tracked in `CombinedDeletionPolicy`. 
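To make the bookkeeping concrete, here is a condensed, purely illustrative sketch (class and method names are simplified stand-ins, not the real API; the actual change is in the `CombinedDeletionPolicy` diff below): external acquisitions only bump a per-commit reference count, internal acquisitions are additionally remembered in a separate set, and the test-only leak check ignores commits whose only reference comes from an internal acquisition.

```
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative stand-in for the ref-counting done by the deletion policy.
class CommitRefTracker<C> {
    private final Map<C, Integer> acquired = new HashMap<>();  // references held per commit
    private final Set<C> internallyAcquired = new HashSet<>(); // commits held by the commits listener

    synchronized void acquire(C commit, boolean internal) {
        acquired.merge(commit, 1, Integer::sum);
        if (internal) {
            internallyAcquired.add(commit); // at most one internal acquisition per commit
        }
    }

    synchronized void release(C commit, boolean internal) {
        acquired.computeIfPresent(commit, (c, refs) -> refs == 1 ? null : refs - 1);
        if (internal) {
            internallyAcquired.remove(commit);
        }
    }

    // Test-only check: a commit counts as leaked only if it holds a reference that
    // cannot be explained by a single internal acquisition.
    synchronized boolean hasExternallyAcquiredCommits() {
        for (Map.Entry<C, Integer> e : acquired.entrySet()) {
            if (internallyAcquired.contains(e.getKey()) == false || e.getValue() > 1) {
                return true;
            }
        }
        return false;
    }
}
```

With that distinction, a commit acquired by the commits listener during a late background merge no longer trips the test-cluster assertion, while a genuinely leaked external acquisition still does.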
ES-8407 --- .../index/engine/CombinedDeletionPolicy.java | 43 +++++++++++++++---- .../index/engine/InternalEngine.java | 4 +- .../index/engine/EngineTestCase.java | 4 +- .../test/InternalTestCluster.java | 2 +- .../CcrRestoreSourceServiceTests.java | 4 +- 5 files changed, 42 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 22bab1742589e..43b0c27d30580 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -43,6 +43,9 @@ public class CombinedDeletionPolicy extends IndexDeletionPolicy { private final SoftDeletesPolicy softDeletesPolicy; private final LongSupplier globalCheckpointSupplier; private final Map acquiredIndexCommits; // Number of references held against each commit point. + // Index commits internally acquired by the commits listener. We want to track them separately to be able to disregard them + // when checking for externally acquired index commits that haven't been released + private final Set internallyAcquiredIndexCommits; interface CommitsListener { @@ -72,6 +75,7 @@ interface CommitsListener { this.globalCheckpointSupplier = globalCheckpointSupplier; this.commitsListener = commitsListener; this.acquiredIndexCommits = new HashMap<>(); + this.internallyAcquiredIndexCommits = new HashSet<>(); } @Override @@ -114,7 +118,7 @@ public void onCommit(List commits) throws IOException { this.maxSeqNoOfNextSafeCommit = Long.parseLong(commits.get(keptPosition + 1).getUserData().get(SequenceNumbers.MAX_SEQ_NO)); } if (commitsListener != null && previousLastCommit != this.lastCommit) { - newCommit = acquireIndexCommit(false); + newCommit = acquireIndexCommit(false, true); } else { newCommit = null; } @@ -210,15 +214,25 @@ SafeCommitInfo getSafeCommitInfo() { * @param acquiringSafeCommit captures the most recent safe commit point if true; otherwise captures the most recent commit point. */ synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit) { + return acquireIndexCommit(acquiringSafeCommit, false); + } + + private synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit, boolean acquiredInternally) { assert safeCommit != null : "Safe commit is not initialized yet"; assert lastCommit != null : "Last commit is not initialized yet"; final IndexCommit snapshotting = acquiringSafeCommit ? safeCommit : lastCommit; acquiredIndexCommits.merge(snapshotting, 1, Integer::sum); // increase refCount - return wrapCommit(snapshotting); + assert acquiredInternally == false || internallyAcquiredIndexCommits.add(snapshotting) + : "commit [" + snapshotting + "] already added"; + return wrapCommit(snapshotting, acquiredInternally); } protected IndexCommit wrapCommit(IndexCommit indexCommit) { - return new SnapshotIndexCommit(indexCommit); + return wrapCommit(indexCommit, false); + } + + protected IndexCommit wrapCommit(IndexCommit indexCommit, boolean acquiredInternally) { + return new SnapshotIndexCommit(indexCommit, acquiredInternally); } /** @@ -227,7 +241,8 @@ protected IndexCommit wrapCommit(IndexCommit indexCommit) { * @return true if the acquired commit can be clean up. 
*/ synchronized boolean releaseCommit(final IndexCommit acquiredCommit) { - final IndexCommit releasingCommit = ((SnapshotIndexCommit) acquiredCommit).getIndexCommit(); + final SnapshotIndexCommit snapshotIndexCommit = (SnapshotIndexCommit) acquiredCommit; + final IndexCommit releasingCommit = snapshotIndexCommit.getIndexCommit(); assert acquiredIndexCommits.containsKey(releasingCommit) : "Release non-acquired commit;" + "acquired commits [" @@ -242,6 +257,8 @@ synchronized boolean releaseCommit(final IndexCommit acquiredCommit) { } return count - 1; }); + assert snapshotIndexCommit.acquiredInternally == false || internallyAcquiredIndexCommits.remove(releasingCommit) + : "Trying to release a commit [" + releasingCommit + "] that hasn't been previously acquired internally"; assert refCount == null || refCount > 0 : "Number of references for acquired commit can not be negative [" + refCount + "]"; // The commit can be clean up only if no refCount and it is neither the safe commit nor last commit. @@ -296,10 +313,16 @@ private static Set listOfNewFileNames(IndexCommit previous, IndexCommit } /** - * Checks whether the deletion policy is holding on to acquired index commits + * Checks whether the deletion policy is holding on to externally acquired index commits */ - synchronized boolean hasAcquiredIndexCommits() { - return acquiredIndexCommits.isEmpty() == false; + synchronized boolean hasAcquiredIndexCommitsForTesting() { + // We explicitly check only external commits and disregard internal commits acquired by the commits listener + for (var e : acquiredIndexCommits.entrySet()) { + if (internallyAcquiredIndexCommits.contains(e.getKey()) == false || e.getValue() > 1) { + return true; + } + } + return false; } /** @@ -320,8 +343,12 @@ public static String commitDescription(IndexCommit commit) throws IOException { * A wrapper of an index commit that prevents it from being deleted. 
*/ private static class SnapshotIndexCommit extends FilterIndexCommit { - SnapshotIndexCommit(IndexCommit delegate) { + + private final boolean acquiredInternally; + + SnapshotIndexCommit(IndexCommit delegate, boolean acquiredInternally) { super(delegate); + this.acquiredInternally = acquiredInternally; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 9743ee977a8c4..7c456f55ac8ad 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -669,8 +669,8 @@ Translog getTranslog() { } // Package private for testing purposes only - boolean hasAcquiredIndexCommits() { - return combinedDeletionPolicy.hasAcquiredIndexCommits(); + boolean hasAcquiredIndexCommitsForTesting() { + return combinedDeletionPolicy.hasAcquiredIndexCommitsForTesting(); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 8412e9e250885..5387108592b10 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -1440,10 +1440,10 @@ public static void waitForOpsToComplete(InternalEngine engine, long seqNo) throw assertBusy(() -> assertThat(engine.getLocalCheckpointTracker().getProcessedCheckpoint(), greaterThanOrEqualTo(seqNo))); } - public static boolean hasAcquiredIndexCommits(Engine engine) { + public static boolean hasAcquiredIndexCommitsForTesting(Engine engine) { assert engine instanceof InternalEngine : "only InternalEngines have snapshotted commits, got: " + engine.getClass(); InternalEngine internalEngine = (InternalEngine) engine; - return internalEngine.hasAcquiredIndexCommits(); + return internalEngine.hasAcquiredIndexCommitsForTesting(); } public static final class PrimaryTermSupplier implements LongSupplier { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 77762544c4718..823f5084f5d59 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1369,7 +1369,7 @@ private void assertNoAcquiredIndexCommit() throws Exception { if (engine instanceof InternalEngine) { assertFalse( indexShard.routingEntry().toString() + " has unreleased snapshotted index commits", - EngineTestCase.hasAcquiredIndexCommits(engine) + EngineTestCase.hasAcquiredIndexCommitsForTesting(engine) ); } } catch (AlreadyClosedException ignored) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java index 99344f22bae31..f577ccd4e5a44 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java @@ -215,9 +215,9 @@ public void testGetSessionDoesNotLeakFileIfClosed() throws IOException { sessionReader.readFileBytes(files.get(1).name(), MockBigArrays.NON_RECYCLING_INSTANCE.newByteArray(10, 
false)); } - assertTrue(EngineTestCase.hasAcquiredIndexCommits(IndexShardTestCase.getEngine(indexShard))); + assertTrue(EngineTestCase.hasAcquiredIndexCommitsForTesting(IndexShardTestCase.getEngine(indexShard))); restoreSourceService.closeSession(sessionUUID); - assertFalse(EngineTestCase.hasAcquiredIndexCommits(IndexShardTestCase.getEngine(indexShard))); + assertFalse(EngineTestCase.hasAcquiredIndexCommitsForTesting(IndexShardTestCase.getEngine(indexShard))); closeShards(indexShard); // Exception will be thrown if file is not closed. From 574915d6a6bfb2075b919f8c642212f0b2df4f25 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Tue, 10 Sep 2024 13:09:40 -0400 Subject: [PATCH 30/31] Fix dra info fetch for 8.x branch --- .ci/scripts/resolve-dra-manifest.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.ci/scripts/resolve-dra-manifest.sh b/.ci/scripts/resolve-dra-manifest.sh index bd7a9bbbdafee..4ac94122351aa 100755 --- a/.ci/scripts/resolve-dra-manifest.sh +++ b/.ci/scripts/resolve-dra-manifest.sh @@ -23,7 +23,14 @@ LATEST_VERSION=$(strip_version $LATEST_BUILD) if [ "$LATEST_VERSION" != "$ES_VERSION" ]; then echo "Latest build for '$ARTIFACT' is version $LATEST_VERSION but expected version $ES_VERSION." 1>&2 NEW_BRANCH=$(echo $ES_VERSION | sed -E "s/([0-9]+\.[0-9]+)\.[0-9]/\1/g") + + # Temporary + if [[ "$ES_VERSION" == "8.16.0" ]]; then + NEW_BRANCH="8.x" + fi + echo "Using branch $NEW_BRANCH instead of $BRANCH." 1>&2 + echo "https://artifacts-$WORKFLOW.elastic.co/$ARTIFACT/latest/$NEW_BRANCH.json" LATEST_BUILD=$(fetch_build $WORKFLOW $ARTIFACT $NEW_BRANCH) fi From f6ace50953ff2eea2825b917daa6cbf803a76466 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 10 Sep 2024 19:37:52 +0200 Subject: [PATCH 31/31] EQL: Mute bwc tests that have incompatibilities with Java 23 (#112699) The switch to Java 23 removed support for the COMPAT locale provider, so running old versions (< 8.15.2) in a mixed cluster produces different results for date formatting (e.g. month and day names are truncated to three letters).
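As context only, here is a small, hedged probe (not part of this change, and the class name is made up) that can be run on a JDK that still ships COMPAT locale data (22 or earlier) to observe how the same pattern renders under the two providers; the exact output depends on the JDK, pattern, and locale:

```
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

// Run twice: once with -Djava.locale.providers=COMPAT and once with the default (CLDR).
// For some patterns and locales the textual month and day names render differently,
// e.g. abbreviated versus full or differently truncated forms.
public class LocaleProviderProbe {
    public static void main(String[] args) {
        System.out.println("java.locale.providers=" + System.getProperty("java.locale.providers", "<default>"));
        LocalDate date = LocalDate.of(2024, 9, 10);
        for (String pattern : new String[] { "EEE, dd MMM yyyy", "EEEE, dd MMMM yyyy" }) {
            DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern, Locale.GERMAN);
            System.out.println(pattern + " -> " + formatter.format(date));
        }
    }
}
```

Differences of that kind are why assertions on rendered date strings cannot be compared reliably between old and new nodes in a mixed cluster.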
Fixes https://github.com/elastic/elasticsearch/issues/112617 --- muted-tests.yml | 2 -- x-pack/plugin/eql/qa/rest/build.gradle | 8 ++++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index cf4e519d78a17..1fe7cbb9540b3 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -178,8 +178,6 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testPutJob_GivenFarequoteConfig issue: https://github.com/elastic/elasticsearch/issues/112382 -- class: org.elasticsearch.xpack.eql.EqlClientYamlIT - issue: https://github.com/elastic/elasticsearch/issues/112617 - class: org.elasticsearch.xpack.security.authc.kerberos.KerberosTicketValidatorTests method: testWhenKeyTabWithInvalidContentFailsValidation issue: https://github.com/elastic/elasticsearch/issues/112631 diff --git a/x-pack/plugin/eql/qa/rest/build.gradle b/x-pack/plugin/eql/qa/rest/build.gradle index 5f1911dd579bf..d035005758a54 100644 --- a/x-pack/plugin/eql/qa/rest/build.gradle +++ b/x-pack/plugin/eql/qa/rest/build.gradle @@ -30,6 +30,14 @@ tasks.named('yamlRestTestV7CompatTest') { usesDefaultDistribution() } +tasks.named("yamlRestTestV7CompatTransform").configure {task -> + task.skipTest("eql/10_basic/Execute EQL events query with wildcard (*) fields filtering.", "Change of locale with Java 23 makes these tests non deterministic") + task.skipTest("eql/10_basic/Execute EQL sequence with fields filtering.", "Change of locale with Java 23 makes these tests non deterministic") + task.skipTest("eql/10_basic/Execute EQL sequence with custom format for timestamp field.", "Change of locale with Java 23 makes these tests non deterministic") + task.skipTest("eql/10_basic/Execute EQL events query with fields filtering", "Change of locale with Java 23 makes these tests non deterministic") + task.skipTest("eql/10_basic/Execute EQL sequence with wildcard (*) fields filtering.", "Change of locale with Java 23 makes these tests non deterministic") +} + if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false }