From 8134c79ce8a11b26ee26d88ad7975c91a127af98 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Tue, 17 Dec 2024 13:05:20 +0100 Subject: [PATCH 1/5] ESQL: Skip lookup fields when eliminating missing fields (#118658) We do not have SearchStats for fields from lookup indices. And unfortunately, these are hard to obtain. For now, just do not apply ReplaceMissingFieldWithNull to fields coming from an index used in LOOKUP JOIN. These are identified via their indexmode. --- .../esql/qa/mixed/MixedClusterEsqlSpecIT.java | 4 +- .../xpack/esql/ccq/MultiClusterSpecIT.java | 8 +- .../rest/RequestIndexFilteringTestCase.java | 2 +- .../src/main/resources/lookup-join.csv-spec | 212 ++++++++++++------ .../xpack/esql/action/EsqlCapabilities.java | 2 +- .../local/ReplaceMissingFieldWithNull.java | 15 +- .../elasticsearch/xpack/esql/CsvTests.java | 2 +- .../xpack/esql/analysis/AnalyzerTests.java | 4 +- .../xpack/esql/analysis/VerifierTests.java | 2 +- .../optimizer/LogicalPlanOptimizerTests.java | 12 +- .../optimizer/PhysicalPlanOptimizerTests.java | 2 +- 11 files changed, 180 insertions(+), 85 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index 5efe7ffc800a..004beaafb400 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -21,7 +21,7 @@ import java.util.List; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V6; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V7; import static 
org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.ASYNC; public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { @@ -96,7 +96,7 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - return hasCapabilities(List.of(JOIN_LOOKUP_V6.capabilityName())); + return hasCapabilities(List.of(JOIN_LOOKUP_V7.capabilityName())); } @Override diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index dd75776973c3..c75a920e1697 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -48,7 +48,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V6; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V7; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; @@ -124,7 +124,7 @@ protected void shouldSkipTest(String testName) throws IOException { assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", 
testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); - assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V6.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V7.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -283,8 +283,8 @@ protected boolean supportsInferenceTestService() { @Override protected boolean supportsIndexModeLookup() throws IOException { - // CCS does not yet support JOIN_LOOKUP_V6 and clusters falsely report they have this capability - // return hasCapabilities(List.of(JOIN_LOOKUP_V6.capabilityName())); + // CCS does not yet support JOIN_LOOKUP_V7 and clusters falsely report they have this capability + // return hasCapabilities(List.of(JOIN_LOOKUP_V7.capabilityName())); return false; } } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java index 2aae4c94c33f..40027249670f 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java @@ -221,7 +221,7 @@ public void testIndicesDontExist() throws IOException { assertThat(e.getMessage(), containsString("index_not_found_exception")); assertThat(e.getMessage(), containsString("no such index [foo]")); - if (EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()) { + if (EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()) { e = expectThrows( ResponseException.class, () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM 
test1 | LOOKUP JOIN foo ON id1")) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 8b8d24b1bb15..8bcc2c2ff350 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -8,7 +8,7 @@ ############################################### basicOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | EVAL language_code = languages @@ -25,7 +25,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; basicRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW language_code = 1 | LOOKUP JOIN languages_lookup ON language_code @@ -36,7 +36,7 @@ language_code:integer | language_name:keyword ; basicOnTheCoordinator -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | SORT emp_no @@ -53,7 +53,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; subsequentEvalOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | EVAL language_code = languages @@ -71,7 +71,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; subsequentEvalOnTheCoordinator -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | SORT emp_no @@ -89,7 +89,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; sortEvalBeforeLookup -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | SORT emp_no @@ -106,7 +106,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueLeftKeyOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | WHERE emp_no <= 10030 @@ -130,7 +130,7 @@ emp_no:integer | 
language_code:integer | language_name:keyword ; nonUniqueRightKeyOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | EVAL language_code = emp_no % 10 @@ -150,7 +150,7 @@ emp_no:integer | language_code:integer | language_name:keyword | country:k ; nonUniqueRightKeyOnTheCoordinator -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | SORT emp_no @@ -170,7 +170,7 @@ emp_no:integer | language_code:integer | language_name:keyword | country:k ; nonUniqueRightKeyFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW language_code = 2 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code @@ -186,8 +186,8 @@ language_code:integer | language_name:keyword | country:keyword # Filtering tests with languages_lookup index ############################################### -lookupWithFilterOnLeftSideField -required_capability: join_lookup_v6 +filterOnLeftSide +required_capability: join_lookup_v7 FROM employees | EVAL language_code = languages @@ -203,8 +203,8 @@ emp_no:integer | language_code:integer | language_name:keyword 10093 | 3 | Spanish ; -lookupMessageWithFilterOnRightSideField-Ignore -required_capability: join_lookup_v6 +filterOnRightSide +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -219,8 +219,8 @@ FROM sample_data 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error | Error ; -lookupWithFieldAndRightSideAfterStats -required_capability: join_lookup_v6 +filterOnRightSideAfterStats +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -232,23 +232,110 @@ count:long | type:keyword 3 | Error ; -lookupWithFieldOnJoinKey-Ignore -required_capability: join_lookup_v6 +filterOnJoinKey +required_capability: join_lookup_v7 FROM employees | EVAL language_code = languages +| WHERE emp_no >= 10091 AND emp_no < 10094 +| LOOKUP JOIN 
languages_lookup ON language_code +| WHERE language_code == 1 +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10092 | 1 | English +; + +filterOnJoinKeyAndRightSide +required_capability: join_lookup_v7 + +FROM employees +| WHERE emp_no < 10006 +| EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code | WHERE language_code > 1 AND language_name IS NOT NULL | KEEP emp_no, language_code, language_name ; +ignoreOrder:true emp_no:integer | language_code:integer | language_name:keyword 10001 | 2 | French 10003 | 4 | German ; +filterOnRightSideOnTheCoordinator +required_capability: join_lookup_v7 + +FROM employees +| SORT emp_no +| LIMIT 5 +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_name == "English" +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10005 | 1 | English +; + +filterOnJoinKeyOnTheCoordinator +required_capability: join_lookup_v7 + +FROM employees +| SORT emp_no +| LIMIT 5 +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_code == 1 +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10005 | 1 | English +; + +filterOnJoinKeyAndRightSideOnTheCoordinator +required_capability: join_lookup_v7 + +FROM employees +| SORT emp_no +| LIMIT 5 +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE language_code > 1 AND language_name IS NOT NULL +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10001 | 2 | French +10003 | 4 | German +; + +filterOnTheDataNodeThenFilterOnTheCoordinator +required_capability: join_lookup_v7 + +FROM employees +| EVAL language_code = languages +| WHERE emp_no >= 10091 AND emp_no < 10094 +| LOOKUP JOIN languages_lookup ON 
language_code +| WHERE language_name == "English" +| KEEP emp_no, language_code, language_name +| SORT emp_no +| WHERE language_code == 1 +; + +emp_no:integer | language_code:integer | language_name:keyword +10092 | 1 | English +; + +########################################################################### +# null and multi-value behavior with languages_lookup_non_unique_key index +########################################################################### + nullJoinKeyOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | WHERE emp_no < 10004 @@ -264,9 +351,8 @@ emp_no:integer | language_code:integer | language_name:keyword 10003 | null | null ; - mvJoinKeyOnTheDataNode -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM employees | WHERE 10003 < emp_no AND emp_no < 10008 @@ -284,7 +370,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; mvJoinKeyFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW language_code = [4, 5, 6, 7] | LOOKUP JOIN languages_lookup_non_unique_key ON language_code @@ -297,7 +383,7 @@ language_code:integer | language_name:keyword | country:keyword ; mvJoinKeyFromRowExpanded -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW language_code = [4, 5, 6, 7, 8] | MV_EXPAND language_code @@ -319,7 +405,7 @@ language_code:integer | language_name:keyword | country:keyword ############################################### lookupIPFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -330,7 +416,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromKeepRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", right = "right" | KEEP left, client_ip, right @@ -342,7 +428,7 @@ left | 172.21.0.5 | 
right | Development ; lookupIPFromRowWithShadowing -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -353,7 +439,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -366,7 +452,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeepReordered -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -379,7 +465,7 @@ right | Development | 172.21.0.5 ; lookupIPFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -398,7 +484,7 @@ ignoreOrder:true ; lookupIPFromIndexKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -418,7 +504,7 @@ ignoreOrder:true ; lookupIPFromIndexKeepKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -440,7 +526,7 @@ timestamp:date | client_ip:keyword | event_duration:long | msg:keyword ; lookupIPFromIndexStats -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -456,7 +542,7 @@ count:long | env:keyword ; lookupIPFromIndexStatsKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -473,7 +559,7 @@ count:long | env:keyword ; statsAndLookupIPFromIndex -required_capability: join_lookup_v6 +required_capability: 
join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -494,7 +580,7 @@ count:long | client_ip:keyword | env:keyword ############################################### lookupMessageFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -505,7 +591,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromKeepRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | KEEP left, message, right @@ -517,7 +603,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowing -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -528,7 +614,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowingKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -540,7 +626,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -558,7 +644,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -577,7 +663,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -597,7 +683,7 @@ ignoreOrder:true ; 
lookupMessageFromIndexKeepReordered -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -616,7 +702,7 @@ Success | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; lookupMessageFromIndexStats -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -631,7 +717,7 @@ count:long | type:keyword ; lookupMessageFromIndexStatsKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -647,7 +733,7 @@ count:long | type:keyword ; statsAndLookupMessageFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | STATS count = count(message) BY message @@ -665,7 +751,7 @@ count:long | type:keyword | message:keyword ; lookupMessageFromIndexTwice -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -687,7 +773,7 @@ ignoreOrder:true ; lookupMessageFromIndexTwiceKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -714,7 +800,7 @@ ignoreOrder:true ############################################### lookupIPAndMessageFromRow -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -726,7 +812,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBefore -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | KEEP left, client_ip, message, right @@ -739,7 +825,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | 
right | Devel ; lookupIPAndMessageFromRowKeepBetween -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -752,7 +838,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepAfter -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -765,7 +851,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowing -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", type = "type", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -777,7 +863,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -791,7 +877,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -806,7 +892,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeepKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = 
client_ip::keyword @@ -822,7 +908,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepReordered -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -836,7 +922,7 @@ right | Development | Success | 172.21.0.5 ; lookupIPAndMessageFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -856,7 +942,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -877,7 +963,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexStats -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -895,7 +981,7 @@ count:long | env:keyword | type:keyword ; lookupIPAndMessageFromIndexStatsKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -914,7 +1000,7 @@ count:long | env:keyword | type:keyword ; statsAndLookupIPAndMessageFromIndex -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -933,7 +1019,7 @@ count:long | client_ip:keyword | message:keyword | env:keyword | type:keyw ; lookupIPAndMessageFromIndexChainedEvalKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -955,7 +1041,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexChainedRenameKeep -required_capability: join_lookup_v6 +required_capability: join_lookup_v7 FROM sample_data | EVAL client_ip = client_ip::keyword diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 235d0dcbe416..4fcabb02b2d4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -547,7 +547,7 @@ public enum Cap { /** * LOOKUP JOIN */ - JOIN_LOOKUP_V6(Build.current().isSnapshot()), + JOIN_LOOKUP_V7(Build.current().isSnapshot()), /** * Fix for https://github.com/elastic/elasticsearch/issues/117054 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java index 096f72f7694e..f9d86ecf0f61 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.optimizer.rules.logical.local; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; @@ -41,10 +42,17 @@ public class ReplaceMissingFieldWithNull extends ParameterizedRule missingToNull(p, localLogicalOptimizerContext.searchStats())); + AttributeSet lookupFields = new AttributeSet(); + plan.forEachUp(EsRelation.class, esRelation -> { + if (esRelation.indexMode() == IndexMode.LOOKUP) { + lookupFields.addAll(esRelation.output()); + } + }); + + return plan.transformUp(p -> missingToNull(p, 
localLogicalOptimizerContext.searchStats(), lookupFields)); } - private LogicalPlan missingToNull(LogicalPlan plan, SearchStats stats) { + private LogicalPlan missingToNull(LogicalPlan plan, SearchStats stats, AttributeSet lookupFields) { if (plan instanceof EsRelation || plan instanceof LocalRelation) { return plan; } @@ -95,7 +103,8 @@ else if (plan instanceof Project project) { plan = plan.transformExpressionsOnlyUp( FieldAttribute.class, // Do not use the attribute name, this can deviate from the field name for union types. - f -> stats.exists(f.fieldName()) ? f : Literal.of(f, null) + // Also skip fields from lookup indices because we do not have stats for these. + f -> stats.exists(f.fieldName()) || lookupFields.contains(f) ? f : Literal.of(f, null) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index f553c15ef69f..717ac7b5a62a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -263,7 +263,7 @@ public final void test() throws Throwable { ); assumeFalse( "lookup join disabled for csv tests", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V6.capabilityName()) + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V7.capabilityName()) ); assumeFalse( "can't use TERM function in csv tests", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 4e02119b3174..9c71f20dcde0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2139,7 +2139,7 @@ public void testLookupMatchTypeWrong() { } 
public void testLookupJoinUnknownIndex() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String errorMessage = "Unknown index [foobar]"; IndexResolution missingLookupIndex = IndexResolution.invalid(errorMessage); @@ -2168,7 +2168,7 @@ public void testLookupJoinUnknownIndex() { } public void testLookupJoinUnknownField() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = "FROM test | LOOKUP JOIN languages_lookup ON last_name"; String errorMessage = "1:45: Unknown column [last_name] in right side of join"; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 58180aafedc0..182e87d1ab9d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -1964,7 +1964,7 @@ public void testSortByAggregate() { } public void testLookupJoinDataTypeMismatch() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); query("FROM test | EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index c4d7b30115c2..cfb993a7dd73 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -4906,7 +4906,7 @@ public void testPlanSanityCheck() throws Exception { } public void testPlanSanityCheckWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); var plan = optimizedPlan(""" FROM test @@ -5911,7 +5911,7 @@ public void testLookupStats() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test @@ -5954,7 +5954,7 @@ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnLeftSideField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test @@ -5998,7 +5998,7 @@ public void testLookupJoinPushDownFilterOnLeftSideField() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownDisabledForLookupField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test @@ -6043,7 +6043,7 @@ public void testLookupJoinPushDownDisabledForLookupField() { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, 
language_name{f}#20] */ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test @@ -6096,7 +6096,7 @@ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightFiel * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownDisabledForDisjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); String query = """ FROM test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 9f6ef89008a2..964dd4642d7c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -2331,7 +2331,7 @@ public void testVerifierOnMissingReferences() { } public void testVerifierOnMissingReferencesWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V6.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); // Do not assert serialization: // This will have a LookupJoinExec, which is not serializable because it doesn't leave the coordinator. 
From 6516a535ab487eabf02b7d65b7cbe928820a181b Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Tue, 17 Dec 2024 13:45:27 +0100 Subject: [PATCH 2/5] Add wolfi documentation from 8.16 branch (#118835) port from https://github.com/elastic/elasticsearch/pull/118684 --- docs/Versions.asciidoc | 1 + docs/reference/setup/install/docker.asciidoc | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index bdb0704fcd88..f2e61861bd3a 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -9,6 +9,7 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] :docker-repo: docker.elastic.co/elasticsearch/elasticsearch :docker-image: {docker-repo}:{version} +:docker-wolfi-image: {docker-repo}-wolfi:{version} :kib-docker-repo: docker.elastic.co/kibana/kibana :kib-docker-image: {kib-docker-repo}:{version} :plugin_url: https://artifacts.elastic.co/downloads/elasticsearch-plugins diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 8694d7f5b46c..86a0e567f6ee 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -55,6 +55,12 @@ docker pull {docker-image} // REVIEWED[DEC.10.24] -- +Alternatively, you can use the Wolfi based image. Using Wolfi based images requires Docker version 20.10.10 or higher. +[source,sh,subs="attributes"] +---- +docker pull {docker-wolfi-image} +---- + . Optional: Install https://docs.sigstore.dev/cosign/system_config/installation/[Cosign] for your environment. Then use Cosign to verify the {es} image's signature. From f5712e4875122d9f5c451ac73225725179795929 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 17 Dec 2024 13:18:42 +0000 Subject: [PATCH 3/5] Infrastructure for assuming cluster features in the next major version (#118143) Allow features to be marked as 'assumed', allowing them to be removed in the next major version.
--- .../forbidden/es-server-signatures.txt | 4 +- docs/changelog/118143.yaml | 5 + .../cluster/ClusterFeatures.java | 56 ++++- .../coordination/NodeJoinExecutor.java | 81 +++++- .../org/elasticsearch/env/BuildVersion.java | 6 + .../env/DefaultBuildVersion.java | 11 + .../features/FeatureService.java | 23 +- .../elasticsearch/features/NodeFeature.java | 9 +- .../readiness/ReadinessService.java | 4 +- .../DataStreamAutoShardingServiceTests.java | 29 ++- .../coordination/NodeJoinExecutorTests.java | 232 +++++++++++++++++- .../features/FeatureServiceTests.java | 41 ++++ .../HealthNodeTaskExecutorTests.java | 2 +- .../slm/SnapshotLifecycleServiceTests.java | 3 + 14 files changed, 464 insertions(+), 42 deletions(-) create mode 100644 docs/changelog/118143.yaml diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index a9da7995c2b3..53480a4a27b0 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -155,10 +155,8 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions() @defaultMessage ClusterFeatures#nodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#nodeFeatures() -@defaultMessage ClusterFeatures#allNodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. -org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures() @defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. 
-org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature) +org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.cluster.node.DiscoveryNodes, org.elasticsearch.features.NodeFeature) @defaultMessage Do not construct this records outside the source files they are declared in org.elasticsearch.cluster.SnapshotsInProgress$ShardSnapshotStatus#(java.lang.String, org.elasticsearch.cluster.SnapshotsInProgress$ShardState, org.elasticsearch.repositories.ShardGeneration, java.lang.String, org.elasticsearch.repositories.ShardSnapshotResult) diff --git a/docs/changelog/118143.yaml b/docs/changelog/118143.yaml new file mode 100644 index 000000000000..4dcbf4b4b6c2 --- /dev/null +++ b/docs/changelog/118143.yaml @@ -0,0 +1,5 @@ +pr: 118143 +summary: Infrastructure for assuming cluster features in the next major version +area: "Infra/Core" +type: feature +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java index ad285cbd391c..5b5a6577082d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java @@ -9,11 +9,12 @@ package org.elasticsearch.cluster; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ToXContent; @@ -79,28 +80,61 @@ public Map> nodeFeatures() { return nodeFeatures; } - /** - * The features in all nodes in the cluster. - *

- * NOTE: This should not be used directly. - * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. - */ - public Set allNodeFeatures() { + private Set allNodeFeatures() { if (allNodeFeatures == null) { allNodeFeatures = Set.copyOf(calculateAllNodeFeatures(nodeFeatures.values())); } return allNodeFeatures; } + /** + * Returns {@code true} if {@code node} can have assumed features. + * @see org.elasticsearch.env.BuildVersion#canRemoveAssumedFeatures + */ + public static boolean featuresCanBeAssumedForNode(DiscoveryNode node) { + return node.getBuildVersion().canRemoveAssumedFeatures(); + } + + /** + * Returns {@code true} if one or more nodes in {@code nodes} can have assumed features. + * @see org.elasticsearch.env.BuildVersion#canRemoveAssumedFeatures + */ + public static boolean featuresCanBeAssumedForNodes(DiscoveryNodes nodes) { + return nodes.getAllNodes().stream().anyMatch(n -> n.getBuildVersion().canRemoveAssumedFeatures()); + } + /** * {@code true} if {@code feature} is present on all nodes in the cluster. *

* NOTE: This should not be used directly. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. */ - @SuppressForbidden(reason = "directly reading cluster features") - public boolean clusterHasFeature(NodeFeature feature) { - return allNodeFeatures().contains(feature.id()); + public boolean clusterHasFeature(DiscoveryNodes nodes, NodeFeature feature) { + assert nodes.getNodes().keySet().equals(nodeFeatures.keySet()) + : "Cluster features nodes " + nodeFeatures.keySet() + " is different to discovery nodes " + nodes.getNodes().keySet(); + + // basic case + boolean allNodesHaveFeature = allNodeFeatures().contains(feature.id()); + if (allNodesHaveFeature) { + return true; + } + + // if the feature is assumed, check the versions more closely + // it's actually ok if the feature is assumed, and all nodes missing the feature can assume it + // TODO: do we need some kind of transient cache of this calculation? + if (feature.assumedAfterNextCompatibilityBoundary()) { + for (var nf : nodeFeatures.entrySet()) { + if (nf.getValue().contains(feature.id()) == false + && featuresCanBeAssumedForNode(nodes.getNodes().get(nf.getKey())) == false) { + return false; + } + } + + // all nodes missing the feature can assume it - so that's alright then + return true; + } + + return false; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index 5235293a54d9..74a8dc7851c8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import 
org.elasticsearch.index.IndexVersions; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -39,6 +40,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -137,8 +139,8 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); Map compatibilityVersionsMap = new HashMap<>(newState.compatibilityVersions()); - Map> nodeFeatures = new HashMap<>(newState.nodeFeatures()); - Set allNodesFeatures = ClusterFeatures.calculateAllNodeFeatures(nodeFeatures.values()); + Map> nodeFeatures = new HashMap<>(newState.nodeFeatures()); // as present in cluster state + Set effectiveClusterFeatures = calculateEffectiveClusterFeatures(newState.nodes(), nodeFeatures); assert nodesBuilder.isLocalNodeElectedMaster(); @@ -174,14 +176,17 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } blockForbiddenVersions(compatibilityVersions.transportVersion()); ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); - enforceNodeFeatureBarrier(node.getId(), allNodesFeatures, features); + Set newNodeEffectiveFeatures = enforceNodeFeatureBarrier(node, effectiveClusterFeatures, features); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata()); + nodesBuilder.add(node); compatibilityVersionsMap.put(node.getId(), compatibilityVersions); + // store the actual node features here, not including assumed features, as this is persisted in cluster state nodeFeatures.put(node.getId(), features); - allNodesFeatures.retainAll(features); + 
effectiveClusterFeatures.retainAll(newNodeEffectiveFeatures); + nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -355,6 +360,35 @@ private static void blockForbiddenVersions(TransportVersion joiningTransportVers } } + /** + * Calculate the cluster's effective features. This includes all features that are assumed on any nodes in the cluster, + * that are also present across the whole cluster as a result. + */ + private Set calculateEffectiveClusterFeatures(DiscoveryNodes nodes, Map> nodeFeatures) { + if (featureService.featuresCanBeAssumedForNodes(nodes)) { + Set assumedFeatures = featureService.getNodeFeatures() + .values() + .stream() + .filter(NodeFeature::assumedAfterNextCompatibilityBoundary) + .map(NodeFeature::id) + .collect(Collectors.toSet()); + + // add all assumed features to the featureset of all nodes of the next major version + nodeFeatures = new HashMap<>(nodeFeatures); + for (var node : nodes.getNodes().entrySet()) { + if (featureService.featuresCanBeAssumedForNode(node.getValue())) { + assert nodeFeatures.containsKey(node.getKey()) : "Node " + node.getKey() + " does not have any features"; + nodeFeatures.computeIfPresent(node.getKey(), (k, v) -> { + var newFeatures = new HashSet<>(v); + return newFeatures.addAll(assumedFeatures) ? newFeatures : v; + }); + } + } + } + + return ClusterFeatures.calculateAllNodeFeatures(nodeFeatures.values()); + } + /** * Ensures that all indices are compatible with the given index version. 
This will ensure that all indices in the given metadata * will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index @@ -461,13 +495,44 @@ public static void ensureVersionBarrier(Version joiningNodeVersion, Version minC } } - private void enforceNodeFeatureBarrier(String nodeId, Set existingNodesFeatures, Set newNodeFeatures) { + /** + * Enforces the feature join barrier - a joining node should have all features already present in all existing nodes in the cluster + * + * @return The set of features that this node has (including assumed features) + */ + private Set enforceNodeFeatureBarrier(DiscoveryNode node, Set effectiveClusterFeatures, Set newNodeFeatures) { // prevent join if it does not have one or more features that all other nodes have - Set missingFeatures = new HashSet<>(existingNodesFeatures); + Set missingFeatures = new HashSet<>(effectiveClusterFeatures); missingFeatures.removeAll(newNodeFeatures); - if (missingFeatures.isEmpty() == false) { - throw new IllegalStateException("Node " + nodeId + " is missing required features " + missingFeatures); + if (missingFeatures.isEmpty()) { + // nothing missing - all ok + return newNodeFeatures; + } + + if (featureService.featuresCanBeAssumedForNode(node)) { + // it might still be ok for this node to join if this node can have assumed features, + // and all the missing features are assumed + // we can get the NodeFeature object direct from this node's registered features + // as all existing nodes in the cluster have the features present in existingNodesFeatures, including this one + newNodeFeatures = new HashSet<>(newNodeFeatures); + for (Iterator it = missingFeatures.iterator(); it.hasNext();) { + String feature = it.next(); + NodeFeature nf = featureService.getNodeFeatures().get(feature); + if (nf.assumedAfterNextCompatibilityBoundary()) { + // its ok for this feature to be missing from this node + it.remove(); + // and it should be assumed to 
still be in the cluster + newNodeFeatures.add(feature); + } + // even if we don't remove it, still continue, so the exception message below is accurate + } + } + + if (missingFeatures.isEmpty()) { + return newNodeFeatures; + } else { + throw new IllegalStateException("Node " + node.getId() + " is missing required features " + missingFeatures); } } diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 7a6b27eab233..5c3602283fef 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -37,6 +37,12 @@ */ public abstract class BuildVersion implements ToXContentFragment, Writeable { + /** + * Checks if this version can operate properly in a cluster without features + * that are assumed in the currently running Elasticsearch. + */ + public abstract boolean canRemoveAssumedFeatures(); + /** * Check whether this version is on or after a minimum threshold. * diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index a7e1a4fee341..70aa3f6639a4 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -47,6 +47,17 @@ final class DefaultBuildVersion extends BuildVersion { this(in.readVInt()); } + @Override + public boolean canRemoveAssumedFeatures() { + /* + * We can remove assumed features if the node version is the next major version. + * This is because the next major version can only form a cluster with the + * latest minor version of the previous major, so any features introduced before that point + * (that are marked as assumed in the running code version) are automatically met by that version. 
+ */ + return version.major == Version.CURRENT.major + 1; + } + @Override public boolean onOrAfterMinimumCompatible() { return Version.CURRENT.minimumCompatibilityVersion().onOrBefore(version); diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index 9a0ac7cafc18..c04fbae05ee2 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -9,7 +9,10 @@ package org.elasticsearch.features; +import org.elasticsearch.cluster.ClusterFeatures; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -38,9 +41,7 @@ public class FeatureService { * as the local node's supported feature set */ public FeatureService(List specs) { - - var featureData = FeatureData.createFromSpecifications(specs); - nodeFeatures = featureData.getNodeFeatures(); + this.nodeFeatures = FeatureData.createFromSpecifications(specs).getNodeFeatures(); logger.info("Registered local node features {}", nodeFeatures.keySet().stream().sorted().toList()); } @@ -53,11 +54,25 @@ public Map getNodeFeatures() { return nodeFeatures; } + /** + * Returns {@code true} if {@code node} can have assumed features. + */ + public boolean featuresCanBeAssumedForNode(DiscoveryNode node) { + return ClusterFeatures.featuresCanBeAssumedForNode(node); + } + + /** + * Returns {@code true} if one or more nodes in {@code nodes} can have assumed features. + */ + public boolean featuresCanBeAssumedForNodes(DiscoveryNodes nodes) { + return ClusterFeatures.featuresCanBeAssumedForNodes(nodes); + } + /** * Returns {@code true} if all nodes in {@code state} support feature {@code feature}. 
*/ @SuppressForbidden(reason = "We need basic feature information from cluster state") public boolean clusterHasFeature(ClusterState state, NodeFeature feature) { - return state.clusterFeatures().clusterHasFeature(feature); + return state.clusterFeatures().clusterHasFeature(state.nodes(), feature); } } diff --git a/server/src/main/java/org/elasticsearch/features/NodeFeature.java b/server/src/main/java/org/elasticsearch/features/NodeFeature.java index 957308e80556..961b386d6280 100644 --- a/server/src/main/java/org/elasticsearch/features/NodeFeature.java +++ b/server/src/main/java/org/elasticsearch/features/NodeFeature.java @@ -15,10 +15,17 @@ * A feature published by a node. * * @param id The feature id. Must be unique in the node. + * @param assumedAfterNextCompatibilityBoundary + * {@code true} if this feature is removed at the next compatibility boundary (ie next major version), + * and so should be assumed to be true for all nodes after that boundary. */ -public record NodeFeature(String id) { +public record NodeFeature(String id, boolean assumedAfterNextCompatibilityBoundary) { public NodeFeature { Objects.requireNonNull(id); } + + public NodeFeature(String id) { + this(id, false); + } } diff --git a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java index 15b9eacfa211..de56ead9b5ab 100644 --- a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java +++ b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java @@ -294,8 +294,8 @@ protected boolean areFileSettingsApplied(ClusterState clusterState) { } @SuppressForbidden(reason = "need to check file settings support on exact cluster state") - private static boolean supportsFileSettings(ClusterState clusterState) { - return clusterState.clusterFeatures().clusterHasFeature(FileSettingsFeatures.FILE_SETTINGS_SUPPORTED); + private boolean supportsFileSettings(ClusterState clusterState) { + 
return clusterState.clusterFeatures().clusterHasFeature(clusterState.nodes(), FileSettingsFeatures.FILE_SETTINGS_SUPPORTED); } private void setReady(boolean ready) { diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java index 2c6e273bb6e2..ba0f04d174f4 100644 --- a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java @@ -19,6 +19,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexWriteLoad; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -110,6 +112,7 @@ public void testCalculateValidations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -143,8 +146,9 @@ public Set getFeatures() { // cluster doesn't have feature ClusterState stateNoFeature = ClusterState.builder(ClusterName.DEFAULT).metadata(Metadata.builder()).build(); + Settings settings = Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build(); DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( - Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build(), + settings, clusterService, new FeatureService(List.of()), 
() -> now @@ -155,15 +159,16 @@ public Set getFeatures() { } { + Settings settings = Settings.builder() + .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) + .putList( + DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), + List.of("foo", dataStreamName + "*") + ) + .build(); // patterns are configured to exclude the current data stream DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService( - Settings.builder() - .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true) - .putList( - DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(), - List.of("foo", dataStreamName + "*") - ) - .build(), + settings, clusterService, new FeatureService(List.of()), () -> now @@ -199,6 +204,7 @@ public void testCalculateIncreaseShardingRecommendations() { DataStream dataStream = dataStreamSupplier.apply(null); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -237,6 +243,7 @@ public void testCalculateIncreaseShardingRecommendations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -275,6 +282,7 @@ public void testCalculateIncreaseShardingRecommendations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -313,6 +321,7 @@ public void testCalculateDecreaseShardingRecommendations() { DataStream dataStream = dataStreamSupplier.apply(null); builder.put(dataStream); ClusterState state = 
ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -353,6 +362,7 @@ public void testCalculateDecreaseShardingRecommendations() { DataStream dataStream = dataStreamSupplier.apply(null); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -401,6 +411,7 @@ public void testCalculateDecreaseShardingRecommendations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -447,6 +458,7 @@ public void testCalculateDecreaseShardingRecommendations() { ); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", @@ -487,6 +499,7 @@ public void testCalculateDecreaseShardingRecommendations() { DataStream dataStream = dataStreamSupplier.apply(null); builder.put(dataStream); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2"))) .nodeFeatures( Map.of( "n1", diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 27775270a83e..492a142492e1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -33,6 +33,7 @@ import 
org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; @@ -46,11 +47,13 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect; @@ -227,6 +230,227 @@ public Set getFeatures() { ); } + @SuppressForbidden(reason = "we need to actually check what is in cluster state") + private static Map> getRecordedNodeFeatures(ClusterState state) { + return state.clusterFeatures().nodeFeatures(); + } + + private static Version nextMajor() { + return Version.fromId((Version.CURRENT.major + 1) * 1_000_000 + 99); + } + + public void testCanJoinClusterWithAssumedFeatures() throws Exception { + AllocationService allocationService = createAllocationService(); + RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(new NodeFeature("f1"), new NodeFeature("af1", true), new NodeFeature("af2", true)); + } + })); + + NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, featureService); + + DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + Map> features = new HashMap<>(); + features.put(masterNode.getId(), Set.of("f1", "af1", "af2")); + 
features.put(otherNode.getId(), Set.of("f1", "af1", "af2")); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(otherNode)) + .nodeFeatures(features) + .build(); + + // it is valid for major+1 versions to join clusters assumed features still present + // this can happen in the process of marking, then removing, assumed features + // they should still be recorded appropriately + DiscoveryNode newNode = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNode, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1", "af2"), + TEST_REASON, + NO_FAILURE_LISTENER, + 0L + ) + ) + ); + features.put(newNode.getId(), Set.of("f1", "af2")); + + // extra final check that the recorded cluster features are as they should be + assertThat(getRecordedNodeFeatures(clusterState), equalTo(features)); + } + + public void testJoinClusterWithAssumedFeaturesDoesntAllowNonAssumed() throws Exception { + AllocationService allocationService = createAllocationService(); + RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(new NodeFeature("f1"), new NodeFeature("af1", true)); + } + })); + + NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, featureService); + + DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + Map> features = new HashMap<>(); + features.put(masterNode.getId(), Set.of("f1", "af1")); + 
features.put(otherNode.getId(), Set.of("f1", "af1")); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(otherNode)) + .nodeFeatures(features) + .build(); + + DiscoveryNode newNodeNextMajor = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNodeNextMajor, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + NO_FAILURE_LISTENER, + 0L + ) + ) + ); + features.put(newNodeNextMajor.getId(), Set.of("f1")); + + // even though a next major has joined without af1, this doesnt allow the current major to join with af1 missing features + DiscoveryNode newNodeCurMajor = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + AtomicReference ex = new AtomicReference<>(); + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNodeCurMajor, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + ActionTestUtils.assertNoSuccessListener(ex::set), + 0L + ) + ) + ); + assertThat(ex.get().getMessage(), containsString("missing required features [af1]")); + + // a next major can't join missing non-assumed features + DiscoveryNode newNodeNextMajorMissing = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + ex.set(null); + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.singleNode( + newNodeNextMajorMissing, + CompatibilityVersionsUtils.staticCurrent(), + Set.of(), + TEST_REASON, + ActionTestUtils.assertNoSuccessListener(ex::set), 
+ 0L + ) + ) + ); + assertThat(ex.get().getMessage(), containsString("missing required features [f1]")); + + // extra final check that the recorded cluster features are as they should be, and newNodeNextMajor hasn't gained af1 + assertThat(getRecordedNodeFeatures(clusterState), equalTo(features)); + } + + /* + * Same as above but the current major missing features is processed in the same execution + */ + public void testJoinClusterWithAssumedFeaturesDoesntAllowNonAssumedSameExecute() throws Exception { + AllocationService allocationService = createAllocationService(); + RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + FeatureService featureService = new FeatureService(List.of(new FeatureSpecification() { + @Override + public Set getFeatures() { + return Set.of(new NodeFeature("f1"), new NodeFeature("af1", true)); + } + })); + + NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService, featureService); + + DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + Map> features = new HashMap<>(); + features.put(masterNode.getId(), Set.of("f1", "af1")); + features.put(otherNode.getId(), Set.of("f1", "af1")); + + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(otherNode)) + .nodeFeatures(features) + .build(); + + DiscoveryNode newNodeNextMajor = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + DiscoveryNode newNodeCurMajor = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + DiscoveryNode newNodeNextMajorMissing = DiscoveryNodeUtils.builder(UUIDs.base64UUID()) + .version(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + // even though a next 
major could join, this doesnt allow the current major to join with missing features + // nor a next major missing non-assumed features + AtomicReference thisMajorEx = new AtomicReference<>(); + AtomicReference nextMajorEx = new AtomicReference<>(); + List tasks = List.of( + JoinTask.singleNode( + newNodeNextMajor, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + NO_FAILURE_LISTENER, + 0L + ), + JoinTask.singleNode( + newNodeCurMajor, + CompatibilityVersionsUtils.staticCurrent(), + Set.of("f1"), + TEST_REASON, + ActionTestUtils.assertNoSuccessListener(thisMajorEx::set), + 0L + ), + JoinTask.singleNode( + newNodeNextMajorMissing, + CompatibilityVersionsUtils.staticCurrent(), + Set.of(), + TEST_REASON, + ActionTestUtils.assertNoSuccessListener(nextMajorEx::set), + 0L + ) + ); + if (randomBoolean()) { + // sometimes combine them together into a single task for completeness + tasks = List.of(new JoinTask(tasks.stream().flatMap(t -> t.nodeJoinTasks().stream()).toList(), false, 0L, null)); + } + + clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful(clusterState, executor, tasks); + features.put(newNodeNextMajor.getId(), Set.of("f1")); + + assertThat(thisMajorEx.get().getMessage(), containsString("missing required features [af1]")); + assertThat(nextMajorEx.get().getMessage(), containsString("missing required features [f1]")); + + // extra check that the recorded cluster features are as they should be, and newNodeNextMajor hasn't gained af1 + assertThat(getRecordedNodeFeatures(clusterState), equalTo(features)); + } + public void testSuccess() { Settings.builder().build(); Metadata.Builder metaBuilder = Metadata.builder(); @@ -921,8 +1145,8 @@ public void testSetsNodeFeaturesWhenRejoining() throws Exception { .nodeFeatures(Map.of(masterNode.getId(), Set.of("f1", "f2"), rejoinNode.getId(), Set.of())) .build(); - assertThat(clusterState.clusterFeatures().clusterHasFeature(new NodeFeature("f1")), is(false)); - 
assertThat(clusterState.clusterFeatures().clusterHasFeature(new NodeFeature("f2")), is(false)); + assertThat(clusterState.clusterFeatures().clusterHasFeature(clusterState.nodes(), new NodeFeature("f1")), is(false)); + assertThat(clusterState.clusterFeatures().clusterHasFeature(clusterState.nodes(), new NodeFeature("f2")), is(false)); final var resultingState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( clusterState, @@ -939,8 +1163,8 @@ public void testSetsNodeFeaturesWhenRejoining() throws Exception { ) ); - assertThat(resultingState.clusterFeatures().clusterHasFeature(new NodeFeature("f1")), is(true)); - assertThat(resultingState.clusterFeatures().clusterHasFeature(new NodeFeature("f2")), is(true)); + assertThat(resultingState.clusterFeatures().clusterHasFeature(resultingState.nodes(), new NodeFeature("f1")), is(true)); + assertThat(resultingState.clusterFeatures().clusterHasFeature(resultingState.nodes(), new NodeFeature("f2")), is(true)); } private DesiredNodeWithStatus createActualizedDesiredNode() { diff --git a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java index 874a6a96313e..a64303f376b2 100644 --- a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java +++ b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java @@ -9,8 +9,14 @@ package org.elasticsearch.features; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.test.ESTestCase; import java.util.List; @@ -69,6 +75,12 @@ public void testStateHasFeatures() { ); ClusterState state = 
ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNodeUtils.create("node1")) + .add(DiscoveryNodeUtils.create("node2")) + .add(DiscoveryNodeUtils.create("node3")) + ) .nodeFeatures( Map.of("node1", Set.of("f1", "f2", "nf1"), "node2", Set.of("f1", "f2", "nf2"), "node3", Set.of("f1", "f2", "nf1")) ) @@ -81,4 +93,33 @@ public void testStateHasFeatures() { assertFalse(service.clusterHasFeature(state, new NodeFeature("nf2"))); assertFalse(service.clusterHasFeature(state, new NodeFeature("nf3"))); } + + private static Version nextMajor() { + return Version.fromId((Version.CURRENT.major + 1) * 1_000_000 + 99); + } + + public void testStateHasAssumedFeatures() { + List specs = List.of( + new TestFeatureSpecification(Set.of(new NodeFeature("f1"), new NodeFeature("f2"), new NodeFeature("af1", true))) + ); + + ClusterState state = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNodeUtils.create("node1")) + .add(DiscoveryNodeUtils.create("node2")) + .add( + DiscoveryNodeUtils.builder("node3") + .version(new VersionInformation(nextMajor(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current())) + .build() + ) + ) + .nodeFeatures(Map.of("node1", Set.of("f1", "af1"), "node2", Set.of("f1", "f2", "af1"), "node3", Set.of("f1", "f2"))) + .build(); + + FeatureService service = new FeatureService(specs); + assertTrue(service.clusterHasFeature(state, new NodeFeature("f1"))); + assertFalse(service.clusterHasFeature(state, new NodeFeature("f2"))); + assertTrue(service.clusterHasFeature(state, new NodeFeature("af1", true))); + } } diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java index 97f44f7480a7..92bfabf6f197 100644 --- a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java +++ 
b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java @@ -77,8 +77,8 @@ public void setUp() throws Exception { clusterService = createClusterService(threadPool); localNodeId = clusterService.localNode().getId(); persistentTasksService = mock(PersistentTasksService.class); - featureService = new FeatureService(List.of(new HealthFeatures())); settings = Settings.builder().build(); + featureService = new FeatureService(List.of(new HealthFeatures())); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java index 36887681f557..9955fe4cf0f9 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java @@ -529,6 +529,7 @@ public void testValidateIntervalScheduleSupport() { var featureService = new FeatureService(List.of(new SnapshotLifecycleFeatures())); { ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a")).add(DiscoveryNodeUtils.create("b"))) .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) .build(); @@ -540,6 +541,7 @@ public void testValidateIntervalScheduleSupport() { } { ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a"))) .nodeFeatures(Map.of("a", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) .build(); try { @@ -550,6 +552,7 @@ public void testValidateIntervalScheduleSupport() { } { ClusterState state = ClusterState.builder(new ClusterName("cluster")) + 
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("a")).add(DiscoveryNodeUtils.create("b"))) .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) .build(); try { From 312c21a3240339477c70fb512b5643b23952d572 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 18 Dec 2024 00:58:11 +1100 Subject: [PATCH 4/5] Mute org.elasticsearch.index.engine.RecoverySourcePruneMergePolicyTests testPruneSome #118728 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index fe6c77bdf9f9..cdf3007ee002 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -304,6 +304,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/118806 - class: org.elasticsearch.xpack.esql.session.IndexResolverFieldNamesTests issue: https://github.com/elastic/elasticsearch/issues/118814 +- class: org.elasticsearch.index.engine.RecoverySourcePruneMergePolicyTests + method: testPruneSome + issue: https://github.com/elastic/elasticsearch/issues/118728 # Examples: # From e0763c25ae9600611fe93b8d4133b5106ff280fd Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Tue, 17 Dec 2024 17:16:40 +0200 Subject: [PATCH 5/5] Mark the lookup join tests in IndexResolverFieldNamesTests as snapshot-only (#118815) --- muted-tests.yml | 2 -- .../esql/session/IndexResolverFieldNamesTests.java | 13 +++++++++++++ .../qa/server/src/main/resources/docs/docs.csv-spec | 2 +- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index cdf3007ee002..42845fda8218 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -302,8 +302,6 @@ tests: - class: org.elasticsearch.xpack.security.QueryableReservedRolesIT method: testDeletingAndCreatingSecurityIndexTriggersSynchronization issue: https://github.com/elastic/elasticsearch/issues/118806 -- class: org.elasticsearch.xpack.esql.session.IndexResolverFieldNamesTests - 
issue: https://github.com/elastic/elasticsearch/issues/118814 - class: org.elasticsearch.index.engine.RecoverySourcePruneMergePolicyTests method: testPruneSome issue: https://github.com/elastic/elasticsearch/issues/118728 diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index e4271a0a6ddd..31ec4663738f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.Build; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.ParsingException; @@ -1364,6 +1365,7 @@ public void testMetrics() { } public void testLookupJoin() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( "FROM employees | KEEP languages | RENAME languages AS language_code | LOOKUP JOIN languages_lookup ON language_code", Set.of("languages", "languages.*", "language_code", "language_code.*"), @@ -1372,6 +1374,7 @@ public void testLookupJoin() { } public void testLookupJoinKeep() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM employees @@ -1385,6 +1388,7 @@ public void testLookupJoinKeep() { } public void testLookupJoinKeepWildcard() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM employees @@ -1398,6 +1402,7 @@ public void testLookupJoinKeepWildcard() { } public void testMultiLookupJoin() { + assumeTrue("LOOKUP JOIN available as snapshot 
only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1410,6 +1415,7 @@ public void testMultiLookupJoin() { } public void testMultiLookupJoinKeepBefore() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1423,6 +1429,7 @@ public void testMultiLookupJoinKeepBefore() { } public void testMultiLookupJoinKeepBetween() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1447,6 +1454,7 @@ public void testMultiLookupJoinKeepBetween() { } public void testMultiLookupJoinKeepAfter() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1473,6 +1481,7 @@ public void testMultiLookupJoinKeepAfter() { } public void testMultiLookupJoinKeepAfterWildcard() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1486,6 +1495,7 @@ public void testMultiLookupJoinKeepAfterWildcard() { } public void testMultiLookupJoinSameIndex() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1499,6 +1509,7 @@ public void testMultiLookupJoinSameIndex() { } public void testMultiLookupJoinSameIndexKeepBefore() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1513,6 +1524,7 @@ public void testMultiLookupJoinSameIndexKeepBefore() { } public void testMultiLookupJoinSameIndexKeepBetween() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1538,6 +1550,7 @@ public void 
testMultiLookupJoinSameIndexKeepBetween() { } public void testMultiLookupJoinSameIndexKeepAfter() { + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V7.isEnabled()); assertFieldNames( """ FROM sample_data diff --git a/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec index 60e81be43cc9..2fa82c05cc1a 100644 --- a/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/server/src/main/resources/docs/docs.csv-spec @@ -3353,7 +3353,7 @@ Alejandro Amabile Anoosh Basil -Bojan +Brendon // end::filterToday ;