From 2ff6bb05431ea1525278dcc858e01ce30fb0e15c Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 21 Oct 2024 17:08:50 -0500 Subject: [PATCH 001/202] Adding support for additional mapping to simulate ingest API (#114742) --- docs/changelog/114742.yaml | 5 + .../ingest/apis/simulate-ingest.asciidoc | 13 + .../test/ingest/80_ingest_simulate.yml | 355 ++++++++++++++++++ .../bulk/TransportSimulateBulkActionIT.java | 54 ++- .../org/elasticsearch/TransportVersions.java | 1 + .../action/bulk/BulkFeatures.java | 4 +- .../action/bulk/SimulateBulkRequest.java | 77 ++-- .../bulk/TransportSimulateBulkAction.java | 172 +++++---- .../ingest/RestSimulateIngestAction.java | 17 +- .../action/bulk/SimulateBulkRequestTests.java | 94 ++++- .../TransportSimulateBulkActionTests.java | 8 +- .../ingest/SimulateIngestServiceTests.java | 6 +- 12 files changed, 675 insertions(+), 131 deletions(-) create mode 100644 docs/changelog/114742.yaml diff --git a/docs/changelog/114742.yaml b/docs/changelog/114742.yaml new file mode 100644 index 0000000000000..5bd3dad4400b8 --- /dev/null +++ b/docs/changelog/114742.yaml @@ -0,0 +1,5 @@ +pr: 114742 +summary: Adding support for additional mapping to simulate ingest API +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/reference/ingest/apis/simulate-ingest.asciidoc b/docs/reference/ingest/apis/simulate-ingest.asciidoc index 1bee03ea3e58a..da591eed7546f 100644 --- a/docs/reference/ingest/apis/simulate-ingest.asciidoc +++ b/docs/reference/ingest/apis/simulate-ingest.asciidoc @@ -108,6 +108,14 @@ POST /_ingest/_simulate "index_patterns": ["my-index-*"], "composed_of": ["component_template_1", "component_template_2"] } + }, + "mapping_addition": { <4> + "dynamic": "strict", + "properties": { + "foo": { + "type": "keyword" + } + } } } ---- @@ -117,6 +125,7 @@ POST /_ingest/_simulate These templates can be used to change the pipeline(s) used, or to modify the mapping that will be used to validate the result. <3> This replaces the existing `my-index-template` index template with the contents given here for the duration of this request. These templates can be used to change the pipeline(s) used, or to modify the mapping that will be used to validate the result. +<4> This mapping is merged into the index's final mapping just before validation. It is used only for the duration of this request. [[simulate-ingest-api-request]] ==== {api-request-title} @@ -246,6 +255,10 @@ include::{es-ref-dir}/indices/put-index-template.asciidoc[tag=request-body] ==== +`mapping_addition`:: +(Optional, <>) +Definition of a mapping that will be merged into the index's mapping for validation during the course of this request. 
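To make the new `mapping_addition` body field concrete, here is a minimal sketch, in Java, of the raw map shape that the request carries for it, mirroring the integration tests added later in this patch. The `_doc` wrapper key matches how the REST layer wraps the user-supplied `mapping_addition` section (shown further down in `RestSimulateIngestAction`); the field names are illustrative only:

```java
import java.util.Map;

// A sketch of the map passed as the new fourth argument of SimulateBulkRequest.
// The "_doc" type key wraps the mapping body; "foo" is an illustrative field.
public class MappingAdditionSketch {
    public static void main(String[] args) {
        Map<String, Object> mappingAddition = Map.of(
            "_doc",
            Map.of(
                "dynamic", "strict",
                "properties", Map.of("foo", Map.of("type", "keyword"))
            )
        );
        // new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), mappingAddition)
        // would validate each document against the index's mapping merged with
        // this addition, without persisting anything to the cluster.
        System.out.println(mappingAddition);
    }
}
```

Because the merged mapping exists only for the duration of the request, no cluster state is changed by the simulation.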
+ [[simulate-ingest-api-example]] ==== {api-examples-title} diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 18eb401aaa0fe..d4aa2f1ad4467 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -1216,3 +1216,358 @@ setup: - match: { docs.0.doc._source.foo: "FOO" } - match: { docs.0.doc.executed_pipelines: ["foo-pipeline-2"] } - not_exists: docs.0.doc.error + +--- +"Test ingest simulate with mapping addition for data streams": + # In this test, we make sure that when the index template is a data stream template, simulate ingest works the same whether the data + # stream has been created or not -- either way, we expect it to use the template rather than the data stream / index mappings and settings. + + - skip: + features: + - headers + - allowed_warnings + + - requires: + cluster_features: ["simulate.mapping.addition"] + reason: "ingest simulate mapping addition added in 8.16" + + - do: + headers: + Content-Type: application/json + ingest.put_pipeline: + id: "foo-pipeline" + body: > + { + "processors": [ + { + "set": { + "field": "foo", + "value": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + cluster.put_component_template: + name: mappings_template + body: + template: + mappings: + dynamic: strict + properties: + foo: + type: boolean + + - do: + cluster.put_component_template: + name: settings_template + body: + template: + settings: + index: + default_pipeline: "foo-pipeline" + + - do: + allowed_warnings: + - "index template [test-composable-1] has index patterns [foo*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-1] will take precedence during new index creation" + indices.put_index_template: + name: test-composable-1 + body: + index_patterns: + - foo* + composed_of: + - mappings_template + - settings_template + + - do: + allowed_warnings: + - "index template [my-template-1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template-1] will take precedence during new index creation" + indices.put_index_template: + name: my-template-1 + body: + index_patterns: [simple-data-stream1] + composed_of: + - mappings_template + - settings_template + data_stream: {} + + # Here we replace my-template-1 with a substitute version that uses the settings_template_2 and mappings_template_2 templates defined in + # this request, and foo-pipeline-2 defined in this request. 
+  - do:
+      headers:
+        Content-Type: application/json
+      simulate.ingest:
+        index: simple-data-stream1
+        body: >
+          {
+            "docs": [
+              {
+                "_id": "asdf",
+                "_source": {
+                  "@timestamp": 1234,
+                  "foo": false
+                }
+              }
+            ],
+            "pipeline_substitutions": {
+              "foo-pipeline-2": {
+                "processors": [
+                  {
+                    "set": {
+                      "field": "foo",
+                      "value": "FOO"
+                    }
+                  }
+                ]
+              }
+            },
+            "component_template_substitutions": {
+              "settings_template_2": {
+                "template": {
+                  "settings": {
+                    "index": {
+                      "default_pipeline": "foo-pipeline-2"
+                    }
+                  }
+                }
+              },
+              "mappings_template_2": {
+                "template": {
+                  "mappings": {
+                    "dynamic": "strict",
+                    "properties": {
+                      "foo": {
+                        "type": "integer"
+                      }
+                    }
+                  }
+                }
+              }
+            },
+            "index_template_substitutions": {
+              "my-template-1": {
+                "index_patterns": ["simple-data-stream1"],
+                "composed_of": ["settings_template_2", "mappings_template_2"],
+                "data_stream": {}
+              }
+            },
+            "mapping_addition": {
+              "dynamic": "strict",
+              "properties": {
+                "foo": {
+                  "type": "keyword"
+                }
+              }
+            }
+          }
+  - length: { docs: 1 }
+  - match: { docs.0.doc._index: "simple-data-stream1" }
+  - match: { docs.0.doc._source.foo: "FOO" }
+  - match: { docs.0.doc.executed_pipelines: ["foo-pipeline-2"] }
+  - not_exists: docs.0.doc.error
+
+  - do:
+      indices.create_data_stream:
+        name: simple-data-stream1
+  - is_true: acknowledged
+
+  - do:
+      cluster.health:
+        wait_for_status: yellow
+
+  # Now that we have created a data stream, run the exact same simulate ingest request to make sure we still get the same result, and that
+  # the substitutions and additions from the simulate ingest request are used instead of information from the data stream or its backing
+  # index.
+  - do:
+      headers:
+        Content-Type: application/json
+      simulate.ingest:
+        index: simple-data-stream1
+        body: >
+          {
+            "docs": [
+              {
+                "_id": "asdf",
+                "_source": {
+                  "@timestamp": 1234,
+                  "foo": false
+                }
+              }
+            ],
+            "pipeline_substitutions": {
+              "foo-pipeline-2": {
+                "processors": [
+                  {
+                    "set": {
+                      "field": "foo",
+                      "value": "FOO"
+                    }
+                  }
+                ]
+              }
+            },
+            "component_template_substitutions": {
+              "settings_template_2": {
+                "template": {
+                  "settings": {
+                    "index": {
+                      "default_pipeline": "foo-pipeline-2"
+                    }
+                  }
+                }
+              },
+              "mappings_template_2": {
+                "template": {
+                  "mappings": {
+                    "dynamic": "strict",
+                    "properties": {
+                      "foo": {
+                        "type": "integer"
+                      }
+                    }
+                  }
+                }
+              }
+            },
+            "index_template_substitutions": {
+              "my-template-1": {
+                "index_patterns": ["simple-data-stream1"],
+                "composed_of": ["settings_template_2", "mappings_template_2"],
+                "data_stream": {}
+              }
+            },
+            "mapping_addition": {
+              "dynamic": "strict",
+              "properties": {
+                "foo": {
+                  "type": "keyword"
+                }
+              }
+            }
+          }
+  - length: { docs: 1 }
+  - match: { docs.0.doc._index: "simple-data-stream1" }
+  - match: { docs.0.doc._source.foo: "FOO" }
+  - match: { docs.0.doc.executed_pipelines: ["foo-pipeline-2"] }
+  - not_exists: docs.0.doc.error
+
+---
+"Test mapping addition works with legacy templates":
+  # In this test, we make sure that the mapping addition is merged into the mappings that come from a matching legacy (v1) template, and
+  # that it works the same whether the index has been created or not -- either way, we expect the mapping addition to be applied during
+  # validation.
+ + - skip: + features: + - headers + - allowed_warnings + + - requires: + cluster_features: ["simulate.mapping.addition"] + reason: "ingest simulate mapping addition added in 8.16" + + - do: + indices.put_template: + name: my-legacy-template + body: + index_patterns: foo-* + settings: + number_of_replicas: 0 + mappings: + dynamic: strict + properties: + foo: + type: integer + bar: + type: boolean + + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "not a boolean" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "not a boolean" } + - match: { docs.0.doc.error.type: "document_parsing_exception" } + + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "not a boolean" + } + } + ], + "mapping_addition": { + "dynamic": "strict", + "properties": { + "bar": { + "type": "keyword" + } + } + } + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "not a boolean" } + - not_exists: docs.0.doc.error + + - do: + indices.create: + index: foo-1 + - match: { acknowledged: true } + + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "not a boolean" + } + } + ], + "mapping_addition": { + "dynamic": "strict", + "properties": { + "bar": { + "type": "keyword" + } + } + } + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "not a boolean" } + - not_exists: docs.0.doc.error diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java index cd17c5b345c59..d5d21c548a15d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java @@ -34,6 +34,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -59,7 +60,7 @@ public void testMappingValidationIndexExists() { } """; indicesAdmin().create(new CreateIndexRequest(indexName).mapping(mapping)).actionGet(); - BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of()); + BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); bulkRequest.add(new IndexRequest(indexName).source(""" { "foo1": "baz" @@ -131,10 +132,10 @@ public void testMappingValidationIndexExistsTemplateSubstitutions() throws IOExc String indexName = "my-index-1"; // First, run before the index is created: - assertMappingsUpdatedFromComponentTemplateSubstitutions(indexName, indexTemplateName); + assertMappingsUpdatedFromSubstitutions(indexName, indexTemplateName); // Now, create the index and make sure the component template substitutions work the same: indicesAdmin().create(new CreateIndexRequest(indexName)).actionGet(); - 
assertMappingsUpdatedFromComponentTemplateSubstitutions(indexName, indexTemplateName);
+        assertMappingsUpdatedFromSubstitutions(indexName, indexTemplateName);
         // Now make sure nothing was actually changed:
         indicesAdmin().refresh(new RefreshRequest(indexName)).actionGet();
         SearchResponse searchResponse = client().search(new SearchRequest(indexName)).actionGet();
@@ -146,7 +147,7 @@ public void testMappingValidationIndexExistsTemplateSubstitutions() throws IOExc
         assertThat(fields.size(), equalTo(1));
     }
 
-    private void assertMappingsUpdatedFromComponentTemplateSubstitutions(String indexName, String indexTemplateName) {
+    private void assertMappingsUpdatedFromSubstitutions(String indexName, String indexTemplateName) {
         IndexRequest indexRequest1 = new IndexRequest(indexName).source("""
             {
               "foo1": "baz"
@@ -159,7 +160,7 @@ private void assertMappingsUpdatedFromComponentTemplateSubstitutions(String inde
             """, XContentType.JSON).id(randomUUID());
         {
             // First we use the original component template, and expect a failure in the second document:
-            BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of());
+            BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of());
             bulkRequest.add(indexRequest1);
             bulkRequest.add(indexRequest2);
             BulkResponse response = client().execute(new ActionType<BulkResponse>(SimulateBulkAction.NAME), bulkRequest).actionGet();
@@ -192,6 +193,7 @@ private void assertMappingsUpdatedFromComponentTemplateSubstitutions(String inde
                     )
                 )
             ),
+            Map.of(),
             Map.of()
         );
         bulkRequest.add(indexRequest1);
@@ -226,7 +228,34 @@ private void assertMappingsUpdatedFromComponentTemplateSubstitutions(String inde
                     )
                 )
             ),
-            Map.of(indexTemplateName, Map.of("index_patterns", List.of(indexName), "composed_of", List.of("test-component-template-2")))
+                Map.of(
+                    indexTemplateName,
+                    Map.of("index_patterns", List.of(indexName), "composed_of", List.of("test-component-template-2"))
+                ),
+                Map.of()
+            );
+            bulkRequest.add(indexRequest1);
+            bulkRequest.add(indexRequest2);
+            BulkResponse response = client().execute(new ActionType<BulkResponse>(SimulateBulkAction.NAME), bulkRequest).actionGet();
+            assertThat(response.getItems().length, equalTo(2));
+            assertThat(response.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED));
+            assertNull(((SimulateIndexResponse) response.getItems()[0].getResponse()).getException());
+            assertThat(response.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED));
+            assertNull(((SimulateIndexResponse) response.getItems()[1].getResponse()).getException());
+        }
+
+        {
+            /*
+             * Now we add a mapping_addition that defines both fields, so we expect no exception:
+             */
+            BulkRequest bulkRequest = new SimulateBulkRequest(
+                Map.of(),
+                Map.of(),
+                Map.of(),
+                Map.of(
+                    "_doc",
+                    Map.of("dynamic", "strict", "properties", Map.of("foo1", Map.of("type", "text"), "foo3", Map.of("type", "text")))
+                )
            );
            bulkRequest.add(indexRequest1);
            bulkRequest.add(indexRequest2);
@@ -245,7 +274,7 @@ public void testMappingValidationIndexDoesNotExistsNoTemplate() {
             * mapping-less "random-index-template" created by the parent class), so we expect no mapping validation failure.
*/ String indexName = randomAlphaOfLength(20).toLowerCase(Locale.ROOT); - BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of()); + BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); bulkRequest.add(new IndexRequest(indexName).source(""" { "foo1": "baz" @@ -292,7 +321,7 @@ public void testMappingValidationIndexDoesNotExistsV2Template() throws IOExcepti request.indexTemplate(composableIndexTemplate); client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); - BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of()); + BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); bulkRequest.add(new IndexRequest(indexName).source(""" { "foo1": "baz" @@ -324,7 +353,7 @@ public void testMappingValidationIndexDoesNotExistsV1Template() { indicesAdmin().putTemplate( new PutIndexTemplateRequest("test-template").patterns(List.of("my-index-*")).mapping("foo1", "type=integer") ).actionGet(); - BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of()); + BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); bulkRequest.add(new IndexRequest(indexName).source(""" { "foo1": "baz" @@ -378,7 +407,7 @@ public void testMappingValidationIndexDoesNotExistsDataStream() throws IOExcepti client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); { // First, try with no @timestamp to make sure we're picking up data-stream-specific templates - BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of()); + BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); bulkRequest.add(new IndexRequest(indexName).source(""" { "foo1": "baz" @@ -389,7 +418,8 @@ public void testMappingValidationIndexDoesNotExistsDataStream() throws IOExcepti "foo3": "baz" } """, XContentType.JSON).id(randomUUID())); - BulkResponse response = client().execute(new ActionType(SimulateBulkAction.NAME), bulkRequest).actionGet(); + BulkResponse response = client().execute(new ActionType(SimulateBulkAction.NAME), bulkRequest) + .actionGet(5, TimeUnit.SECONDS); assertThat(response.getItems().length, equalTo(2)); assertThat(response.getItems()[0].getResponse().getResult(), equalTo(DocWriteResponse.Result.CREATED)); assertThat( @@ -404,7 +434,7 @@ public void testMappingValidationIndexDoesNotExistsDataStream() throws IOExcepti } { // Now with @timestamp - BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of()); + BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); bulkRequest.add(new IndexRequest(indexName).source(""" { "@timestamp": "2024-08-27", diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index cde09d33516c9..7e06004e47cfb 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -178,6 +178,7 @@ static TransportVersion def(int id) { public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ = def(8_776_00_0); + public static final TransportVersion SIMULATE_MAPPING_ADDITION = def(8_777_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java index 78e603fba9be0..22cf8a2260d87 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java @@ -16,6 +16,7 @@ import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS; +import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_ADDITION; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION_TEMPLATES; @@ -25,7 +26,8 @@ public Set getFeatures() { SIMULATE_MAPPING_VALIDATION, SIMULATE_MAPPING_VALIDATION_TEMPLATES, SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS, - SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS + SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS, + SIMULATE_MAPPING_ADDITION ); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java index 6fa22151396df..cc7fd431d8097 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java @@ -15,12 +15,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.IOException; -import java.util.HashMap; import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; /** * This extends BulkRequest with support for providing substitute pipeline definitions, component template definitions, and index template @@ -73,7 +73,8 @@ * } * } * } - * }, + * } + * }, * "index_template_substitutions": { * "my-index-template-1": { * "template": { @@ -84,6 +85,13 @@ * ] * } * } + * }, + * "mapping_addition": { + * "dynamic": "strict", + * "properties": { + * "foo": { + * "type": "keyword" + * } * } * * The pipelineSubstitutions Map held by this class is intended to be the result of XContentHelper.convertToMap(). The top-level keys @@ -94,6 +102,7 @@ public class SimulateBulkRequest extends BulkRequest { private final Map> pipelineSubstitutions; private final Map> componentTemplateSubstitutions; private final Map> indexTemplateSubstitutions; + private final Map mappingAddition; /** * @param pipelineSubstitutions The pipeline definitions that are to be used in place of any pre-existing pipeline definitions with @@ -103,16 +112,23 @@ public class SimulateBulkRequest extends BulkRequest { * component template definitions with the same name. * @param indexTemplateSubstitutions The index template definitions that are to be used in place of any pre-existing * index template definitions with the same name. 
+ * @param mappingAddition A mapping that will be merged into the final index's mapping for mapping validation */ public SimulateBulkRequest( - @Nullable Map> pipelineSubstitutions, - @Nullable Map> componentTemplateSubstitutions, - @Nullable Map> indexTemplateSubstitutions + Map> pipelineSubstitutions, + Map> componentTemplateSubstitutions, + Map> indexTemplateSubstitutions, + Map mappingAddition ) { super(); + Objects.requireNonNull(pipelineSubstitutions); + Objects.requireNonNull(componentTemplateSubstitutions); + Objects.requireNonNull(indexTemplateSubstitutions); + Objects.requireNonNull(mappingAddition); this.pipelineSubstitutions = pipelineSubstitutions; this.componentTemplateSubstitutions = componentTemplateSubstitutions; this.indexTemplateSubstitutions = indexTemplateSubstitutions; + this.mappingAddition = mappingAddition; } @SuppressWarnings("unchecked") @@ -129,6 +145,11 @@ public SimulateBulkRequest(StreamInput in) throws IOException { } else { indexTemplateSubstitutions = Map.of(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_MAPPING_ADDITION)) { + this.mappingAddition = (Map) in.readGenericValue(); + } else { + mappingAddition = Map.of(); + } } @Override @@ -141,6 +162,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS)) { out.writeGenericValue(indexTemplateSubstitutions); } + if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_MAPPING_ADDITION)) { + out.writeGenericValue(mappingAddition); + } } public Map> getPipelineSubstitutions() { @@ -153,41 +177,39 @@ public boolean isSimulated() { } @Override - public Map getComponentTemplateSubstitutions() throws IOException { - if (componentTemplateSubstitutions == null) { - return Map.of(); - } - Map result = new HashMap<>(componentTemplateSubstitutions.size()); - for (Map.Entry> rawEntry : componentTemplateSubstitutions.entrySet()) { - result.put(rawEntry.getKey(), convertRawTemplateToComponentTemplate(rawEntry.getValue())); - } - return result; + public Map getComponentTemplateSubstitutions() { + return componentTemplateSubstitutions.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> convertRawTemplateToComponentTemplate(entry.getValue()))); } @Override - public Map getIndexTemplateSubstitutions() throws IOException { - if (indexTemplateSubstitutions == null) { - return Map.of(); - } - Map result = new HashMap<>(indexTemplateSubstitutions.size()); - for (Map.Entry> rawEntry : indexTemplateSubstitutions.entrySet()) { - result.put(rawEntry.getKey(), convertRawTemplateToIndexTemplate(rawEntry.getValue())); - } - return result; + public Map getIndexTemplateSubstitutions() { + return indexTemplateSubstitutions.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> convertRawTemplateToIndexTemplate(entry.getValue()))); + } + + public Map getMappingAddition() { + return mappingAddition; } - private static ComponentTemplate convertRawTemplateToComponentTemplate(Map rawTemplate) throws IOException { + private static ComponentTemplate convertRawTemplateToComponentTemplate(Map rawTemplate) { ComponentTemplate componentTemplate; try (var parser = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, rawTemplate)) { componentTemplate = ComponentTemplate.parse(parser); + } catch (IOException e) { + throw new RuntimeException(e); } return componentTemplate; } - private static ComposableIndexTemplate 
convertRawTemplateToIndexTemplate(Map rawTemplate) throws IOException { + private static ComposableIndexTemplate convertRawTemplateToIndexTemplate(Map rawTemplate) { ComposableIndexTemplate indexTemplate; try (var parser = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, rawTemplate)) { indexTemplate = ComposableIndexTemplate.parse(parser); + } catch (IOException e) { + throw new RuntimeException(e); } return indexTemplate; } @@ -197,7 +219,8 @@ public BulkRequest shallowClone() { BulkRequest bulkRequest = new SimulateBulkRequest( pipelineSubstitutions, componentTemplateSubstitutions, - indexTemplateSubstitutions + indexTemplateSubstitutions, + mappingAddition ); bulkRequest.setRefreshPolicy(getRefreshPolicy()); bulkRequest.waitForActiveShards(waitForActiveShards()); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index d7c555879c00f..0888b70f5399c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -26,10 +26,13 @@ import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; @@ -37,6 +40,7 @@ import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; @@ -50,6 +54,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.HashMap; @@ -75,6 +83,7 @@ public class TransportSimulateBulkAction extends TransportAbstractBulkAction { "simulate.component.template.substitutions" ); public static final NodeFeature SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS = new NodeFeature("simulate.index.template.substitutions"); + public static final NodeFeature SIMULATE_MAPPING_ADDITION = new NodeFeature("simulate.mapping.addition"); private final IndicesService indicesService; private final NamedXContentRegistry xContentRegistry; private final Set indexSettingProviders; @@ -122,11 +131,17 @@ protected void doInternalExecute( final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); Map componentTemplateSubstitutions = bulkRequest.getComponentTemplateSubstitutions(); Map indexTemplateSubstitutions = bulkRequest.getIndexTemplateSubstitutions(); + Map 
mappingAddition = ((SimulateBulkRequest) bulkRequest).getMappingAddition(); for (int i = 0; i < bulkRequest.requests.size(); i++) { DocWriteRequest docRequest = bulkRequest.requests.get(i); assert docRequest instanceof IndexRequest : "TransportSimulateBulkAction should only ever be called with IndexRequests"; IndexRequest request = (IndexRequest) docRequest; - Exception mappingValidationException = validateMappings(componentTemplateSubstitutions, indexTemplateSubstitutions, request); + Exception mappingValidationException = validateMappings( + componentTemplateSubstitutions, + indexTemplateSubstitutions, + mappingAddition, + request + ); responses.set( i, BulkItemResponse.success( @@ -159,6 +174,7 @@ protected void doInternalExecute( private Exception validateMappings( Map componentTemplateSubstitutions, Map indexTemplateSubstitutions, + Map mappingAddition, IndexRequest request ) { final SourceToParse sourceToParse = new SourceToParse( @@ -174,7 +190,10 @@ private Exception validateMappings( Exception mappingValidationException = null; IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(request.index()); try { - if (indexAbstraction != null && componentTemplateSubstitutions.isEmpty() && indexTemplateSubstitutions.isEmpty()) { + if (indexAbstraction != null + && componentTemplateSubstitutions.isEmpty() + && indexTemplateSubstitutions.isEmpty() + && mappingAddition.isEmpty()) { /* * In this case the index exists and we don't have any component template overrides. So we can just use withTempIndexService * to do the mapping validation, using all the existing logic for validation. @@ -250,36 +269,8 @@ private Exception validateMappings( indexSettingProviders ); CompressedXContent mappings = template.mappings(); - if (mappings != null) { - MappingMetadata mappingMetadata = new MappingMetadata(mappings); - Settings dummySettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .build(); - final IndexMetadata imd = IndexMetadata.builder(request.index()) - .settings(dummySettings) - .putMapping(mappingMetadata) - .build(); - indicesService.withTempIndexService(imd, indexService -> { - indexService.mapperService().updateMapping(null, imd); - return IndexShard.prepareIndex( - indexService.mapperService(), - sourceToParse, - SequenceNumbers.UNASSIGNED_SEQ_NO, - -1, - -1, - VersionType.INTERNAL, - Engine.Operation.Origin.PRIMARY, - Long.MIN_VALUE, - false, - request.ifSeqNo(), - request.ifPrimaryTerm(), - 0 - ); - }); - } + CompressedXContent mergedMappings = mergeMappings(mappings, mappingAddition); + validateUpdatedMappings(mappings, mergedMappings, request, sourceToParse); } else { List matchingTemplates = findV1Templates(simulatedState.metadata(), request.index(), false); final Map mappingsMap = MetadataCreateIndexService.parseV1Mappings( @@ -287,40 +278,8 @@ private Exception validateMappings( matchingTemplates.stream().map(IndexTemplateMetadata::getMappings).collect(toList()), xContentRegistry ); - final CompressedXContent combinedMappings; - if (mappingsMap.isEmpty()) { - combinedMappings = null; - } else { - combinedMappings = new CompressedXContent(mappingsMap); - } - Settings dummySettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-                    .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
-                    .build();
-                MappingMetadata mappingMetadata = combinedMappings == null ? null : new MappingMetadata(combinedMappings);
-                final IndexMetadata imd = IndexMetadata.builder(request.index())
-                    .putMapping(mappingMetadata)
-                    .settings(dummySettings)
-                    .build();
-                indicesService.withTempIndexService(imd, indexService -> {
-                    indexService.mapperService().updateMapping(null, imd);
-                    return IndexShard.prepareIndex(
-                        indexService.mapperService(),
-                        sourceToParse,
-                        SequenceNumbers.UNASSIGNED_SEQ_NO,
-                        -1,
-                        -1,
-                        VersionType.INTERNAL,
-                        Engine.Operation.Origin.PRIMARY,
-                        Long.MIN_VALUE,
-                        false,
-                        request.ifSeqNo(),
-                        request.ifPrimaryTerm(),
-                        0
-                    );
-                });
+                final CompressedXContent combinedMappings = mergeMappings(new CompressedXContent(mappingsMap), mappingAddition);
+                validateUpdatedMappings(null, combinedMappings, request, sourceToParse);
             }
         }
     } catch (Exception e) {
@@ -329,6 +288,66 @@
         return mappingValidationException;
     }
 
+    /*
+     * Validates that the request's document can be parsed once the updatedMappings have been merged on top of the originalMappings.
+     */
+    private void validateUpdatedMappings(
+        @Nullable CompressedXContent originalMappings,
+        @Nullable CompressedXContent updatedMappings,
+        IndexRequest request,
+        SourceToParse sourceToParse
+    ) throws IOException {
+        if (updatedMappings == null) {
+            return; // no validation to do
+        }
+        Settings dummySettings = Settings.builder()
+            .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
+            .build();
+        IndexMetadata.Builder originalIndexMetadataBuilder = IndexMetadata.builder(request.index()).settings(dummySettings);
+        if (originalMappings != null) {
+            originalIndexMetadataBuilder.putMapping(new MappingMetadata(originalMappings));
+        }
+        final IndexMetadata originalIndexMetadata = originalIndexMetadataBuilder.build();
+        final IndexMetadata updatedIndexMetadata = IndexMetadata.builder(request.index())
+            .settings(dummySettings)
+            .putMapping(new MappingMetadata(updatedMappings))
+            .build();
+        indicesService.withTempIndexService(originalIndexMetadata, indexService -> {
+            indexService.mapperService().merge(updatedIndexMetadata, MapperService.MergeReason.MAPPING_UPDATE);
+            return IndexShard.prepareIndex(
+                indexService.mapperService(),
+                sourceToParse,
+                SequenceNumbers.UNASSIGNED_SEQ_NO,
+                -1,
+                -1,
+                VersionType.INTERNAL,
+                Engine.Operation.Origin.PRIMARY,
+                Long.MIN_VALUE,
+                false,
+                request.ifSeqNo(),
+                request.ifPrimaryTerm(),
+                0
+            );
+        });
+    }
+
+    private static CompressedXContent mergeMappings(@Nullable CompressedXContent originalMapping, Map<String, Object> mappingAddition)
+        throws IOException {
+        Map<String, Object> combinedMappingMap = new HashMap<>();
+        if (originalMapping != null) {
+            combinedMappingMap.putAll(XContentHelper.convertToMap(originalMapping.uncompressed(), true, XContentType.JSON).v2());
+        }
+        XContentHelper.update(combinedMappingMap, mappingAddition, true);
+        if (combinedMappingMap.isEmpty()) {
+            return null;
+        } else {
+            return convertMappingMapToXContent(combinedMappingMap);
+        }
+    }
+
     /*
      * This overrides TransportSimulateBulkAction's getIngestService to allow us to provide an IngestService that handles pipeline
      * substitutions defined in the request.
@@ -344,4 +363,25 @@ protected Boolean resolveFailureStore(String indexName, Metadata metadata, long // A simulate bulk request should not change any persistent state in the system, so we never write to the failure store return null; } + + private static CompressedXContent convertMappingMapToXContent(Map rawAdditionalMapping) throws IOException { + CompressedXContent compressedXContent; + if (rawAdditionalMapping == null || rawAdditionalMapping.isEmpty()) { + compressedXContent = null; + } else { + try (var parser = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, rawAdditionalMapping)) { + compressedXContent = mappingFromXContent(parser); + } + } + return compressedXContent; + } + + private static CompressedXContent mappingFromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + return new CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(parser.mapOrdered()))); + } else { + throw new IllegalArgumentException("Unexpected token: " + token); + } + } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java index 680860332fe74..c825a8198e6e4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java @@ -74,10 +74,21 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String defaultPipeline = request.param("pipeline"); Tuple sourceTuple = request.contentOrSourceParam(); Map sourceMap = XContentHelper.convertToMap(sourceTuple.v2(), false, sourceTuple.v1()).v2(); + Map> pipelineSubstitutions = (Map>) sourceMap.remove( + "pipeline_substitutions" + ); + Map> componentTemplateSubstitutions = (Map>) sourceMap.remove( + "component_template_substitutions" + ); + Map> indexTemplateSubstitutions = (Map>) sourceMap.remove( + "index_template_substitutions" + ); + Object mappingAddition = sourceMap.remove("mapping_addition"); SimulateBulkRequest bulkRequest = new SimulateBulkRequest( - (Map>) sourceMap.remove("pipeline_substitutions"), - (Map>) sourceMap.remove("component_template_substitutions"), - (Map>) sourceMap.remove("index_template_substitutions") + pipelineSubstitutions == null ? Map.of() : pipelineSubstitutions, + componentTemplateSubstitutions == null ? Map.of() : componentTemplateSubstitutions, + indexTemplateSubstitutions == null ? Map.of() : indexTemplateSubstitutions, + mappingAddition == null ? 
Map.of() : Map.of("_doc", mappingAddition) ); BytesReference transformedData = convertToBulkRequestXContentBytes(sourceMap); bulkRequest.add( diff --git a/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java index c94e4e46c9ee3..1e651791eb18a 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/SimulateBulkRequestTests.java @@ -22,32 +22,74 @@ import java.util.List; import java.util.Map; +import static java.util.Map.entry; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class SimulateBulkRequestTests extends ESTestCase { public void testSerialization() throws Exception { - testSerialization(getTestPipelineSubstitutions(), getTestComponentTemplateSubstitutions(), getTestIndexTemplateSubstitutions()); - testSerialization(getTestPipelineSubstitutions(), null, null); - testSerialization(getTestPipelineSubstitutions(), getTestComponentTemplateSubstitutions(), null); - testSerialization(getTestPipelineSubstitutions(), null, getTestIndexTemplateSubstitutions()); - testSerialization(null, getTestComponentTemplateSubstitutions(), getTestIndexTemplateSubstitutions()); - testSerialization(null, getTestComponentTemplateSubstitutions(), null); - testSerialization(null, null, getTestIndexTemplateSubstitutions()); - testSerialization(null, null, null); - testSerialization(Map.of(), Map.of(), Map.of()); + testSerialization( + getMapOrEmpty(getTestPipelineSubstitutions()), + getMapOrEmpty(getTestComponentTemplateSubstitutions()), + getMapOrEmpty(getTestIndexTemplateSubstitutions()), + getMapOrEmpty(getTestMappingAddition()) + ); + } + + private Map getMapOrEmpty(Map map) { + if (randomBoolean()) { + return map; + } else { + return Map.of(); + } + } + + public void testNullsNotAllowed() { + assertThrows( + NullPointerException.class, + () -> new SimulateBulkRequest( + null, + getTestPipelineSubstitutions(), + getTestComponentTemplateSubstitutions(), + getTestMappingAddition() + ) + ); + assertThrows( + NullPointerException.class, + () -> new SimulateBulkRequest( + getTestPipelineSubstitutions(), + null, + getTestComponentTemplateSubstitutions(), + getTestMappingAddition() + ) + ); + assertThrows( + NullPointerException.class, + () -> new SimulateBulkRequest(getTestPipelineSubstitutions(), getTestPipelineSubstitutions(), null, getTestMappingAddition()) + ); + assertThrows( + NullPointerException.class, + () -> new SimulateBulkRequest( + getTestPipelineSubstitutions(), + getTestPipelineSubstitutions(), + getTestComponentTemplateSubstitutions(), + null + ) + ); } private void testSerialization( Map> pipelineSubstitutions, Map> componentTemplateSubstitutions, - Map> indexTemplateSubstitutions + Map> indexTemplateSubstitutions, + Map mappingAddition ) throws IOException { SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest( pipelineSubstitutions, componentTemplateSubstitutions, - indexTemplateSubstitutions + indexTemplateSubstitutions, + mappingAddition ); /* * Note: SimulateBulkRequest does not implement equals or hashCode, so we can't test serialization in the usual way for a @@ -59,7 +101,7 @@ private void testSerialization( @SuppressWarnings({ "unchecked", "rawtypes" }) public void testGetComponentTemplateSubstitutions() throws IOException { - SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of()); 
+ SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); assertThat(simulateBulkRequest.getComponentTemplateSubstitutions(), equalTo(Map.of())); String substituteComponentTemplatesString = """ { @@ -93,7 +135,7 @@ public void testGetComponentTemplateSubstitutions() throws IOException { XContentType.JSON ).v2(); Map> substituteComponentTemplates = (Map>) tempMap; - simulateBulkRequest = new SimulateBulkRequest(Map.of(), substituteComponentTemplates, Map.of()); + simulateBulkRequest = new SimulateBulkRequest(Map.of(), substituteComponentTemplates, Map.of(), Map.of()); Map componentTemplateSubstitutions = simulateBulkRequest.getComponentTemplateSubstitutions(); assertThat(componentTemplateSubstitutions.size(), equalTo(2)); assertThat( @@ -118,7 +160,7 @@ public void testGetComponentTemplateSubstitutions() throws IOException { } public void testGetIndexTemplateSubstitutions() throws IOException { - SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of()); + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); assertThat(simulateBulkRequest.getIndexTemplateSubstitutions(), equalTo(Map.of())); String substituteIndexTemplatesString = """ { @@ -154,7 +196,7 @@ public void testGetIndexTemplateSubstitutions() throws IOException { randomBoolean(), XContentType.JSON ).v2(); - simulateBulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), substituteIndexTemplates); + simulateBulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), substituteIndexTemplates, Map.of()); Map indexTemplateSubstitutions = simulateBulkRequest.getIndexTemplateSubstitutions(); assertThat(indexTemplateSubstitutions.size(), equalTo(2)); assertThat( @@ -179,7 +221,8 @@ public void testShallowClone() throws IOException { SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest( getTestPipelineSubstitutions(), getTestComponentTemplateSubstitutions(), - getTestIndexTemplateSubstitutions() + getTestIndexTemplateSubstitutions(), + getTestMappingAddition() ); simulateBulkRequest.setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())); simulateBulkRequest.waitForActiveShards(randomIntBetween(1, 10)); @@ -204,7 +247,6 @@ public void testShallowClone() throws IOException { assertThat(shallowCopy.routing(), equalTo(simulateBulkRequest.routing())); assertThat(shallowCopy.requireAlias(), equalTo(simulateBulkRequest.requireAlias())); assertThat(shallowCopy.requireDataStream(), equalTo(simulateBulkRequest.requireDataStream())); - } private static Map> getTestPipelineSubstitutions() { @@ -248,4 +290,22 @@ private static Map> getTestIndexTemplateSubstitution Map.of("template", Map.of("index_patterns", List.of("foo*", "bar*"), "mappings", Map.of(), "settings", Map.of())) ); } + + private static Map getTestMappingAddition() { + return Map.ofEntries( + entry( + "_doc", + Map.ofEntries( + entry("dynamic", "strict"), + entry( + "properties", + Map.ofEntries( + entry("foo", Map.ofEntries(entry("type", "keyword"))), + entry("bar", Map.ofEntries(entry("type", "boolean"))) + ) + ) + ) + ) + ); + } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java index 71bc31334920e..63d308e1579f3 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionTests.java @@ -135,7 +135,7 @@ public void tearDown() throws Exception { public void testIndexData() throws IOException { Task task = mock(Task.class); // unused - BulkRequest bulkRequest = new SimulateBulkRequest(null, null, null); + BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); int bulkItemCount = randomIntBetween(0, 200); for (int i = 0; i < bulkItemCount; i++) { Map source = Map.of(randomAlphaOfLength(10), randomAlphaOfLength(5)); @@ -218,7 +218,11 @@ public void testIndexDataWithValidation() throws IOException { * (7) An indexing request to a nonexistent index that matches no templates */ Task task = mock(Task.class); // unused - BulkRequest bulkRequest = new SimulateBulkRequest(null, null, null); + /* + * Here we only add a mapping_addition because if there is no mapping at all TransportSimulateBulkAction skips mapping validation + * altogether, and we need it to run for this test to pass. + */ + BulkRequest bulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of("_doc", Map.of("dynamic", "strict"))); int bulkItemCount = randomIntBetween(0, 200); Map indicesMap = new HashMap<>(); Map v1Templates = new HashMap<>(); diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java index 3b3f5bdc747b5..94b3607bd7608 100644 --- a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java @@ -65,7 +65,7 @@ public void testGetPipeline() { ingestService.innerUpdatePipelines(ingestMetadata); { // First we make sure that if there are no substitutions that we get our original pipeline back: - SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(null, null, null); + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(Map.of(), Map.of(), Map.of(), Map.of()); SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); Pipeline pipeline = simulateIngestService.getPipeline("pipeline1"); assertThat(pipeline.getProcessors(), contains(transformedMatch(Processor::getType, equalTo("processor1")))); @@ -83,7 +83,7 @@ public void testGetPipeline() { ); pipelineSubstitutions.put("pipeline2", newHashMap("processors", List.of(newHashMap("processor3", Collections.emptyMap())))); - SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions, null, null); + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions, Map.of(), Map.of(), Map.of()); SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); Pipeline pipeline1 = simulateIngestService.getPipeline("pipeline1"); assertThat( @@ -103,7 +103,7 @@ public void testGetPipeline() { */ Map> pipelineSubstitutions = new HashMap<>(); pipelineSubstitutions.put("pipeline2", newHashMap("processors", List.of(newHashMap("processor3", Collections.emptyMap())))); - SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions, null, null); + SimulateBulkRequest simulateBulkRequest = new SimulateBulkRequest(pipelineSubstitutions, Map.of(), Map.of(), Map.of()); SimulateIngestService simulateIngestService = new SimulateIngestService(ingestService, simulateBulkRequest); Pipeline pipeline1 = 
simulateIngestService.getPipeline("pipeline1");
             assertThat(pipeline1.getProcessors(), contains(transformedMatch(Processor::getType, equalTo("processor1"))));

From f6c0a245fd15519990316f1e17e3e30ef0d31662 Mon Sep 17 00:00:00 2001
From: Tim Vernum
Date: Tue, 22 Oct 2024 11:20:22 +1100
Subject: [PATCH 002/202] [Test] Add client param indexExists (#115180)

This allows us to use the admin client to easily check whether an index
exists (that may not be visible to the standard client)
---
 .../java/org/elasticsearch/test/rest/ESRestTestCase.java | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index d17016f850300..22f93e6bda61f 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -1896,7 +1896,11 @@ protected Map<String, Object> getIndexMappingAsMap(String index) throws IOExcept
     }
 
     protected static boolean indexExists(String index) throws IOException {
-        Response response = client().performRequest(new Request("HEAD", "/" + index));
+        return indexExists(client(), index);
+    }
+
+    protected static boolean indexExists(RestClient client, String index) throws IOException {
+        Response response = client.performRequest(new Request("HEAD", "/" + index));
         return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode();
     }
 
From aff4edd51bcb32c6478056087492c73890f8cf23 Mon Sep 17 00:00:00 2001
From: Ignacio Vera
Date: Tue, 22 Oct 2024 07:21:18 +0200
Subject: [PATCH 003/202] Don't use a BytesStreamOutput to copy keys in
 BytesRefBlockHash (#114819)

Removes the size limit on BytesStreamOutput when copying keys.
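Before the diff itself, a compact sketch of the copy pattern this commit adopts: iterate the hash's keys with a single reusable spare `BytesRef` and append each key directly to a block builder, instead of round-tripping the whole `BytesRefArray` through a `BytesStreamOutput` (which imposed a size limit). The `Hash` and `Builder` interfaces below are simplified stand-ins for the real `BytesRefHash` and `BytesRefBlock.Builder`, which have much wider APIs:

```java
import org.apache.lucene.util.BytesRef;

// Simplified stand-in: only the two methods the copy loop needs.
interface Hash {
    long size();
    BytesRef get(long id, BytesRef spare); // fills 'spare' with the key for 'id' and returns it
}

// Simplified stand-in for the block builder that receives the keys.
interface Builder {
    void appendBytesRef(BytesRef value);
}

class KeyCopySketch {
    // The pattern the patch switches to: one spare BytesRef, one direct append
    // per key, no intermediate serialization buffer.
    static void copyKeys(Hash hash, Builder builder) {
        final BytesRef spare = new BytesRef();
        for (long i = 0; i < hash.size(); i++) {
            builder.appendBytesRef(hash.get(i, spare));
        }
    }
}
```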
--- docs/changelog/114819.yaml | 6 +++++ .../blockhash/BytesRefBlockHash.java | 20 +++++------------ .../aggregation/blockhash/X-BlockHash.java.st | 22 +++++-------------- 3 files changed, 16 insertions(+), 32 deletions(-) create mode 100644 docs/changelog/114819.yaml diff --git a/docs/changelog/114819.yaml b/docs/changelog/114819.yaml new file mode 100644 index 0000000000000..f8d03f7024801 --- /dev/null +++ b/docs/changelog/114819.yaml @@ -0,0 +1,6 @@ +pr: 114819 +summary: Don't use a `BytesStreamOutput` to copy keys in `BytesRefBlockHash` +area: EQL +type: bug +issues: + - 114599 diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java index 3c5bf2c18c915..b8ea7658a8247 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java @@ -8,12 +8,9 @@ package org.elasticsearch.compute.aggregation.blockhash; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BitArray; -import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.aggregation.SeenGroupIds; @@ -30,8 +27,6 @@ import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupeInt; import org.elasticsearch.core.ReleasableIterator; -import java.io.IOException; - /** * Maps a {@link BytesRefBlock} column to group ids. * This class is generated. Do not edit it. @@ -197,26 +192,21 @@ public BytesRefBlock[] getKeys() { * without and still read from the block. */ // TODO replace with takeBytesRefsOwnership ?! 
+ final BytesRef spare = new BytesRef(); if (seenNull) { try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size() + 1))) { builder.appendNull(); - BytesRef spare = new BytesRef(); for (long i = 0; i < hash.size(); i++) { builder.appendBytesRef(hash.get(i, spare)); } return new BytesRefBlock[] { builder.build() }; } } - - final int size = Math.toIntExact(hash.size()); - try (BytesStreamOutput out = new BytesStreamOutput()) { - hash.getBytesRefs().writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - return new BytesRefBlock[] { - blockFactory.newBytesRefArrayVector(new BytesRefArray(in, BigArrays.NON_RECYCLING_INSTANCE), size).asBlock() }; + try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size()))) { + for (long i = 0; i < hash.size(); i++) { + builder.appendBytesRef(hash.get(i, spare)); } - } catch (IOException e) { - throw new IllegalStateException(e); + return new BytesRefBlock[] { builder.build() }; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st index 7c21cff56d7bb..2a3d1143236ac 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st @@ -9,15 +9,10 @@ package org.elasticsearch.compute.aggregation.blockhash; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; $endif$ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BitArray; -$if(BytesRef)$ -import org.elasticsearch.common.util.BytesRefArray; -$endif$ import org.elasticsearch.common.util.$Hash$; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.aggregation.SeenGroupIds; @@ -58,8 +53,6 @@ $endif$ import org.elasticsearch.core.ReleasableIterator; $if(BytesRef)$ -import java.io.IOException; - $else$ import java.util.BitSet; @@ -250,26 +243,21 @@ $if(BytesRef)$ * without and still read from the block. */ // TODO replace with takeBytesRefsOwnership ?! 
+ final BytesRef spare = new BytesRef(); if (seenNull) { try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size() + 1))) { builder.appendNull(); - BytesRef spare = new BytesRef(); for (long i = 0; i < hash.size(); i++) { builder.appendBytesRef(hash.get(i, spare)); } return new BytesRefBlock[] { builder.build() }; } } - - final int size = Math.toIntExact(hash.size()); - try (BytesStreamOutput out = new BytesStreamOutput()) { - hash.getBytesRefs().writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - return new BytesRefBlock[] { - blockFactory.newBytesRefArrayVector(new BytesRefArray(in, BigArrays.NON_RECYCLING_INSTANCE), size).asBlock() }; + try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size()))) { + for (long i = 0; i < hash.size(); i++) { + builder.appendBytesRef(hash.get(i, spare)); } - } catch (IOException e) { - throw new IllegalStateException(e); + return new BytesRefBlock[] { builder.build() }; } $else$ if (seenNull) { From 013760cf4a408403044dc935122cf521d63dd92a Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 22 Oct 2024 07:37:28 +0200 Subject: [PATCH 004/202] Grow internal arrays when growing the capacity in AbstractHash implementations (#114907) This commit resizes those arrays when incrementing the capacity of the hashes to the maxSize. --- .../java/org/elasticsearch/common/util/BytesRefHash.java | 6 +++--- .../main/java/org/elasticsearch/common/util/LongHash.java | 4 ++-- .../java/org/elasticsearch/common/util/LongLongHash.java | 4 ++-- .../compute/aggregation/blockhash/BlockHashTests.java | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/BytesRefHash.java b/server/src/main/java/org/elasticsearch/common/util/BytesRefHash.java index 208d29edad71d..288462ba3bbcb 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BytesRefHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/BytesRefHash.java @@ -48,7 +48,7 @@ public BytesRefHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { boolean success = false; try { // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. - this.hashes = bigArrays.newIntArray(capacity, false); + this.hashes = bigArrays.newIntArray(maxSize, false); this.bytesRefs = new BytesRefArray(capacity, bigArrays); success = true; } finally { @@ -98,7 +98,7 @@ public BytesRefHash(BytesRefArray bytesRefs, float maxLoadFactor, BigArrays bigA boolean success = false; try { // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. 
- this.hashes = bigArrays.newIntArray(bytesRefs.size() + 1, false); + this.hashes = bigArrays.newIntArray(maxSize, false); this.bytesRefs = BytesRefArray.takeOwnershipOf(bytesRefs); success = true; } finally { @@ -182,7 +182,6 @@ private long set(BytesRef key, int code, long id) { private void append(long id, BytesRef key, int code) { assert size == id; bytesRefs.append(key); - hashes = bigArrays.grow(hashes, id + 1); hashes.set(id, code); } @@ -211,6 +210,7 @@ public long add(BytesRef key, int code) { if (size >= maxSize) { assert size == maxSize; grow(); + hashes = bigArrays.resize(hashes, maxSize); } assert size < maxSize; return set(key, rehash(code), size); diff --git a/server/src/main/java/org/elasticsearch/common/util/LongHash.java b/server/src/main/java/org/elasticsearch/common/util/LongHash.java index 0c681063c50b0..3eeb60e419a19 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongHash.java @@ -33,7 +33,7 @@ public LongHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { super(capacity, maxLoadFactor, bigArrays); try { // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. - keys = bigArrays.newLongArray(capacity, false); + keys = bigArrays.newLongArray(maxSize, false); } finally { if (keys == null) { close(); @@ -78,7 +78,6 @@ private long set(long key, long id) { } private void append(long id, long key) { - keys = bigArrays.grow(keys, id + 1); keys.set(id, key); } @@ -102,6 +101,7 @@ public long add(long key) { if (size >= maxSize) { assert size == maxSize; grow(); + keys = bigArrays.resize(keys, maxSize); } assert size < maxSize; return set(key, size); diff --git a/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java b/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java index f7708af59dde2..031794ed9c9c6 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java @@ -40,7 +40,7 @@ public LongLongHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { super(capacity, maxLoadFactor, bigArrays); try { // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. 
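// Each LongLongHash entry stores a pair of longs at offsets 2*id and 2*id+1
// (see append() below), which is why `keys` is sized to 2 * maxSize here and
// resized to maxSize * 2 in add(), rather than to maxSize as in the other hashes.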
- keys = bigArrays.newLongArray(2 * capacity, false); + keys = bigArrays.newLongArray(2 * maxSize, false); } finally { if (keys == null) { close(); @@ -99,7 +99,6 @@ private long set(long key1, long key2, long id) { private void append(long id, long key1, long key2) { long keyOffset = 2 * id; - keys = bigArrays.grow(keys, keyOffset + 2); keys.set(keyOffset, key1); keys.set(keyOffset + 1, key2); } @@ -128,6 +127,7 @@ public long add(long key1, long key2) { if (size >= maxSize) { assert size == maxSize; grow(); + keys = bigArrays.resize(keys, maxSize * 2); } assert size < maxSize; return set(key1, key2, size); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java index aeea18e52da0f..088e791348840 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java @@ -1147,7 +1147,7 @@ public void testLongBytesRefHashWithMultiValuedFields() { } else { assertThat( ordsAndKeys.description, - equalTo("BytesRefLongBlockHash{keys=[BytesRefKey[channel=1], LongKey[channel=0]], entries=9, size=491b}") + equalTo("BytesRefLongBlockHash{keys=[BytesRefKey[channel=1], LongKey[channel=0]], entries=9, size=483b}") ); assertOrds( ordsAndKeys.ords, From 185bf683787008495cc417a806675654ecb9e996 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 22 Oct 2024 08:58:21 +0200 Subject: [PATCH 005/202] Add prefilters only once in the compound and text similarity retrievers (#114983) This change ensures that the prefilters are propagated to the downstream retrievers only once. It also removes the ability to extend `explainQuery` in the compound retriever. This is not needed as the rank docs are now responsible for the explanation.
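Concretely, before this change a prefilter could reach the per-shard query twice: once because it was copied into the inner retriever's own prefilter list (and folded into the query by extractToSearchSourceBuilder), and once more because createSearchSourceBuilder wrapped the extracted query in a fresh bool query carrying the same filters. Roughly, the removed block did the following (comments added for illustration):

    // the inner retriever has already folded the prefilters into sourceBuilder.query()
    QueryBuilder query = sourceBuilder.query();
    BoolQueryBuilder newQuery = new BoolQueryBuilder();
    if (query != null) {
        newQuery.must(query);                          // query already contains the filters
    }
    preFilterQueryBuilders.forEach(newQuery::filter);  // ...and here they were applied again
    sourceBuilder.query(newQuery);

After the change, the filters are handed to the inner retriever exactly once and the extra bool wrapper is gone, as the diff below shows.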
--- .../retriever/CompoundRetrieverBuilder.java | 18 ++++++------------ .../TextSimilarityRankRetrieverBuilder.java | 19 +------------------ 2 files changed, 7 insertions(+), 30 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index e994c55e43452..85dabf6eb6465 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.TransportMultiSearchAction; -import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -163,6 +162,11 @@ public final QueryBuilder topDocsQuery() { throw new IllegalStateException("Should not be called, missing a rewrite?"); } + @Override + public final QueryBuilder explainQuery() { + throw new IllegalStateException("Should not be called, missing a rewrite?"); + } + @Override public final void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { throw new IllegalStateException("Should not be called, missing a rewrite?"); @@ -216,22 +220,12 @@ protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, .trackTotalHits(false) .storedFields(new StoredFieldsContext(false)) .size(rankWindowSize); + // apply the pre-filters downstream once if (preFilterQueryBuilders.isEmpty() == false) { retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); } retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, true); - // apply the pre-filters - if (preFilterQueryBuilders.size() > 0) { - QueryBuilder query = sourceBuilder.query(); - BoolQueryBuilder newQuery = new BoolQueryBuilder(); - if (query != null) { - newQuery.must(query); - } - preFilterQueryBuilders.forEach(newQuery::filter); - sourceBuilder.query(newQuery); - } - // Record the shard id in the sort result List> sortBuilders = sourceBuilder.sorts() != null ? 
new ArrayList<>(sourceBuilder.sorts()) : new ArrayList<>(); if (sortBuilders.isEmpty()) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index 8bccf6e7d1022..342199dc51db8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -20,7 +19,6 @@ import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; @@ -158,33 +156,18 @@ protected RankDoc[] combineInnerRetrieverResults(List rankResults) { return textSimilarityRankDocs; } - @Override - public QueryBuilder explainQuery() { - // the original matching set of the TextSimilarityRank retriever is specified by its nested retriever - return new RankDocsQueryBuilder(rankDocs, new QueryBuilder[] { innerRetrievers.getFirst().retriever().explainQuery() }, true); - } - @Override protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit) .trackTotalHits(false) .storedFields(new StoredFieldsContext(false)) .size(rankWindowSize); + // apply the pre-filters downstream once if (preFilterQueryBuilders.isEmpty() == false) { retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); } retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, true); - // apply the pre-filters - if (preFilterQueryBuilders.size() > 0) { - QueryBuilder query = sourceBuilder.query(); - BoolQueryBuilder newQuery = new BoolQueryBuilder(); - if (query != null) { - newQuery.must(query); - } - preFilterQueryBuilders.forEach(newQuery::filter); - sourceBuilder.query(newQuery); - } sourceBuilder.rankBuilder( new TextSimilarityRankBuilder(this.field, this.inferenceId, this.inferenceText, this.rankWindowSize, this.minScore) ); From e6e147e93b178391ca001913abac140e99cace78 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Tue, 22 Oct 2024 10:36:06 +0300 Subject: [PATCH 006/202] Adjust failure store to work with TSDS (#114307) In this PR we add a test and we fix the issues we encountered when we enabled the failure store for TSDS and logsdb. **Logsdb** Logsdb worked out of the box, so we just added the test that indexes with a bulk request a couple of documents and tests how they are ingested. **TSDS** Here it was a bit trickier. 
We encountered the following issues: - TSDS requires a timestamp to determine the write index of the data stream, which means this failure happens earlier in the process than we had anticipated so far. We added a special exception to detect this case, and we treat it accordingly. - The template of a TSDS data stream sets certain settings that we do not want in the failure store index. We added an allowlist that is applied before we add the necessary index settings. Furthermore, we added a test case to capture this. --- .../datastreams/TSDBIndexingIT.java | 3 +- .../datastreams/DataStreamFeatures.java | 6 ++ .../test/data_stream/150_tsdb.yml | 101 ++++++++++++++++++ .../190_failure_store_redirection.yml | 2 +- .../CreateIndexClusterStateUpdateRequest.java | 12 +++ .../action/bulk/BulkOperation.java | 7 ++ .../cluster/metadata/DataStream.java | 24 ++++- .../DataStreamFailureStoreDefinition.java | 47 ++++++-- .../MetadataCreateDataStreamService.java | 3 +- .../metadata/MetadataCreateIndexService.java | 7 +- ...DataStreamFailureStoreDefinitionTests.java | 73 +++++++++++++ .../rest-api-spec/test/20_failure_store.yml | 99 +++++++++++++++++ 12 files changed, 369 insertions(+), 15 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinitionTests.java create mode 100644 x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_failure_store.yml diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index a2557a4de6e6d..29ec326548f2b 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.IndexDocFailureStoreStatus; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; @@ -170,7 +171,7 @@ public void testTimeRanges() throws Exception { var indexRequest = new IndexRequest("k8s").opType(DocWriteRequest.OpType.CREATE); time = randomBoolean() ?
endTime : endTime.plusSeconds(randomIntBetween(1, 99)); indexRequest.source(DOC.replace("$time", formatInstant(time)), XContentType.JSON); - expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest).actionGet()); + expectThrows(IndexDocFailureStoreStatus.ExceptionWithFailureStoreStatus.class, () -> client().index(indexRequest).actionGet()); } // Fetch UpdateTimeSeriesRangeService and increment time range of latest backing index: diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java index ab7e590b1631e..f60a3e5c47a7f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java @@ -26,6 +26,7 @@ public class DataStreamFeatures implements FeatureSpecification { public static final NodeFeature DATA_STREAM_LIFECYCLE = new NodeFeature("data_stream.lifecycle"); + public static final NodeFeature DATA_STREAM_FAILURE_STORE_TSDB_FIX = new NodeFeature("data_stream.failure_store.tsdb_fix"); @Override public Map getHistoricalFeatures() { @@ -41,4 +42,9 @@ public Set getFeatures() { DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14 ); } + + @Override + public Set getTestFeatures() { + return Set.of(DATA_STREAM_FAILURE_STORE_TSDB_FIX); + } } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index 56f387c016261..de5cf3baa744e 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -182,6 +182,107 @@ index without timestamp: body: - '{"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' +--- +TSDB failures go to failure store: + - requires: + cluster_features: ["data_stream.failure_store.tsdb_fix"] + reason: "tests tsdb failure store fixes in 8.16.0 that catch timestamp errors that happen earlier in the process and redirect them to the failure store." 
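+ # The template below enables the failure store on a TSDS ("fs-k8s*") with a fixed
+ # time_series start/end window: documents without a timestamp, or with one outside
+ # the window, should be redirected to the failure store instead of being rejected.
+ # Writes to the plain "k8s" data stream (no failure store) are asserted further down
+ # to still fail with a timestamp_error.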
+ + - do: + allowed_warnings: + - "index template [my-template2] has index patterns [fs-k8s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation" + indices.put_index_template: + name: my-template2 + body: + index_patterns: [ "fs-k8s*" ] + data_stream: + failure_store: true + template: + settings: + index: + mode: time_series + number_of_replicas: 1 + number_of_shards: 2 + routing_path: [ metricset, time_series_dimension ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + index: + index: fs-k8s + body: + - '{"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - match: { result : "created"} + - match: { failure_store : "used"} + + - do: + bulk: + refresh: true + body: + - '{ "create": { "_index": "fs-k8s"} }' + - '{"@timestamp":"2021-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "k8s"} }' + - '{ "@timestamp": "2021-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "fs-k8s"} }' + - '{ "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "fs-k8s"} }' + - '{ "@timestamp":"2000-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "k8s"} }' + - '{"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "k8s"} }' + - '{ "@timestamp":"2000-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - is_true: errors + + # Successfully indexed to backing index + - match: { items.0.create._index: '/\.ds-fs-k8s-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.0.create.status: 201 } + - is_false: items.0.create.failure_store + - match: { items.1.create._index: '/\.ds-k8s-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.1.create.status: 201 } + - is_false: items.1.create.failure_store + + # Successfully indexed to failure store + - match: { items.2.create._index: '/\.fs-fs-k8s-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { items.2.create.status: 201 } + - match: { items.2.create.failure_store: used } + - match: { items.3.create._index: '/\.fs-fs-k8s-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { items.3.create.status: 201 } + - match: { items.3.create.failure_store: used } + + # Rejected, eligible to go to failure store, but 
failure store not enabled + - match: { items.4.create._index: 'k8s' } + - match: { items.4.create.status: 400 } + - match: { items.4.create.error.type: timestamp_error } + - match: { items.4.create.failure_store: not_enabled } + - match: { items.5.create._index: 'k8s' } + - match: { items.5.create.status: 400 } + - match: { items.5.create.error.type: timestamp_error } + - match: { items.5.create.failure_store: not_enabled } + --- index without timestamp with pipeline: - do: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index cb5578a282dc9..9b5a9dae8bc0a 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -879,7 +879,7 @@ teardown: # Successfully indexed to backing index - match: { items.0.create._index: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - match: { items.0.create.status: 201 } - - is_false: items.1.create.failure_store + - is_false: items.0.create.failure_store # Rejected but not eligible to go to failure store - match: { items.1.create._index: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 080ebb5951a7a..553f784d23a87 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -35,6 +35,7 @@ public class CreateIndexClusterStateUpdateRequest { private ResizeType resizeType; private boolean copySettings; private SystemDataStreamDescriptor systemDataStreamDescriptor; + private boolean isFailureIndex = false; private Settings settings = Settings.EMPTY; @@ -102,6 +103,11 @@ public CreateIndexClusterStateUpdateRequest systemDataStreamDescriptor(SystemDat return this; } + public CreateIndexClusterStateUpdateRequest isFailureIndex(boolean isFailureIndex) { + this.isFailureIndex = isFailureIndex; + return this; + } + public String cause() { return cause; } @@ -168,6 +174,10 @@ public String dataStreamName() { return dataStreamName; } + public boolean isFailureIndex() { + return isFailureIndex; + } + public CreateIndexClusterStateUpdateRequest dataStreamName(String dataStreamName) { this.dataStreamName = dataStreamName; return this; @@ -228,6 +238,8 @@ public String toString() { + systemDataStreamDescriptor + ", matchingTemplate=" + matchingTemplate + + ", isFailureIndex=" + + isFailureIndex + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 007f274d7f493..130d6286f7e02 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -320,6 +320,12 @@ private Map> groupRequestsByShards( shard -> new ArrayList<>() ); shardRequests.add(bulkItemRequest); + } catch (DataStream.TimestampError timestampError) { + IndexDocFailureStoreStatus failureStoreStatus =
processFailure(bulkItemRequest, clusterState, timestampError); + if (IndexDocFailureStoreStatus.USED.equals(failureStoreStatus) == false) { + String name = ia != null ? ia.getName() : docWriteRequest.index(); + addFailureAndDiscardRequest(docWriteRequest, bulkItemRequest.id(), name, timestampError, failureStoreStatus); + } } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException | ResourceNotFoundException e) { String name = ia != null ? ia.getName() : docWriteRequest.index(); var failureStoreStatus = isFailureStoreRequest(docWriteRequest) @@ -545,6 +551,7 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques boolean added = addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreCandidate.getName()); if (added) { failureStoreMetrics.incrementFailureStore(bulkItemRequest.index(), errorType, FailureStoreMetrics.ErrorLocation.SHARD); + return IndexDocFailureStoreStatus.USED; } else { failureStoreMetrics.incrementRejected( bulkItemRequest.index(), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index bedf65e1a9c8b..4dcc7c73c280e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -1343,7 +1343,7 @@ public Index getWriteIndex(IndexRequest request, Metadata metadata) { + "]" ) .collect(Collectors.joining()); - throw new IllegalArgumentException( + throw new TimestampError( "the document timestamp [" + timestampAsString + "] is outside of ranges of currently writable indices [" @@ -1405,10 +1405,10 @@ private static Instant getTimeStampFromRaw(Object rawTimestamp) { } else if (rawTimestamp instanceof String sTimestamp) { return DateFormatters.from(TIMESTAMP_FORMATTER.parse(sTimestamp), TIMESTAMP_FORMATTER.locale()).toInstant(); } else { - throw new IllegalArgumentException("timestamp [" + rawTimestamp + "] type [" + rawTimestamp.getClass() + "] error"); + throw new TimestampError("timestamp [" + rawTimestamp + "] type [" + rawTimestamp.getClass() + "] error"); } } catch (Exception e) { - throw new IllegalArgumentException("Error get data stream timestamp field: " + e.getMessage(), e); + throw new TimestampError("Error get data stream timestamp field: " + e.getMessage(), e); } } @@ -1432,7 +1432,7 @@ private static Instant getTimestampFromParser(BytesReference source, XContentTyp ); }; } catch (Exception e) { - throw new IllegalArgumentException("Error extracting data stream timestamp field: " + e.getMessage(), e); + throw new TimestampError("Error extracting data stream timestamp field: " + e.getMessage(), e); } } @@ -1741,4 +1741,20 @@ public DataStream build() { ); } } + + /** + * This is a specialised error to capture that a document does not have a valid timestamp + * to index a document. It is mainly applicable for TSDS data streams because they need the timestamp + * to determine the write index. 
+ */ + public static class TimestampError extends IllegalArgumentException { + + public TimestampError(String message, Exception cause) { + super(message, cause); + } + + public TimestampError(String message) { + super(message); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java index fd3fc1a732acb..7315e9f7a51d3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -19,6 +20,8 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import java.io.IOException; +import java.util.HashSet; +import java.util.Set; /** * A utility class that contains the mappings and settings logic for failure store indices that are a part of data streams. @@ -26,12 +29,30 @@ public class DataStreamFailureStoreDefinition { public static final String FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME = "data_streams.failure_store.refresh_interval"; + public static final String INDEX_FAILURE_STORE_VERSION_SETTING_NAME = "index.failure_store.version"; public static final Settings DATA_STREAM_FAILURE_STORE_SETTINGS; + // Only a subset of user configurable settings is applicable for a failure index. Here we have an + // allowlist that will filter all other settings out. + public static final Set SUPPORTED_USER_SETTINGS = Set.of( + DataTier.TIER_PREFERENCE, + IndexMetadata.SETTING_INDEX_HIDDEN, + INDEX_FAILURE_STORE_VERSION_SETTING_NAME, + IndexMetadata.SETTING_NUMBER_OF_SHARDS, + IndexMetadata.SETTING_NUMBER_OF_REPLICAS, + IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, + IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), + IndexMetadata.LIFECYCLE_NAME + ); + public static final Set SUPPORTED_USER_SETTINGS_PREFIXES = Set.of( + IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", + IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", + IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "." + ); public static final CompressedXContent DATA_STREAM_FAILURE_STORE_MAPPING; public static final int FAILURE_STORE_DEFINITION_VERSION = 1; public static final Setting FAILURE_STORE_DEFINITION_VERSION_SETTING = Setting.intSetting( - "index.failure_store.version", + INDEX_FAILURE_STORE_VERSION_SETTING_NAME, 0, Setting.Property.IndexScope ); @@ -40,11 +61,6 @@ public class DataStreamFailureStoreDefinition { DATA_STREAM_FAILURE_STORE_SETTINGS = Settings.builder() // Always start with the hidden settings for a backing index. .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) - // Override any pipeline settings on the failure store to not use any - // specified by the data stream template. Default pipelines are very much - // meant for the backing indices only. 
- .putNull(IndexSettings.DEFAULT_PIPELINE.getKey()) - .putNull(IndexSettings.FINAL_PIPELINE.getKey()) .put(FAILURE_STORE_DEFINITION_VERSION_SETTING.getKey(), FAILURE_STORE_DEFINITION_VERSION) .build(); @@ -199,4 +215,23 @@ public static Settings.Builder applyFailureStoreSettings(Settings nodeSettings, } return builder; } + + /** + * Removes from the provided settings any settings that are not supported by the failure store. + * ATTENTION: This method should be applied BEFORE we set the necessary settings for an index + * @param builder the settings builder that is going to be updated + * @return the original settings builder, with the unsupported settings removed. + */ + public static Settings.Builder filterUserDefinedSettings(Settings.Builder builder) { + if (builder.keys().isEmpty() == false) { + Set existingKeys = new HashSet<>(builder.keys()); + for (String setting : existingKeys) { + if (SUPPORTED_USER_SETTINGS.contains(setting) == false + && SUPPORTED_USER_SETTINGS_PREFIXES.stream().anyMatch(setting::startsWith) == false) { + builder.remove(setting); + } + } + } + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 2df9cf706d892..5dbf4da6f376f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -425,7 +425,8 @@ public static ClusterState createFailureStoreIndex( .nameResolvedInstant(nameResolvedInstant) .performReroute(false) .setMatchingTemplate(template) - .settings(indexSettings); + .settings(indexSettings) + .isFailureIndex(true); try { currentState = metadataCreateIndexService.applyCreateIndexRequest( diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 321719475c1f8..3accdd3881c6d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -983,6 +983,7 @@ static Settings aggregateIndexSettings( final Settings templateAndRequestSettings = Settings.builder().put(combinedTemplateSettings).put(request.settings()).build(); final IndexMode templateIndexMode = Optional.of(request) + .filter(r -> r.isFailureIndex() == false) .map(CreateIndexClusterStateUpdateRequest::matchingTemplate) .map(metadata::retrieveIndexModeFromTemplate) .orElse(null); @@ -1038,11 +1039,13 @@ static Settings aggregateIndexSettings( // Finally, we actually add the explicit defaults prior to the template settings and the // request settings, so that the precedence goes: - // Explicit Defaults -> Template -> Request -> Necessary Settings (# of shards, uuid, etc) + // Explicit Defaults -> Template -> Request -> Filter out failure store settings -> Necessary Settings (# of shards, uuid, etc) indexSettingsBuilder.put(additionalIndexSettings.build()); indexSettingsBuilder.put(templateSettings.build()); } - + if (request.isFailureIndex()) { + DataStreamFailureStoreDefinition.filterUserDefinedSettings(indexSettingsBuilder); + } // now, put the request settings, so they override templates indexSettingsBuilder.put(requestSettings.build()); diff --git
a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinitionTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinitionTests.java new file mode 100644 index 0000000000000..38d4031755a55 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinitionTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.cluster.metadata.DataStreamFailureStoreDefinition.INDEX_FAILURE_STORE_VERSION_SETTING_NAME; +import static org.hamcrest.Matchers.equalTo; + +public class DataStreamFailureStoreDefinitionTests extends ESTestCase { + + public void testSettingsFiltering() { + // Empty + Settings.Builder builder = Settings.builder(); + Settings.Builder expectedBuilder = Settings.builder(); + assertThat(DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys(), equalTo(expectedBuilder.keys())); + + // All supported settings + builder.put(INDEX_FAILURE_STORE_VERSION_SETTING_NAME, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(DataTier.TIER_PREFERENCE, "data_cold") + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-10") + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put(IndexMetadata.LIFECYCLE_NAME, "my-policy") + .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)); + // We expect no changes + expectedBuilder = Settings.builder().put(builder.build()); + assertThat(DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys(), equalTo(expectedBuilder.keys())); + + // Remove unsupported settings + String randomSetting = randomAlphaOfLength(10); + builder.put(INDEX_FAILURE_STORE_VERSION_SETTING_NAME, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(DataTier.TIER_PREFERENCE, "data_cold") + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-10") + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put(IndexMetadata.LIFECYCLE_NAME, "my-policy") + .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "." 
+ randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexSettings.MODE.getKey(), randomFrom(IndexMode.values())) + .put(randomSetting, randomAlphaOfLength(10)); + // We expect no changes + expectedBuilder = Settings.builder().put(builder.build()); + assertThat( + DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys().size(), + equalTo(expectedBuilder.keys().size() - 2) + ); + assertThat( + DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys().contains(IndexSettings.MODE.getKey()), + equalTo(false) + ); + assertThat(DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys().contains(randomSetting), equalTo(false)); + } + +} diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_failure_store.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_failure_store.yml new file mode 100644 index 0000000000000..21e4f49fe7af5 --- /dev/null +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_failure_store.yml @@ -0,0 +1,99 @@ +--- +teardown: + - do: + indices.delete_data_stream: + name: my-logs-fs + ignore: 404 + + - do: + indices.delete_index_template: + name: template + ignore: 404 + + - do: + indices.delete_data_stream: + name: my-logs-db + ignore: 404 + - do: + indices.delete_index_template: + name: template1 + ignore: 404 + +--- +Test failure store with logsdb: + - requires: + test_runner_features: [ capabilities, allowed_warnings ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ logsdb_index_mode ] + - method: POST + path: /_bulk + capabilities: [ 'failure_store_status' ] + - method: PUT + path: /_bulk + capabilities: [ 'failure_store_status' ] + reason: "Support for 'logsdb' index mode & failure status capability required" + + - do: + allowed_warnings: + - "index template [my-template] has index patterns [my-logs-fs*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template] will take precedence during new index creation" + indices.put_index_template: + name: my-template + body: + index_patterns: ["my-logs-fs*"] + data_stream: + failure_store: true + template: + settings: + index: + mode: logsdb + number_of_replicas: 1 + number_of_shards: 2 + - do: + allowed_warnings: + - "index template [my-template2] has index patterns [my-logs-db*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation" + indices.put_index_template: + name: my-template2 + body: + index_patterns: [ "my-logs-db*" ] + data_stream: {} + template: + settings: + index: + mode: logsdb + number_of_replicas: 1 + number_of_shards: 2 + + - do: + bulk: + refresh: true + body: + - '{ "create": { "_index": "my-logs-fs"} }' + - '{"@timestamp":"2019-08-06T12:09:12.375Z", "log.level": "INFO", "message":"Tomcat started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer"}' + - '{ "create": { "_index": "my-logs-db"} }' + - '{ "@timestamp": "2022-01-01", "log.level": "INFO", "message":"Tomcat started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer" }' + - '{ "create": { "_index": "my-logs-fs"} }' + - '{"log.level": "INFO", "message":"Tomcat 
started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer"}' + - '{ "create": { "_index": "my-logs-db"} }' + - '{"log.level": "INFO", "message":"Tomcat started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer"}' + - is_true: errors + + # Successfully indexed to backing index + - match: { items.0.create._index: '/\.ds-my-logs-fs-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.0.create.status: 201 } + - is_false: items.0.create.failure_store + - match: { items.1.create._index: '/\.ds-my-logs-db-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.1.create.status: 201 } + - is_false: items.1.create.failure_store + + # Successfully indexed to failure store + - match: { items.2.create._index: '/\.fs-my-logs-fs-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { items.2.create.status: 201 } + - match: { items.2.create.failure_store: used } + + # Rejected, eligible to go to failure store, but failure store not enabled + - match: { items.3.create._index: '/\.ds-my-logs-db-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.3.create.status: 400 } + - match: { items.3.create.error.type: document_parsing_exception } + - match: { items.3.create.failure_store: not_enabled } From 477f0cd68b5b9e02dcca0ff04d7f8d9b75c23bc2 Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Tue, 22 Oct 2024 09:41:55 +0200 Subject: [PATCH 007/202] Use pattern in wipeAllIndices and unmute testUpgradeMovesRepoToNewMetaVersion (#115232) The inference index added to the delete index call doesn't exist in all 8.x versions. AFAICT, since this is not a pattern, the wipeAllIndices call fails because it is not able to find that index. Using a wildcard instead seems to resolve the issue. Closes #114994 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 971fc161c4632..d4accd399cace 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -308,9 +308,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 -- class: org.elasticsearch.upgrades.MultiVersionRepositoryAccessIT - method: testUpgradeMovesRepoToNewMetaVersion - issue: https://github.com/elastic/elasticsearch/issues/114994 - class: org.elasticsearch.upgrades.MultiVersionRepositoryAccessIT method: testReadOnlyRepo issue: https://github.com/elastic/elasticsearch/issues/114997 From 05fc23a44005bea35035737687bc9758f27d768e Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Tue, 22 Oct 2024 10:50:39 +0200 Subject: [PATCH 008/202] Unmute MultiVersionRepositoryAccessIT.testReadOnlyRepo (#115215) I think the test failure issue that was opened is not correct; it relates to another failure. Relates https://github.com/elastic/elasticsearch/issues/114999 and https://github.com/elastic/elasticsearch/issues/114997.
--- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index d4accd399cace..1cb8baa96a942 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -308,9 +308,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 -- class: org.elasticsearch.upgrades.MultiVersionRepositoryAccessIT - method: testReadOnlyRepo - issue: https://github.com/elastic/elasticsearch/issues/114997 - class: org.elasticsearch.upgrades.MultiVersionRepositoryAccessIT method: testCreateAndRestoreSnapshot issue: https://github.com/elastic/elasticsearch/issues/114998 From f32051f4629f6f1a4192a615ec0cb5e294089fb2 Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:09:19 +0200 Subject: [PATCH 009/202] fix: use setting instead of (#115193) --- docs/reference/mapping/types/binary.asciidoc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/binary.asciidoc b/docs/reference/mapping/types/binary.asciidoc index 5733a28eb711a..81ba44c954e0a 100644 --- a/docs/reference/mapping/types/binary.asciidoc +++ b/docs/reference/mapping/types/binary.asciidoc @@ -68,8 +68,16 @@ Synthetic source may sort `binary` values in order of their byte representation. ---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "binary": { "type": "binary", "doc_values": true } } From c7f53ff3b639555736e2b6d0864e537cacbc2d59 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 22 Oct 2024 12:30:22 +0100 Subject: [PATCH 010/202] [ML] Dynamically get num allocations for ml node models (#115233) The GET inference API should now dynamically update the num_allocations field with the actual number from the deployed model, which is useful when adaptive allocations are used. --- .../inference/InferenceService.java | 4 + .../inference/CreateFromDeploymentIT.java | 63 ++++++++++++++++ .../xpack/inference/InferenceCrudIT.java | 6 ++ .../TransportGetInferenceModelAction.java | 73 +++++++++++++++---- .../ElasticsearchInternalModel.java | 6 +- .../ElasticsearchInternalService.java | 49 ++++++++++++- .../ElasticsearchInternalServiceSettings.java | 10 ++- .../ElserInternalModelTests.java | 30 ++++++++ 8 files changed, 219 insertions(+), 22 deletions(-) create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModelTests.java diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index d437533a8603d..2c99563955746 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -210,4 +210,8 @@ default List defaultConfigIds() { default void defaultConfigs(ActionListener> defaultsListener) { defaultsListener.onResponse(List.of()); } + + default void updateModelsWithDynamicFields(List model, ActionListener> listener) { + listener.onResponse(model); + } } diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java
b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java index f81ebc25dc860..0bfb6e9e43b03 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java @@ -109,6 +109,55 @@ public void testModelIdDoesNotMatch() throws IOException { ); } + public void testNumAllocationsIsUpdated() throws IOException { + var modelId = "update_num_allocations"; + var deploymentId = modelId; + + CustomElandModelIT.createMlNodeTextExpansionModel(modelId, client()); + var response = startMlNodeDeploymemnt(modelId, deploymentId); + assertOkOrCreated(response); + + var inferenceId = "test_num_allocations_updated"; + var putModel = putModel(inferenceId, endpointConfig(deploymentId), TaskType.SPARSE_EMBEDDING); + var serviceSettings = putModel.get("service_settings"); + assertThat( + putModel.toString(), + serviceSettings, + is( + Map.of( + "num_allocations", + 1, + "num_threads", + 1, + "model_id", + "update_num_allocations", + "deployment_id", + "update_num_allocations" + ) + ) + ); + + assertOkOrCreated(updateMlNodeDeploymemnt(deploymentId, 2)); + + var updatedServiceSettings = getModel(inferenceId).get("service_settings"); + assertThat( + updatedServiceSettings.toString(), + updatedServiceSettings, + is( + Map.of( + "num_allocations", + 2, + "num_threads", + 1, + "model_id", + "update_num_allocations", + "deployment_id", + "update_num_allocations" + ) + ) + ); + } + private String endpointConfig(String deploymentId) { return Strings.format(""" { @@ -147,6 +196,20 @@ private Response startMlNodeDeploymemnt(String modelId, String deploymentId) thr return client().performRequest(request); } + private Response updateMlNodeDeploymemnt(String deploymentId, int numAllocations) throws IOException { + String endPoint = "/_ml/trained_models/" + deploymentId + "/deployment/_update"; + + var body = Strings.format(""" + { + "number_of_allocations": %d + } + """, numAllocations); + + Request request = new Request("POST", endPoint); + request.setJsonEntity(body); + return client().performRequest(request); + } + protected void stopMlNodeDeployment(String deploymentId) throws IOException { String endpoint = "/_ml/trained_models/" + deploymentId + "/deployment/_stop"; Request request = new Request("POST", endpoint); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index cbc50c361e3b5..37de2caadb475 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -24,6 +24,7 @@ import java.util.stream.Stream; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalToIgnoringCase; import static org.hamcrest.Matchers.hasSize; @@ -326,4 +327,9 @@ public void testSupportedStream() throws Exception { deleteModel(modelId); } } + + public void testGetZeroModels() throws IOException { 
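+ // a task type with no configured endpoints should return an empty list, not an error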
+ var models = getModels("_all", TaskType.RERANK); + assertThat(models, empty()); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java index edcec45b50a16..01e663df4a3ea 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java @@ -9,13 +9,13 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.inference.InferenceServiceRegistry; -import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; import org.elasticsearch.inference.UnparsedModel; import org.elasticsearch.injection.guice.Inject; @@ -29,8 +29,11 @@ import org.elasticsearch.xpack.inference.registry.ModelRegistry; import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; import java.util.List; import java.util.concurrent.Executor; +import java.util.stream.Collectors; public class TransportGetInferenceModelAction extends HandledTransportAction< GetInferenceModelAction.Request, @@ -96,39 +99,77 @@ private void getSingleModel( var model = service.get() .parsePersistedConfig(unparsedModel.inferenceEntityId(), unparsedModel.taskType(), unparsedModel.settings()); - delegate.onResponse(new GetInferenceModelAction.Response(List.of(model.getConfigurations()))); + + service.get() + .updateModelsWithDynamicFields( + List.of(model), + delegate.delegateFailureAndWrap( + (l2, updatedModels) -> l2.onResponse( + new GetInferenceModelAction.Response( + updatedModels.stream().map(Model::getConfigurations).collect(Collectors.toList()) + ) + ) + ) + ); })); } private void getAllModels(boolean persistDefaultEndpoints, ActionListener listener) { modelRegistry.getAllModels( persistDefaultEndpoints, - listener.delegateFailureAndWrap((l, models) -> executor.execute(ActionRunnable.supply(l, () -> parseModels(models)))) + listener.delegateFailureAndWrap((l, models) -> executor.execute(() -> parseModels(models, listener))) ); } private void getModelsByTaskType(TaskType taskType, ActionListener listener) { modelRegistry.getModelsByTaskType( taskType, - listener.delegateFailureAndWrap((l, models) -> executor.execute(ActionRunnable.supply(l, () -> parseModels(models)))) + listener.delegateFailureAndWrap((l, models) -> executor.execute(() -> parseModels(models, listener))) ); } - private GetInferenceModelAction.Response parseModels(List unparsedModels) { - var parsedModels = new ArrayList(); + private void parseModels(List unparsedModels, ActionListener listener) { + if (unparsedModels.isEmpty()) { + listener.onResponse(new GetInferenceModelAction.Response(List.of())); + return; + } - for (var unparsedModel : unparsedModels) { - var service = serviceRegistry.getService(unparsedModel.service()); - if (service.isEmpty()) { - throw 
serviceNotFoundException(unparsedModel.service(), unparsedModel.inferenceEntityId()); + var parsedModelsByService = new HashMap>(); + try { + for (var unparsedModel : unparsedModels) { + var service = serviceRegistry.getService(unparsedModel.service()); + if (service.isEmpty()) { + throw serviceNotFoundException(unparsedModel.service(), unparsedModel.inferenceEntityId()); + } + var list = parsedModelsByService.computeIfAbsent(service.get().name(), s -> new ArrayList<>()); + list.add( + service.get() + .parsePersistedConfig(unparsedModel.inferenceEntityId(), unparsedModel.taskType(), unparsedModel.settings()) + ); } - parsedModels.add( - service.get() - .parsePersistedConfig(unparsedModel.inferenceEntityId(), unparsedModel.taskType(), unparsedModel.settings()) - .getConfigurations() + + var groupedListener = new GroupedActionListener>( + parsedModelsByService.entrySet().size(), + listener.delegateFailureAndWrap((delegate, listOfListOfModels) -> { + var modifiable = new ArrayList(); + for (var l : listOfListOfModels) { + modifiable.addAll(l); + } + modifiable.sort(Comparator.comparing(Model::getInferenceEntityId)); + delegate.onResponse( + new GetInferenceModelAction.Response(modifiable.stream().map(Model::getConfigurations).collect(Collectors.toList())) + ); + }) ); + + for (var entry : parsedModelsByService.entrySet()) { + serviceRegistry.getService(entry.getKey()) + .get() // must be non-null to get this far + .updateModelsWithDynamicFields(entry.getValue(), groupedListener); + } + } catch (Exception e) { + listener.onFailure(e); } - return new GetInferenceModelAction.Response(parsedModels); } private ElasticsearchStatusException serviceNotFoundException(String service, String inferenceId) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java index d38def8dca47f..8b2969c39b7ba 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java @@ -21,7 +21,7 @@ public abstract class ElasticsearchInternalModel extends Model { - protected final ElasticsearchInternalServiceSettings internalServiceSettings; + protected ElasticsearchInternalServiceSettings internalServiceSettings; public ElasticsearchInternalModel( String inferenceEntityId, @@ -91,6 +91,10 @@ public ElasticsearchInternalServiceSettings getServiceSettings() { return (ElasticsearchInternalServiceSettings) super.getServiceSettings(); } + public void updateNumAllocations(Integer numAllocations) { + this.internalServiceSettings.setNumAllocations(numAllocations); + } + @Override public String toString() { return Strings.toString(this.getConfigurations()); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 389a9fa369c21..49919fda9f89d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; +import org.elasticsearch.xpack.core.ml.action.GetDeploymentStatsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction; @@ -56,6 +57,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -786,11 +788,50 @@ public List defaultConfigIds() { ); } - /** - * Default configurations that can be out of the box without creating an endpoint first. - * @param defaultsListener Config listener - */ @Override + public void updateModelsWithDynamicFields(List models, ActionListener> listener) { + + if (models.isEmpty()) { + listener.onResponse(models); + return; + } + + var modelsByDeploymentIds = new HashMap(); + for (var model : models) { + assert model instanceof ElasticsearchInternalModel; + + if (model instanceof ElasticsearchInternalModel esModel) { + modelsByDeploymentIds.put(esModel.mlNodeDeploymentId(), esModel); + } else { + listener.onFailure( + new ElasticsearchStatusException( + "Cannot update model [{}] as it is not an Elasticsearch service model", + RestStatus.INTERNAL_SERVER_ERROR, + model.getInferenceEntityId() + ) + ); + return; + } + } + + String deploymentIds = String.join(",", modelsByDeploymentIds.keySet()); + client.execute( + GetDeploymentStatsAction.INSTANCE, + new GetDeploymentStatsAction.Request(deploymentIds), + ActionListener.wrap(stats -> { + for (var deploymentStats : stats.getStats().results()) { + var model = modelsByDeploymentIds.get(deploymentStats.getDeploymentId()); + model.updateNumAllocations(deploymentStats.getNumberOfAllocations()); + } + listener.onResponse(new ArrayList<>(modelsByDeploymentIds.values())); + }, e -> { + logger.warn("Get deployment stats failed, cannot update the endpoint's number of allocations", e); + // continue with the original response + listener.onResponse(models); + }) + ); + } + public void defaultConfigs(ActionListener> defaultsListener) { preferredModelVariantFn.accept(defaultsListener.delegateFailureAndWrap((delegate, preferredModelVariant) -> { if (PreferredModelVariant.LINUX_X86_OPTIMIZED.equals(preferredModelVariant)) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java index fedf48fb583a3..962c939146ef2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java @@ -39,7 +39,7 @@ public class ElasticsearchInternalServiceSettings implements ServiceSettings { public static final String DEPLOYMENT_ID = "deployment_id"; public static final String ADAPTIVE_ALLOCATIONS = "adaptive_allocations"; - 
private final Integer numAllocations;
+    private Integer numAllocations;
     private final int numThreads;
     private final String modelId;
     private final AdaptiveAllocationsSettings adaptiveAllocationsSettings;
@@ -172,6 +172,10 @@ public ElasticsearchInternalServiceSettings(StreamInput in) throws IOException {
             : null;
     }
 
+    public void setNumAllocations(Integer numAllocations) {
+        this.numAllocations = numAllocations;
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
@@ -194,6 +198,10 @@ public String modelId() {
         return modelId;
     }
 
+    public String deploymentId() {
+        return modelId;
+    }
+
     public Integer getNumAllocations() {
         return numAllocations;
     }
diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModelTests.java
new file mode 100644
index 0000000000000..96cd42efa42f5
--- /dev/null
+++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModelTests.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.inference.services.elasticsearch;
+
+import org.elasticsearch.inference.TaskType;
+import org.elasticsearch.test.ESTestCase;
+
+public class ElserInternalModelTests extends ESTestCase {
+    public void testUpdateNumAllocation() {
+        var model = new ElserInternalModel(
+            "foo",
+            TaskType.SPARSE_EMBEDDING,
+            ElasticsearchInternalService.NAME,
+            new ElserInternalServiceSettings(null, 1, "elser", null),
+            new ElserMlNodeTaskSettings(),
+            null
+        );
+
+        model.updateNumAllocations(1);
+        assertEquals(1, model.getServiceSettings().getNumAllocations().intValue());
+
+        model.updateNumAllocations(null);
+        assertNull(model.getServiceSettings().getNumAllocations());
+    }
+}
From 332c9224f2f7c774dc470c65c20fd2836b9f3ee9 Mon Sep 17 00:00:00 2001
From: Patrick Doyle <810052+prdoyle@users.noreply.github.com>
Date: Tue, 22 Oct 2024 09:09:21 -0400
Subject: [PATCH 011/202] onProcessFileChangesException (#115038)

---
 .../common/file/AbstractFileWatchingService.java | 16 ++++++++++++----
 .../service/FileSettingsService.java             | 12 +++++++++++-
 2 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java b/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java
index a900722397edd..41998bf974bf9 100644
--- a/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java
+++ b/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java
@@ -313,12 +313,20 @@ void processSettingsOnServiceStartAndNotifyListeners() throws InterruptedExcepti
     void processSettingsAndNotifyListeners() throws InterruptedException {
         try {
             processFileChanges();
-            for (var listener : eventListeners) {
-                listener.watchedFileChanged();
-            }
         } catch (IOException | ExecutionException e) {
-            logger.error(() -> "Error processing watched file: " + watchedFile(), e);
+            onProcessFileChangesException(e);
+            return;
         }
+        for (var listener : eventListeners) {
+            listener.watchedFileChanged();
+        }
+
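// Editorial aside, not part of the patch: the hook declared just below lets a
// subclass intercept checked failures from processFileChanges() instead of
// always logging an error. A minimal sketch, assuming a hypothetical cause
// type ExpectedTransientFailure:
//
//     @Override
//     protected void onProcessFileChangesException(Exception e) {
//         if (e.getCause() instanceof ExpectedTransientFailure) {
//             logger.warn("transient failure applying file settings, awaiting next change", e);
//         } else {
//             super.onProcessFileChangesException(e); // default: log as error
//         }
//     }
//
// FileSettingsService uses exactly this shape further down in this commit to
// special-case FailedToCommitClusterStateException.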
} + + /** + * Called for checked exceptions only. + */ + protected void onProcessFileChangesException(Exception e) { + logger.error(() -> "Error processing watched file: " + watchedFile(), e); } // package private for testing diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 811b59465ce76..601fc3c86d98f 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; @@ -144,7 +145,16 @@ private void processFileChanges(ReservedStateVersionCheck versionCheck) throws I } @Override - protected void processInitialFileMissing() throws ExecutionException, InterruptedException { + protected void onProcessFileChangesException(Exception e) { + if (e instanceof ExecutionException && e.getCause() instanceof FailedToCommitClusterStateException f) { + logger.error("Unable to commit cluster state", e); + } else { + super.onProcessFileChangesException(e); + } + } + + @Override + protected void processInitialFileMissing() throws ExecutionException, InterruptedException, IOException { PlainActionFuture completion = new PlainActionFuture<>(); logger.info("setting file [{}] not found, initializing [{}] as empty", watchedFile(), NAMESPACE); stateService.initEmpty(NAMESPACE, completion); From 003fbc73f6f4913135acf6ce4484e8e8ba032251 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Tue, 22 Oct 2024 16:26:30 +0300 Subject: [PATCH 012/202] Adding validation for incompatibility of compound retrievers and scroll (#115106) --- .../search/builder/SearchSourceBuilder.java | 2 +- .../retriever/CompoundRetrieverBuilder.java | 10 +++++++--- .../search/retriever/RetrieverBuilder.java | 1 + .../action/search/SearchRequestTests.java | 15 ++++++++++++++- 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 6ceb02f0e797f..9c96319136007 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -2179,7 +2179,7 @@ public ActionRequestValidationException validate( boolean allowPartialSearchResults ) { if (retriever() != null) { - validationException = retriever().validate(this, validationException, allowPartialSearchResults); + validationException = retriever().validate(this, validationException, isScroll, allowPartialSearchResults); List specified = new ArrayList<>(); if (subSearches().isEmpty() == false) { specified.add(QUERY_FIELD.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index 85dabf6eb6465..7373bc5b75049 100644 --- 
a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -176,9 +176,10 @@ public final void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceB public ActionRequestValidationException validate( SearchSourceBuilder source, ActionRequestValidationException validationException, + boolean isScroll, boolean allowPartialSearchResults ) { - validationException = super.validate(source, validationException, allowPartialSearchResults); + validationException = super.validate(source, validationException, isScroll, allowPartialSearchResults); if (source.size() > rankWindowSize) { validationException = addValidationError( "[" @@ -194,12 +195,15 @@ public ActionRequestValidationException validate( } if (allowPartialSearchResults) { validationException = addValidationError( - "cannot specify a compound retriever and [allow_partial_search_results]", + "cannot specify [" + getName() + "] and [allow_partial_search_results]", validationException ); } + if (isScroll) { + validationException = addValidationError("cannot specify [" + getName() + "] and [scroll]", validationException); + } for (RetrieverSource innerRetriever : innerRetrievers) { - validationException = innerRetriever.retriever().validate(source, validationException, allowPartialSearchResults); + validationException = innerRetriever.retriever().validate(source, validationException, isScroll, allowPartialSearchResults); } return validationException; } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java index 882d44adb79c3..5e36ad0fd4fd6 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -239,6 +239,7 @@ public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { public ActionRequestValidationException validate( SearchSourceBuilder source, ActionRequestValidationException validationException, + boolean isScroll, boolean allowPartialSearchResults ) { return validationException; diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index c6ca97fd5694a..526961d74bf52 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -291,10 +291,23 @@ public void testValidate() throws IOException { assertNotNull(validationErrors); assertEquals(1, validationErrors.validationErrors().size()); assertEquals( - "cannot specify a compound retriever and [allow_partial_search_results]", + "cannot specify [test_compound_retriever_builder] and [allow_partial_search_results]", validationErrors.validationErrors().get(0) ); } + { + // scroll and compound retriever + SearchRequest searchRequest = createSearchRequest().source( + new SearchSourceBuilder().retriever(new TestCompoundRetrieverBuilder(randomIntBetween(1, 10))) + ); + searchRequest.allowPartialSearchResults(false); + searchRequest.scroll(TimeValue.timeValueMinutes(1)); + searchRequest.requestCache(false); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + 
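// Editorial sketch, not part of the patch: a custom retriever can veto scroll
// through the widened validate() signature introduced in this commit, e.g.
//
//     @Override
//     public ActionRequestValidationException validate(
//         SearchSourceBuilder source,
//         ActionRequestValidationException validationException,
//         boolean isScroll,
//         boolean allowPartialSearchResults
//     ) {
//         if (isScroll) {
//             validationException = addValidationError("cannot specify [" + getName() + "] and [scroll]", validationException);
//         }
//         return validationException;
//     }
//
// CompoundRetrieverBuilder forwards the same flags to every inner retriever,
// so such a veto holds anywhere in the retriever tree.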
assertEquals("cannot specify [test_compound_retriever_builder] and [scroll]", validationErrors.validationErrors().get(0)); + } { // allow_partial_results and non-compound retriever SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder().retriever(new RetrieverBuilder() { From e3c198a23a5e2f776b079ba27928c2578cadccef Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 22 Oct 2024 17:02:47 +0300 Subject: [PATCH 013/202] Handle setting merge conflicts for overruling settings providers (#115217) * Handle setting merge conflicts for overruling settings providers * spotless * update TransportSimulateIndexTemplateAction * update comment and add test * fix flakiness * fix flakiness --- .../TransportSimulateIndexTemplateAction.java | 17 +- .../metadata/MetadataCreateIndexService.java | 60 +++--- .../index/IndexSettingProvider.java | 11 ++ ...sportSimulateIndexTemplateActionTests.java | 23 ++- .../MetadataCreateIndexServiceTests.java | 175 ++++++++++++++++++ .../index/IndexSettingProviderTests.java | 40 ++-- ...RestIT.java => LogsdbWithBasicRestIT.java} | 71 ++++++- .../SyntheticSourceIndexSettingsProvider.java | 6 + 8 files changed, 362 insertions(+), 41 deletions(-) rename x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/{LogsdbRestIT.java => LogsdbWithBasicRestIT.java} (59%) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 5e3799cd14518..94d9b87467ea8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -48,6 +48,7 @@ import java.time.Instant; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -270,6 +271,7 @@ public static Template resolveTemplate( // First apply settings sourced from index settings providers final var now = Instant.now(); Settings.Builder additionalSettings = Settings.builder(); + Set overrulingSettings = new HashSet<>(); for (var provider : indexSettingProviders) { Settings result = provider.getAdditionalIndexSettings( indexName, @@ -283,8 +285,21 @@ public static Template resolveTemplate( MetadataCreateIndexService.validateAdditionalSettings(provider, result, additionalSettings); dummySettings.put(result); additionalSettings.put(result); + if (provider.overrulesTemplateAndRequestSettings()) { + overrulingSettings.addAll(result.keySet()); + } } - // Then apply settings resolved from templates: + + if (overrulingSettings.isEmpty() == false) { + // Filter any conflicting settings from overruling providers, to avoid overwriting their values from templates. + final Settings.Builder filtered = Settings.builder().put(templateSettings); + for (String setting : overrulingSettings) { + filtered.remove(setting); + } + templateSettings = filtered.build(); + } + + // Apply settings resolved from templates. 
dummySettings.put(templateSettings); final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 3accdd3881c6d..69e3b7b70ff82 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -992,6 +992,7 @@ static Settings aggregateIndexSettings( // additionalIndexSettings map final Settings.Builder additionalIndexSettings = Settings.builder(); final var resolvedAt = Instant.ofEpochMilli(request.getNameResolvedAt()); + Set overrulingSettings = new HashSet<>(); for (IndexSettingProvider provider : indexSettingProviders) { var newAdditionalSettings = provider.getAdditionalIndexSettings( request.index(), @@ -1004,36 +1005,45 @@ static Settings aggregateIndexSettings( ); validateAdditionalSettings(provider, newAdditionalSettings, additionalIndexSettings); additionalIndexSettings.put(newAdditionalSettings); + if (provider.overrulesTemplateAndRequestSettings()) { + overrulingSettings.addAll(newAdditionalSettings.keySet()); + } } - // For all the explicit settings, we go through the template and request level settings - // and see if either a template or the request has "cancelled out" an explicit default - // setting. For example, if a plugin had as an explicit setting: - // "index.mysetting": "blah - // And either a template or create index request had: - // "index.mysetting": null - // We want to remove the explicit setting not only from the explicitly set settings, but - // also from the template and request settings, so that from the newly create index's - // perspective it is as though the setting has not been set at all (using the default - // value). for (String explicitSetting : additionalIndexSettings.keys()) { - if (templateSettings.keys().contains(explicitSetting) && templateSettings.get(explicitSetting) == null) { - logger.debug( - "removing default [{}] setting as it in set to null in a template for [{}] creation", - explicitSetting, - request.index() - ); - additionalIndexSettings.remove(explicitSetting); + if (overrulingSettings.contains(explicitSetting)) { + // Remove any conflicting template and request settings to use the provided values. templateSettings.remove(explicitSetting); - } - if (requestSettings.keys().contains(explicitSetting) && requestSettings.get(explicitSetting) == null) { - logger.debug( - "removing default [{}] setting as it in set to null in the request for [{}] creation", - explicitSetting, - request.index() - ); - additionalIndexSettings.remove(explicitSetting); requestSettings.remove(explicitSetting); + } else { + // For all the explicit settings, we go through the template and request level settings + // and see if either a template or the request has "cancelled out" an explicit default + // setting. For example, if a plugin had as an explicit setting: + // "index.mysetting": "blah + // And either a template or create index request had: + // "index.mysetting": null + // We want to remove the explicit setting not only from the explicitly set settings, but + // also from the template and request settings, so that from the newly create index's + // perspective it is as though the setting has not been set at all (using the default + // value). 
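// Editorial illustration, not part of the patch: the null-cancellation branch
// below relies on Settings keeping explicit nulls visible, e.g.
//
//     Settings request = Settings.builder().putNull("index.mysetting").build();
//     // request.keys().contains("index.mysetting") == true
//     // request.get("index.mysetting") == null
//
// which is exactly the condition used to erase a provider default from the
// explicit, template, and request settings at once.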
+ if (templateSettings.keys().contains(explicitSetting) && templateSettings.get(explicitSetting) == null) { + logger.debug( + "removing default [{}] setting as it is set to null in a template for [{}] creation", + explicitSetting, + request.index() + ); + additionalIndexSettings.remove(explicitSetting); + templateSettings.remove(explicitSetting); + } + if (requestSettings.keys().contains(explicitSetting) && requestSettings.get(explicitSetting) == null) { + logger.debug( + "removing default [{}] setting as it is set to null in the request for [{}] creation", + explicitSetting, + request.index() + ); + additionalIndexSettings.remove(explicitSetting); + requestSettings.remove(explicitSetting); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java index 0180d2c8df119..6a553d5dc5440 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java @@ -57,4 +57,15 @@ Settings getAdditionalIndexSettings( record Parameters(CheckedFunction mapperServiceFactory) { } + + /** + * Indicates whether the additional settings that this provider returns can overrule the settings defined in matching template + * or in create index request. + * + * Note that this is not used during index template validation, to avoid overruling template settings that may apply to + * different contexts (e.g. the provider is not used, or it returns different setting values). + */ + default boolean overrulesTemplateAndRequestSettings() { + return false; + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateActionTests.java index 74408b99e92ce..95446149f026b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateActionTests.java @@ -49,7 +49,9 @@ public void testSettingsProviderIsOverridden() throws Exception { matchingTemplate, ComposableIndexTemplate.builder() .indexPatterns(List.of("test_index*")) - .template(new Template(Settings.builder().put("test-setting", 1).build(), null, null)) + .template( + new Template(Settings.builder().put("test-setting", 1).put("test-setting-2", 2).build(), null, null) + ) .build() ) ) @@ -78,6 +80,24 @@ public Settings getAdditionalIndexSettings( ) { return Settings.builder().put("test-setting", 0).build(); } + }, new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("test-setting-2", 10).build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } }); Template resolvedTemplate = TransportSimulateIndexTemplateAction.resolveTemplate( @@ -92,5 +112,6 @@ public Settings getAdditionalIndexSettings( ); assertThat(resolvedTemplate.settings().getAsInt("test-setting", -1), is(1)); + assertThat(resolvedTemplate.settings().getAsInt("test-setting-2", -1), is(10)); } } diff --git 
a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 05382de49087d..96a74d2e23aad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -44,8 +44,10 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -74,6 +76,7 @@ import org.junit.Before; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -691,6 +694,178 @@ public void testAggregateSettingsAppliesSettingsFromTemplatesAndRequest() { assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("value2")); } + public void testAggregateSettingsProviderOverrulesSettingsFromRequest() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().put("template_setting", "value1")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + request.settings(Settings.builder().put("request_setting", "value2").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("request_setting", "overrule_value").put("other_setting", "other_value").build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } + }) + ); + + assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1")); + assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("overrule_value")); + assertThat(aggregatedIndexSettings.get("other_setting"), equalTo("other_value")); + } + + public void testAggregateSettingsProviderOverrulesNullFromRequest() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().put("template_setting", "value1")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + request.settings(Settings.builder().putNull("request_setting").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new 
IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("request_setting", "overrule_value").put("other_setting", "other_value").build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } + }) + ); + + assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1")); + assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("overrule_value")); + assertThat(aggregatedIndexSettings.get("other_setting"), equalTo("other_value")); + } + + public void testAggregateSettingsProviderOverrulesSettingsFromTemplates() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().put("template_setting", "value1")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + request.settings(Settings.builder().put("request_setting", "value2").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("template_setting", "overrule_value").put("other_setting", "other_value").build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } + }) + ); + + assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("overrule_value")); + assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("value2")); + assertThat(aggregatedIndexSettings.get("other_setting"), equalTo("other_value")); + } + + public void testAggregateSettingsProviderOverrulesNullFromTemplates() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().putNull("template_setting")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + request.settings(Settings.builder().put("request_setting", "value2").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("template_setting", "overrule_value").put("other_setting", "other_value").build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; 
+ } + }) + ); + + assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("overrule_value")); + assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("value2")); + assertThat(aggregatedIndexSettings.get("other_setting"), equalTo("other_value")); + } + public void testInvalidAliasName() { final String[] invalidAliasNames = new String[] { "-alias1", "+alias2", "_alias3", "a#lias", "al:ias", ".", ".." }; String aliasName = randomFrom(invalidAliasNames); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingProviderTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingProviderTests.java index 628de0b047bf5..adac8bf204f3e 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingProviderTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingProviderTests.java @@ -23,15 +23,24 @@ public class IndexSettingProviderTests extends ESSingleNodeTestCase { public void testIndexCreation() throws Exception { - var indexService = createIndex("my-index1"); + Settings settings = Settings.builder().put("index.mapping.depth.limit", 10).build(); + var indexService = createIndex("my-index1", settings); assertFalse(indexService.getIndexSettings().getSettings().hasValue("index.refresh_interval")); + assertEquals("10", indexService.getIndexSettings().getSettings().get("index.mapping.depth.limit")); INDEX_SETTING_PROVIDER1_ENABLED.set(true); - indexService = createIndex("my-index2"); + indexService = createIndex("my-index2", settings); assertTrue(indexService.getIndexSettings().getSettings().hasValue("index.refresh_interval")); + assertEquals("10", indexService.getIndexSettings().getSettings().get("index.mapping.depth.limit")); + INDEX_SETTING_OVERRULING.set(true); + indexService = createIndex("my-index3", settings); + assertTrue(indexService.getIndexSettings().getSettings().hasValue("index.refresh_interval")); + assertEquals("100", indexService.getIndexSettings().getSettings().get("index.mapping.depth.limit")); + + INDEX_SETTING_DEPTH_ENABLED.set(false); INDEX_SETTING_PROVIDER2_ENABLED.set(true); - var e = expectThrows(IllegalArgumentException.class, () -> createIndex("my-index3")); + var e = expectThrows(IllegalArgumentException.class, () -> createIndex("my-index4", settings)); assertEquals( "additional index setting [index.refresh_interval] added by [TestIndexSettingsProvider] is already present", e.getMessage() @@ -47,7 +56,7 @@ public static class Plugin1 extends Plugin { @Override public Collection getAdditionalIndexSettingProviders(IndexSettingProvider.Parameters parameters) { - return List.of(new TestIndexSettingsProvider("index.refresh_interval", "-1", INDEX_SETTING_PROVIDER1_ENABLED)); + return List.of(new TestIndexSettingsProvider("-1", INDEX_SETTING_PROVIDER1_ENABLED)); } } @@ -56,22 +65,22 @@ public static class Plugin2 extends Plugin { @Override public Collection getAdditionalIndexSettingProviders(IndexSettingProvider.Parameters parameters) { - return List.of(new TestIndexSettingsProvider("index.refresh_interval", "100s", INDEX_SETTING_PROVIDER2_ENABLED)); + return List.of(new TestIndexSettingsProvider("100s", INDEX_SETTING_PROVIDER2_ENABLED)); } } private static final AtomicBoolean INDEX_SETTING_PROVIDER1_ENABLED = new AtomicBoolean(false); private static final AtomicBoolean INDEX_SETTING_PROVIDER2_ENABLED = new AtomicBoolean(false); + private static final AtomicBoolean INDEX_SETTING_DEPTH_ENABLED = new AtomicBoolean(true); + private static final AtomicBoolean INDEX_SETTING_OVERRULING = new AtomicBoolean(false); 
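// Editorial sketch, not part of the patch: outside of tests, an overruling
// provider needs nothing more than the two methods below. PinnedSourceModeProvider
// is a hypothetical name; the parameter list matches the IndexSettingProvider
// interface extended earlier in this commit.
//
//     class PinnedSourceModeProvider implements IndexSettingProvider {
//         @Override
//         public Settings getAdditionalIndexSettings(String indexName, String dataStreamName, IndexMode templateIndexMode,
//                                                    Metadata metadata, Instant resolvedAt,
//                                                    Settings indexTemplateAndCreateRequestSettings,
//                                                    List<CompressedXContent> combinedTemplateMappings) {
//             return Settings.builder().put("index.mapping.source.mode", "stored").build();
//         }
//
//         @Override
//         public boolean overrulesTemplateAndRequestSettings() {
//             return true; // templates and create-index requests cannot override the key above
//         }
//     }
//
// The toggle-driven TestIndexSettingsProvider that follows plays the same role
// for the assertions in this file.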
static class TestIndexSettingsProvider implements IndexSettingProvider { - private final String settingName; - private final String settingValue; + private final String intervalValue; private final AtomicBoolean enabled; - TestIndexSettingsProvider(String settingName, String settingValue, AtomicBoolean enabled) { - this.settingName = settingName; - this.settingValue = settingValue; + TestIndexSettingsProvider(String intervalValue, AtomicBoolean enabled) { + this.intervalValue = intervalValue; this.enabled = enabled; } @@ -86,10 +95,19 @@ public Settings getAdditionalIndexSettings( List combinedTemplateMappings ) { if (enabled.get()) { - return Settings.builder().put(settingName, settingValue).build(); + var builder = Settings.builder().put("index.refresh_interval", intervalValue); + if (INDEX_SETTING_DEPTH_ENABLED.get()) { + builder.put("index.mapping.depth.limit", 100); + } + return builder.build(); } else { return Settings.EMPTY; } } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return INDEX_SETTING_OVERRULING.get(); + } } } diff --git a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java similarity index 59% rename from x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java rename to x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java index edecf4eb9669e..f5ac107628d1a 100644 --- a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java +++ b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.logsdb; +import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.test.cluster.ElasticsearchCluster; @@ -19,7 +20,7 @@ import java.util.List; import java.util.Map; -public class LogsdbRestIT extends ESRestTestCase { +public class LogsdbWithBasicRestIT extends ESRestTestCase { @ClassRule public static ElasticsearchCluster cluster = ElasticsearchCluster.local() @@ -96,7 +97,7 @@ public void testLogsdbOverrideSyntheticSourceModeInMapping() throws IOException assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); } - public void testLogsdbNoOverrideSyntheticSourceSetting() throws IOException { + public void testLogsdbOverrideSyntheticSourceSetting() throws IOException { final String index = "test-index"; createIndex( index, @@ -104,6 +105,70 @@ public void testLogsdbNoOverrideSyntheticSourceSetting() throws IOException { ); var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); assertEquals("logsdb", settings.get("index.mode")); - assertEquals(SourceFieldMapper.Mode.SYNTHETIC.toString(), settings.get("index.mapping.source.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + } + + public void testLogsdbOverrideNullSyntheticSourceSetting() throws IOException { + final String index = "test-index"; + createIndex(index, Settings.builder().put("index.mode", "logsdb").putNull("index.mapping.source.mode").build()); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + 
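// Editorial note, not part of the patch: the logsdb tests in this file all pin
// down the same invariant for a basic-license cluster. However synthetic source
// is requested (explicit setting, explicit null, or template), the overruling
// provider forces it back to stored:
//
//     createIndex(index, Settings.builder()
//         .put("index.mode", "logsdb")
//         .put("index.mapping.source.mode", "synthetic") // ignored under basic license
//         .build());
//     // resulting setting: index.mapping.source.mode == "stored"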
assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + } + + public void testLogsdbOverrideSyntheticSourceSettingInTemplate() throws IOException { + var request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["test-*"], + "template": { + "settings":{ + "index": { + "mode": "logsdb", + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + } + } + } + """); + assertOK(client().performRequest(request)); + + final String index = "test-index"; + createIndex(index); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + } + + public void testLogsdbOverrideNullInTemplate() throws IOException { + var request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["test-*"], + "template": { + "settings":{ + "index": { + "mode": "logsdb", + "mapping": { + "source": { + "mode": null + } + } + } + } + } + } + """); + assertOK(client().performRequest(request)); + + final String index = "test-index"; + createIndex(index); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java index f60c941c75a7c..4625fe91294d7 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java @@ -47,6 +47,12 @@ final class SyntheticSourceIndexSettingsProvider implements IndexSettingProvider this.mapperServiceFactory = mapperServiceFactory; } + @Override + public boolean overrulesTemplateAndRequestSettings() { + // Indicates that the provider value takes precedence over any user setting. 
+ return true; + } + @Override public Settings getAdditionalIndexSettings( String indexName, From 485aba8b0221b720f4a2e71efd61c36cc90d8ee1 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 22 Oct 2024 16:21:29 +0200 Subject: [PATCH 014/202] Change some IndexInput to RandomAccessInput in ES87TSDBDocValuesProducer (#115305) --- .../codec/tsdb/ES87TSDBDocValuesProducer.java | 21 +++++++------------ 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java index d5c94de1c6942..a7560ce6f3caf 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java @@ -132,7 +132,7 @@ public BinaryDocValues getBinary(FieldInfo field) throws IOException { return DocValues.emptyBinary(); } - final IndexInput bytesSlice = data.slice("fixed-binary", entry.dataOffset, entry.dataLength); + final RandomAccessInput bytesSlice = data.randomAccessSlice(entry.dataOffset, entry.dataLength); if (entry.docsWithFieldOffset == -1) { // dense @@ -144,8 +144,7 @@ public BinaryDocValues getBinary(FieldInfo field) throws IOException { @Override public BytesRef binaryValue() throws IOException { - bytesSlice.seek((long) doc * length); - bytesSlice.readBytes(bytes.bytes, 0, length); + bytesSlice.readBytes((long) doc * length, bytes.bytes, 0, length); return bytes; } }; @@ -160,8 +159,7 @@ public BytesRef binaryValue() throws IOException { public BytesRef binaryValue() throws IOException { long startOffset = addresses.get(doc); bytes.length = (int) (addresses.get(doc + 1L) - startOffset); - bytesSlice.seek(startOffset); - bytesSlice.readBytes(bytes.bytes, 0, bytes.length); + bytesSlice.readBytes(startOffset, bytes.bytes, 0, bytes.length); return bytes; } }; @@ -184,8 +182,7 @@ public BytesRef binaryValue() throws IOException { @Override public BytesRef binaryValue() throws IOException { - bytesSlice.seek((long) disi.index() * length); - bytesSlice.readBytes(bytes.bytes, 0, length); + bytesSlice.readBytes((long) disi.index() * length, bytes.bytes, 0, length); return bytes; } }; @@ -201,8 +198,7 @@ public BytesRef binaryValue() throws IOException { final int index = disi.index(); long startOffset = addresses.get(index); bytes.length = (int) (addresses.get(index + 1L) - startOffset); - bytesSlice.seek(startOffset); - bytesSlice.readBytes(bytes.bytes, 0, bytes.length); + bytesSlice.readBytes(startOffset, bytes.bytes, 0, bytes.length); return bytes; } }; @@ -407,7 +403,7 @@ private static class TermsDict extends BaseTermsEnum { final IndexInput bytes; final long blockMask; final LongValues indexAddresses; - final IndexInput indexBytes; + final RandomAccessInput indexBytes; final BytesRef term; long ord = -1; @@ -427,7 +423,7 @@ private static class TermsDict extends BaseTermsEnum { entry.termsIndexAddressesLength ); indexAddresses = DirectMonotonicReader.getInstance(entry.termsIndexAddressesMeta, indexAddressesSlice); - indexBytes = data.slice("terms-index", entry.termsIndexOffset, entry.termsIndexLength); + indexBytes = data.randomAccessSlice(entry.termsIndexOffset, entry.termsIndexLength); term = new BytesRef(entry.maxTermLength); // add the max term length for the dictionary @@ -485,8 +481,7 @@ private BytesRef getTermFromIndex(long index) throws IOException { assert index >= 0 && index <= (entry.termsDictSize - 1) >>> 
entry.termsDictIndexShift; final long start = indexAddresses.get(index); term.length = (int) (indexAddresses.get(index + 1) - start); - indexBytes.seek(start); - indexBytes.readBytes(term.bytes, 0, term.length); + indexBytes.readBytes(start, term.bytes, 0, term.length); return term; } From 07374ab600c9ae708eda1f452f8c06774e50183f Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Tue, 22 Oct 2024 15:27:23 +0100 Subject: [PATCH 015/202] #111433 Watch Next Run Interval Resets On Shard Move or Node Restart (#115102) * Switch Watcher scheduler to use last exec time when restarting, moving shards or resuming from stopped. * Add tests for last runtime calculation * Update docs/changelog/115102.yaml * Add counter to watcher job executions to check no additional executions happen during test --- docs/changelog/115102.yaml | 6 + .../engine/TickerScheduleTriggerEngine.java | 40 ++- .../engine/TickerScheduleEngineTests.java | 239 ++++++++++++++++++ 3 files changed, 283 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/115102.yaml diff --git a/docs/changelog/115102.yaml b/docs/changelog/115102.yaml new file mode 100644 index 0000000000000..f679bb6c223a6 --- /dev/null +++ b/docs/changelog/115102.yaml @@ -0,0 +1,6 @@ +pr: 115102 +summary: Watch Next Run Interval Resets On Shard Move or Node Restart +area: Watcher +type: bug +issues: + - 111433 diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index ced131640f0ee..cc8d0edf37014 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -17,6 +17,8 @@ import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; @@ -32,6 +34,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; @@ -67,7 +70,11 @@ public synchronized void start(Collection jobs) { Map startingSchedules = Maps.newMapWithExpectedSize(jobs.size()); for (Watch job : jobs) { if (job.trigger() instanceof ScheduleTrigger trigger) { - startingSchedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); + if (trigger.getSchedule() instanceof IntervalSchedule) { + startingSchedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), calculateLastStartTime(job))); + } else { + startingSchedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); + } } } // why are we calling putAll() here instead of assigning a brand @@ -108,10 +115,39 @@ public void add(Watch watch) { // watcher indexing listener // this also means that updating an 
existing watch would not retrigger the schedule time, if it remains the same schedule if (currentSchedule == null || currentSchedule.schedule.equals(trigger.getSchedule()) == false) { - schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), clock.millis())); + if (trigger.getSchedule() instanceof IntervalSchedule) { + schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), calculateLastStartTime(watch))); + } else { + schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), clock.millis())); + } + } } + /** + * Attempts to calculate the epoch millis of the last time the watch was checked, If the watch has never been checked, the timestamp of + * the last state change is used. If the watch has never been checked and has never been in an active state, the current time is used. + * @param job the watch to calculate the last start time for + * @return the epoch millis of the last time the watch was checked or now + */ + private long calculateLastStartTime(Watch job) { + var lastChecked = Optional.ofNullable(job) + .map(Watch::status) + .map(WatchStatus::lastChecked) + .map(ZonedDateTime::toInstant) + .map(Instant::toEpochMilli); + + return lastChecked.orElseGet( + () -> Optional.ofNullable(job) + .map(Watch::status) + .map(WatchStatus::state) + .map(WatchStatus.State::getTimestamp) + .map(ZonedDateTime::toInstant) + .map(Instant::toEpochMilli) + .orElse(clock.millis()) + ); + } + @Override public boolean remove(String jobId) { logger.debug("Removing watch [{}] from engine (engine is running: {})", jobId, isRunning.get()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 8b7cfa75f9229..9a12b8f394eb2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.input.none.ExecutableNoneInput; import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; @@ -283,6 +284,244 @@ public void testAddOnlyWithNewSchedule() { assertThat(engine.getSchedules().get("_id"), not(is(activeSchedule))); } + /** + * This test verifies that a watch with a valid lastCheckedTime executes before the interval time to ensure the job resumes waiting + * from the same point it left off before the reallocation / restart + */ + public void testWatchWithLastCheckedTimeExecutesBeforeInitialInterval() throws Exception { + final var firstLatch = new CountDownLatch(1); + final var secondLatch = new CountDownLatch(1); + + Watch watch = new Watch( + "watch", + new ScheduleTrigger(interval("1s")), + new ExecutableNoneInput(), + InternalAlwaysCondition.INSTANCE, + null, + null, + Collections.emptyList(), + null, + new WatchStatus(-1L, null, null, clock.instant().minusMillis(500).atZone(ZoneOffset.UTC), null, null, null), + SequenceNumbers.UNASSIGNED_SEQ_NO, + 
SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ); + + var watches = Collections.singletonList(watch); + + var runCount = new AtomicInteger(0); + + engine.register(events -> { + for (TriggerEvent ignored : events) { + if (runCount.get() == 0) { + logger.info("job first fire"); + firstLatch.countDown(); + } else { + logger.info("job second fire"); + secondLatch.countDown(); + } + runCount.incrementAndGet(); + } + }); + + engine.start(watches); + advanceClockIfNeeded(clock.instant().plusMillis(510).atZone(ZoneOffset.UTC)); + if (firstLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + if (secondLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(runCount.get(), is(2)); + + engine.stop(); + } + + /** + * This test verifies that a watch without a lastCheckedTime but with a valid activationTime executes before the interval time to + * ensure the job resumes waiting from the same point it left off before the reallocation / restart + */ + public void testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval() throws Exception { + final var firstLatch = new CountDownLatch(1); + final var secondLatch = new CountDownLatch(1); + + Watch watch = new Watch( + "watch", + new ScheduleTrigger(interval("1s")), + new ExecutableNoneInput(), + InternalAlwaysCondition.INSTANCE, + null, + null, + Collections.emptyList(), + null, + new WatchStatus( + -1L, + new WatchStatus.State(true, clock.instant().minusMillis(500).atZone(ZoneOffset.UTC)), + null, + null, + null, + null, + null + ), + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ); + + var watches = Collections.singletonList(watch); + + var runCount = new AtomicInteger(0); + + engine.register(events -> { + for (TriggerEvent ignored : events) { + if (runCount.get() == 0) { + logger.info("job first fire"); + firstLatch.countDown(); + } else { + logger.info("job second fire"); + secondLatch.countDown(); + } + runCount.incrementAndGet(); + } + }); + + engine.start(watches); + advanceClockIfNeeded(clock.instant().plusMillis(510).atZone(ZoneOffset.UTC)); + if (firstLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + if (secondLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(runCount.get(), is(2)); + + engine.stop(); + } + + /** + * This test verifies that a watch added after service start with a lastCheckedTime executes before the interval time to ensure the job + * resumes waiting from the same point it left off before the reallocation / restart + */ + public void testAddWithLastCheckedTimeExecutesBeforeInitialInterval() throws Exception { + final var firstLatch = new CountDownLatch(1); + final var secondLatch = new CountDownLatch(1); + + Watch watch = new Watch( + "watch", + new ScheduleTrigger(interval("1s")), + new ExecutableNoneInput(), + InternalAlwaysCondition.INSTANCE, + null, + null, + Collections.emptyList(), + null, + new WatchStatus(-1L, null, null, clock.instant().minusMillis(500).atZone(ZoneOffset.UTC), null, null, null), + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ); + + var runCount = new AtomicInteger(0); + + engine.register(events -> { + 
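// Editorial note, not part of the patch: this handler counts executions so the
// test can assert that the first fire lands roughly 500ms after start, i.e.
// that calculateLastStartTime() seeded the interval from lastChecked.
// Simplified, the chain added above amounts to (the real method also falls
// back to the activation timestamp in WatchStatus.State):
//
//     long lastStart = Optional.ofNullable(watch.status())
//         .map(WatchStatus::lastChecked)
//         .map(t -> t.toInstant().toEpochMilli())
//         .orElseGet(clock::millis);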
for (TriggerEvent ignored : events) { + if (runCount.get() == 0) { + logger.info("job first fire"); + firstLatch.countDown(); + } else { + logger.info("job second fire"); + secondLatch.countDown(); + } + runCount.incrementAndGet(); + } + }); + + engine.start(Collections.emptyList()); + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + engine.add(watch); + + advanceClockIfNeeded(clock.instant().plusMillis(510).atZone(ZoneOffset.UTC)); + if (firstLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + if (secondLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(runCount.get(), is(2)); + + engine.stop(); + } + + /** + * This test verifies that a watch added after service start without a lastCheckedTime but with a valid activationTime executes before + * the interval time to ensure the job resumes waiting from the same point it left off before the reallocation / restart + */ + public void testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval() throws Exception { + final var firstLatch = new CountDownLatch(1); + final var secondLatch = new CountDownLatch(1); + + Watch watch = new Watch( + "watch", + new ScheduleTrigger(interval("1s")), + new ExecutableNoneInput(), + InternalAlwaysCondition.INSTANCE, + null, + null, + Collections.emptyList(), + null, + new WatchStatus( + -1L, + new WatchStatus.State(true, clock.instant().minusMillis(500).atZone(ZoneOffset.UTC)), + null, + null, + null, + null, + null + ), + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ); + + var runCount = new AtomicInteger(0); + + engine.register(events -> { + for (TriggerEvent ignored : events) { + if (runCount.get() == 0) { + logger.info("job first fire"); + firstLatch.countDown(); + } else { + logger.info("job second fire"); + secondLatch.countDown(); + } + runCount.incrementAndGet(); + } + }); + + engine.start(Collections.emptyList()); + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + engine.add(watch); + + advanceClockIfNeeded(clock.instant().plusMillis(510).atZone(ZoneOffset.UTC)); + if (firstLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + if (secondLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(runCount.get(), is(2)); + + engine.stop(); + } + private Watch createWatch(String name, Schedule schedule) { return new Watch( name, From 36c45c15dac54e3b318cb0c92b5e737bbf8a788a Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Tue, 22 Oct 2024 15:27:42 +0100 Subject: [PATCH 016/202] #104233 Allow Watcher Node Allocation Settings (#115251) * Update settings endpoint modified Now accepts index.routing.allocation.* settings but denies changing the allocation setting that keeps watches on data nodes * Get settings endpoint modified Now returns index.routing.allocation.* settings explicitly filters out the `index.routing.allocation.include._tier_preference` setting * Tests for modified endpoints * Update docs --- .../watcher/how-watcher-works.asciidoc | 13 ++- .../put/UpdateWatcherSettingsAction.java | 30 ++++- .../10_watcher_settings.yml | 104 ++++++++++++++++++ 
.../10_update_watcher_settings.yml | 66 ----------- .../TransportGetWatcherSettingsAction.java | 17 ++- 5 files changed, 149 insertions(+), 81 deletions(-) create mode 100644 x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/settings_endpoints/10_watcher_settings.yml delete mode 100644 x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/update_settings/10_update_watcher_settings.yml diff --git a/docs/reference/watcher/how-watcher-works.asciidoc b/docs/reference/watcher/how-watcher-works.asciidoc index ed6e49b72e9ce..e34d4f799d99b 100644 --- a/docs/reference/watcher/how-watcher-works.asciidoc +++ b/docs/reference/watcher/how-watcher-works.asciidoc @@ -146,15 +146,18 @@ add, the more distributed the watches can be executed. If you add or remove replicas, all watches need to be reloaded. If a shard is relocated, the primary and all replicas of this particular shard will reload. -Because the watches are executed on the node, where the watch shards are, you can create -dedicated watcher nodes by using shard allocation filtering. +Because the watches are executed on the node, where the watch shards are, you +can create dedicated watcher nodes by using shard allocation filtering. To do this +, configure nodes with a dedicated `node.attr.role: watcher` property. -You could configure nodes with a dedicated `node.attr.role: watcher` property and -then configure the `.watches` index like this: +As the `.watches` index is a system index, you can't use the normal `.watcher/_settings` +endpoint to modify its routing allocation. Instead, you can use the following dedicated +endpoint to adjust the allocation of the `.watches` shards to the nodes with the +`watcher` role attribute: [source,console] ------------------------ -PUT .watches/_settings +PUT _watcher/settings { "index.routing.allocation.include.role": "watcher" } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java index 42fc7c196bbcf..7b0bd8a8108e9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java @@ -16,13 +16,13 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; public class UpdateWatcherSettingsAction extends ActionType { @@ -34,6 +34,16 @@ public class UpdateWatcherSettingsAction extends ActionType ALLOWED_SETTINGS_PREFIXES = Set.of( + IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX, + IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX, + IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ); + + public static final Set EXPLICITLY_DENIED_SETTINGS = Set.of( + IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + "._tier_preference" + ); + public UpdateWatcherSettingsAction() { super(NAME); } @@ -79,13 +89,25 @@ public Map settings() { @Override public ActionRequestValidationException validate() { - Set 
forbiddenSettings = Sets.difference(settings.keySet(), ALLOWED_SETTING_KEYS); - if (forbiddenSettings.size() > 0) { + Set forbiddenSettings = settings.keySet() + .stream() + .filter( + setting -> (ALLOWED_SETTING_KEYS.contains(setting) == false + && ALLOWED_SETTINGS_PREFIXES.stream().noneMatch(prefix -> setting.startsWith(prefix + "."))) + || EXPLICITLY_DENIED_SETTINGS.contains(setting) + ) + .collect(Collectors.toSet()); + + if (forbiddenSettings.isEmpty() == false) { return ValidateActions.addValidationError( "illegal settings: " + forbiddenSettings + ", these settings may not be configured. Only the following settings may be configured: " - + ALLOWED_SETTING_KEYS, + + ALLOWED_SETTING_KEYS + + ", " + + ALLOWED_SETTINGS_PREFIXES.stream().map(s -> s + ".*").collect(Collectors.toSet()) + + " excluding the following explicitly denied settings: " + + EXPLICITLY_DENIED_SETTINGS, null ); } diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/settings_endpoints/10_watcher_settings.yml b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/settings_endpoints/10_watcher_settings.yml new file mode 100644 index 0000000000000..f639b4f8f1a77 --- /dev/null +++ b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/settings_endpoints/10_watcher_settings.yml @@ -0,0 +1,104 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: + watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test" + } + } + } + } + +--- +"Test update and get watch settings api": + - do: + watcher.get_settings: { } + + - match: { index.auto_expand_replicas: "0-1" } + - match: { index.number_of_replicas: "0" } + + - do: + watcher.update_settings: + body: + index.auto_expand_replicas: "0-all" + + - do: + watcher.get_settings: { } + + - match: { index.auto_expand_replicas: "0-all" } + - is_false: index.routing.allocation.include._tier_preference + + - do: + watcher.update_settings: + body: + index.auto_expand_replicas: null + index.number_of_replicas: 1 + + - do: + watcher.get_settings: { } + + - match: { index.number_of_replicas: "1" } +--- +"Test disallowed setting name throws error": + - requires: + test_runner_features: regex + - do: + watcher.update_settings: + body: + index.disallowed_setting: "some_invalid_value" + catch: bad_request + - match: + error: + type: "action_request_validation_exception" + reason: '/illegal settings\: \[index.disallowed_setting\].*/' +--- +"Test allowed prefix setting name": + - do: + watcher.update_settings: + body: + index.routing.allocation.include.role: "watcher" + index.routing.allocation.exclude.role: "noWatcher" + index.routing.allocation.require.role: "mustWatcher" + - do: + watcher.get_settings: { } + - match: { index.routing.allocation.include.role: "watcher" } + - match: { index.routing.allocation.exclude.role: "noWatcher" } + - match: { index.routing.allocation.require.role: "mustWatcher" } +--- +"Test explicitly disallowed prefix setting name throws error": + - requires: + test_runner_features: regex + - do: + watcher.update_settings: + body: + index.routing.allocation.include.disallowed_prefix: "some_invalid_value" + catch: bad_request + - match: + error: + type: "action_request_validation_exception" + reason: '/illegal 
settings\: \[index.routing.allocation.include.disallowed_prefix\].*/' diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/update_settings/10_update_watcher_settings.yml b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/update_settings/10_update_watcher_settings.yml deleted file mode 100644 index d7478d643a98a..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/update_settings/10_update_watcher_settings.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -setup: - - do: - cluster.health: - wait_for_status: yellow - ---- -"Test update and get watch settings api": - - do: - watcher.put_watch: - id: "my_watch" - body: > - { - "trigger": { - "schedule": { - "hourly": { - "minute": [ 0, 5 ] - } - } - }, - "input": { - "simple": { - "payload": { - "send": "yes" - } - } - }, - "condition": { - "always": {} - }, - "actions": { - "test_index": { - "index": { - "index": "test" - } - } - } - } - - match: { _id: "my_watch" } - - - do: - watcher.get_settings: {} - - - match: { index.auto_expand_replicas: "0-1" } - - match: { index.number_of_replicas: "0" } - - - do: - watcher.update_settings: - body: - index.auto_expand_replicas: "0-all" - - - do: - watcher.get_settings: {} - - - match: { index.auto_expand_replicas: "0-all" } - - - do: - watcher.update_settings: - body: - index.auto_expand_replicas: null - index.number_of_replicas: 1 - - - do: - watcher.get_settings: {} - - - match: { index.number_of_replicas: "1" } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java index 29349735afcd2..2962bffd68b66 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java @@ -23,8 +23,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.watcher.transport.actions.put.GetWatcherSettingsAction; -import org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction; +import static org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction.ALLOWED_SETTINGS_PREFIXES; +import static org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction.ALLOWED_SETTING_KEYS; +import static org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction.EXPLICITLY_DENIED_SETTINGS; import static org.elasticsearch.xpack.watcher.transport.actions.TransportUpdateWatcherSettingsAction.WATCHER_INDEX_NAME; import static org.elasticsearch.xpack.watcher.transport.actions.TransportUpdateWatcherSettingsAction.WATCHER_INDEX_REQUEST; @@ -73,11 +75,14 @@ protected void masterOperation( */ private static Settings filterSettableSettings(Settings settings) { Settings.Builder builder = Settings.builder(); - for (String settingName : UpdateWatcherSettingsAction.ALLOWED_SETTING_KEYS) { - if (settings.hasValue(settingName)) { - builder.put(settingName, settings.get(settingName)); - } - } + settings.keySet() + .stream() + .filter( + setting -> (ALLOWED_SETTING_KEYS.contains(setting) + || 
ALLOWED_SETTINGS_PREFIXES.stream().anyMatch(prefix -> setting.startsWith(prefix + "."))) + && EXPLICITLY_DENIED_SETTINGS.contains(setting) == false + ) + .forEach(setting -> builder.put(setting, settings.get(setting))); return builder.build(); } From 6855db5b44e957429588dd645165650e970fd001 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Tue, 22 Oct 2024 16:34:11 +0200 Subject: [PATCH 017/202] Expose cluster-state role mappings in APIs (#114951) This PR exposes operator-defined, cluster-state role mappings in the [Get role mappings API](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html). Cluster-state role mappings are returned with a reserved suffix `-read-only-operator-mapping`, to disambiguate them from native role mappings stored in the security index. CS role mappings are also marked with a `_read_only` metadata flag. It's possible to query a CS role mapping using its name both with and without the suffix. CS role mappings can be viewed via the API, but cannot be modified. To clarify this, the PUT and DELETE role mapping endpoints return header warnings if native role mappings that name-clash with CS role mappings are created, modified, or deleted. The PR also prevents the creation of role mappings with names ending in `-read-only-operator-mapping`, to ensure that CS role mappings and native role mappings can always be fully disambiguated. Finally, the PR changes how CS role mappings are persisted in cluster-state. CS role mappings are written (and read from disk) in the `XContent` format. This format omits the role mapping's name. This means that if CS role mappings are ever recovered from disk (e.g., during a master-node restart), their names are erased. To address this, this PR changes CS role mapping serialization to persist the name of a mapping in a reserved metadata field, and to recover it from metadata during deserialization. This allows us to persist the name without BWC breaks in the role mapping `XContent` format. It also allows us to ensure that role mappings are re-written to cluster state in the new, name-preserving format the first time operator file settings are processed. 
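As a rough illustration of the name round trip described above, here is a minimal, self-contained sketch. The class and method names below are invented for illustration; the two string constants mirror the ones this patch adds to `RoleMappingMetadata`, and the real implementation there differs in detail.

    import java.util.HashMap;
    import java.util.Map;

    class RoleMappingNameRoundTripSketch {
        static final String METADATA_NAME_FIELD = "_es_reserved_role_mapping_name";
        static final String FALLBACK_NAME = "name_not_available_after_deserialization";

        // Write side: stash the name in reserved metadata, because the XContent
        // format written to cluster state omits the mapping's name.
        static Map<String, Object> withNameInMetadata(String name, Map<String, Object> metadata) {
            Map<String, Object> copy = new HashMap<>(metadata);
            copy.put(METADATA_NAME_FIELD, name); // overwrites any user-supplied value
            return copy;
        }

        // Read side: recover the name, falling back to a constant for mappings
        // persisted before the name-preserving format existed.
        static String recoverName(Map<String, Object> metadata) {
            return metadata.get(METADATA_NAME_FIELD) instanceof String name ? name : FALLBACK_NAME;
        }

        public static void main(String[] args) {
            Map<String, Object> persisted = withNameInMetadata("everyone_kibana", Map.<String, Object>of("_foo", "something"));
            assert "everyone_kibana".equals(recoverName(persisted));
            assert FALLBACK_NAME.equals(recoverName(Map.of())); // old, name-less format
        }
    }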
Depends on: https://github.com/elastic/elasticsearch/pull/114295 Relates: ES-9628 --- docs/changelog/114951.yaml | 5 + .../FileSettingsRoleMappingUpgradeIT.java | 7 + .../service/ReservedStateUpdateTask.java | 4 +- .../support/mapper/ExpressionRoleMapping.java | 34 +++ .../security/authz/RoleMappingMetadata.java | 75 ++++- .../SecurityOnTrialLicenseRestTestCase.java | 11 +- .../rolemapping/RoleMappingRestIT.java | 268 ++++++++++++++++++ .../RoleMappingFileSettingsIT.java | 168 ++++++++--- .../FileSettingsRoleMappingsRestartIT.java | 84 ++++-- .../xpack/security/Security.java | 1 + .../ReservedRoleMappingAction.java | 8 +- .../TransportDeleteRoleMappingAction.java | 23 +- .../TransportGetRoleMappingsAction.java | 87 +++++- .../TransportPutRoleMappingAction.java | 20 +- .../mapper/ClusterStateRoleMapper.java | 26 +- .../TransportGetRoleMappingsActionTests.java | 247 +++++++++++++--- .../TransportPutRoleMappingActionTests.java | 43 ++- 17 files changed, 964 insertions(+), 147 deletions(-) create mode 100644 docs/changelog/114951.yaml create mode 100644 x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java diff --git a/docs/changelog/114951.yaml b/docs/changelog/114951.yaml new file mode 100644 index 0000000000000..4d40a063e2b02 --- /dev/null +++ b/docs/changelog/114951.yaml @@ -0,0 +1,5 @@ +pr: 114951 +summary: Expose cluster-state role mappings in APIs +area: Authentication +type: bug +issues: [] diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java index 3275f3e0e136f..b3d4dfc68d399 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java @@ -25,9 +25,12 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.function.Supplier; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -106,6 +109,10 @@ public void testRoleMappingsAppliedOnUpgrade() throws IOException { ); assertThat(roleMappings, is(not(nullValue()))); assertThat(roleMappings.size(), equalTo(1)); + assertThat(roleMappings, is(instanceOf(Map.class))); + @SuppressWarnings("unchecked") + Map roleMapping = (Map) roleMappings; + assertThat(roleMapping.keySet(), contains("everyone_kibana-read-only-operator-mapping")); } } } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java index 92e248f160f0f..c85997f72cc78 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java @@ -205,8 +205,8 @@ static boolean checkMetadataVersion( namespace, newVersion, switch (versionCheck) { - case ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION -> "less than"; - case ReservedStateVersionCheck.HIGHER_VERSION_ONLY -> "less than or equal to"; + case HIGHER_OR_SAME_VERSION -> "less than"; + case 
HIGHER_VERSION_ONLY -> "less than or equal to"; }, currentVersion ) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index 17088cff8718b..c504ebe56ed45 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -54,6 +54,18 @@ */ public class ExpressionRoleMapping implements ToXContentObject, Writeable { + /** + * Reserved suffix for read-only operator-defined role mappings. + * This suffix is added to the name of all cluster-state role mappings returned via + * the {@code TransportGetRoleMappingsAction} action. + */ + public static final String READ_ONLY_ROLE_MAPPING_SUFFIX = "-read-only-operator-mapping"; + /** + * Reserved metadata field to mark role mappings as read-only. + * This field is added to the metadata of all cluster-state role mappings returned via + * the {@code TransportGetRoleMappingsAction} action. + */ + public static final String READ_ONLY_ROLE_MAPPING_METADATA_FLAG = "_read_only"; private static final ObjectParser PARSER = new ObjectParser<>("role-mapping", Builder::new); /** @@ -136,6 +148,28 @@ public ExpressionRoleMapping(StreamInput in) throws IOException { this.metadata = in.readGenericMap(); } + public static boolean hasReadOnlySuffix(String name) { + return name.endsWith(READ_ONLY_ROLE_MAPPING_SUFFIX); + } + + public static void validateNoReadOnlySuffix(String name) { + if (hasReadOnlySuffix(name)) { + throw new IllegalArgumentException( + "Invalid mapping name [" + name + "]. [" + READ_ONLY_ROLE_MAPPING_SUFFIX + "] is not an allowed suffix" + ); + } + } + + public static String addReadOnlySuffix(String name) { + return name + READ_ONLY_ROLE_MAPPING_SUFFIX; + } + + public static String removeReadOnlySuffixIfPresent(String name) { + return name.endsWith(READ_ONLY_ROLE_MAPPING_SUFFIX) + ? 
name.substring(0, name.length() - READ_ONLY_ROLE_MAPPING_SUFFIX.length()) + : name; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java index b38b33e082382..74c6223b1ebdd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.core.security.authz; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.AbstractNamedDiffable; @@ -26,8 +28,10 @@ import java.io.IOException; import java.util.Collection; import java.util.EnumSet; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashSet; +import java.util.Map; import java.util.Objects; import java.util.Set; @@ -36,7 +40,11 @@ public final class RoleMappingMetadata extends AbstractNamedDiffable implements Metadata.Custom { + private static final Logger logger = LogManager.getLogger(RoleMappingMetadata.class); + public static final String TYPE = "role_mappings"; + public static final String METADATA_NAME_FIELD = "_es_reserved_role_mapping_name"; + public static final String FALLBACK_NAME = "name_not_available_after_deserialization"; @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -46,12 +54,7 @@ public final class RoleMappingMetadata extends AbstractNamedDiffable ExpressionRoleMapping.parse("name_not_available_after_deserialization", p), - new ParseField(TYPE) - ); + PARSER.declareObjectArray(constructorArg(), (p, c) -> parseWithNameFromMetadata(p), new ParseField(TYPE)); } private static final RoleMappingMetadata EMPTY = new RoleMappingMetadata(Set.of()); @@ -153,4 +156,64 @@ public EnumSet context() { // are not persisted. return ALL_CONTEXTS; } + + /** + * Ensures role mapping names are preserved when stored on disk using XContent format, + * which omits names. This method copies the role mapping's name into a reserved metadata field + * during serialization, allowing recovery during deserialization (e.g., after a master-node restart). + * {@link #parseWithNameFromMetadata(XContentParser)} restores the name during parsing. + */ + public static ExpressionRoleMapping copyWithNameInMetadata(ExpressionRoleMapping roleMapping) { + Map metadata = new HashMap<>(roleMapping.getMetadata()); + // note: can't use Maps.copyWith... since these create maps that don't support `null` values in map entries + if (metadata.put(METADATA_NAME_FIELD, roleMapping.getName()) != null) { + logger.error( + "Metadata field [{}] is reserved and will be overwritten with an internal system value. " + + "Rename this field in your role mapping configuration.", + METADATA_NAME_FIELD + ); + } + return new ExpressionRoleMapping( + roleMapping.getName(), + roleMapping.getExpression(), + roleMapping.getRoles(), + roleMapping.getRoleTemplates(), + metadata, + roleMapping.isEnabled() + ); + } + + /** + * If a role mapping does not yet have a name persisted in metadata, it will use a constant fallback name. 
This method checks if a + * role mapping has the fallback name. + */ + public static boolean hasFallbackName(ExpressionRoleMapping expressionRoleMapping) { + return expressionRoleMapping.getName().equals(FALLBACK_NAME); + } + + /** + * Parse a role mapping from XContent, restoring the name from a reserved metadata field. + * Used to parse a role mapping annotated with its name in metadata via {@link #copyWithNameInMetadata(ExpressionRoleMapping)}. + */ + public static ExpressionRoleMapping parseWithNameFromMetadata(XContentParser parser) throws IOException { + ExpressionRoleMapping roleMapping = ExpressionRoleMapping.parse(FALLBACK_NAME, parser); + return new ExpressionRoleMapping( + getNameFromMetadata(roleMapping), + roleMapping.getExpression(), + roleMapping.getRoles(), + roleMapping.getRoleTemplates(), + roleMapping.getMetadata(), + roleMapping.isEnabled() + ); + } + + private static String getNameFromMetadata(ExpressionRoleMapping roleMapping) { + Map<String, Object> metadata = roleMapping.getMetadata(); + if (metadata.containsKey(METADATA_NAME_FIELD) && metadata.get(METADATA_NAME_FIELD) instanceof String name) { + return name; + } else { + // This is valid the first time we recover from cluster-state: the old format metadata won't have a name stored in metadata yet + return FALLBACK_NAME; + } + } } diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java index 1abb9bbb067dc..523f04fb436f4 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java @@ -19,6 +19,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.test.TestSecurityClient; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; @@ -41,9 +42,7 @@ public abstract class SecurityOnTrialLicenseRestTestCase extends ESRestTestCase { private TestSecurityClient securityClient; - @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .nodes(2) + public static LocalClusterConfigProvider commonTrialSecurityClusterConfig = cluster -> cluster.nodes(2) .distribution(DistributionType.DEFAULT) .setting("xpack.ml.enabled", "false") .setting("xpack.license.self_generated.type", "trial") @@ -62,8 +61,10 @@ public abstract class SecurityOnTrialLicenseRestTestCase extends ESRestTestCase .user("admin_user", "admin-password", ROOT_USER_ROLE, true) .user("security_test_user", "security-test-password", "security_test_role", false) .user("x_pack_rest_user", "x-pack-test-password", ROOT_USER_ROLE, true) - .user("cat_test_user", "cat-test-password", "cat_test_role", false) - .build(); + .user("cat_test_user", "cat-test-password", "cat_test_role", false); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().apply(commonTrialSecurityClusterConfig).build(); @Override protected String getTestRestCluster() { diff --git 
a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java new file mode 100644 index 0000000000000..51970af4b88a0 --- /dev/null +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java @@ -0,0 +1,268 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.rolemapping; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class RoleMappingRestIT extends ESRestTestCase { + private static final String settingsJson = """ + { + "metadata": { + "version": "1", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": { + "role-mapping-1": { + "enabled": true, + "roles": [ "role_1" ], + "rules": { "field": { "username": "no_user" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", + "_foo": "something", + "_es_reserved_role_mapping_name": "ignored" + } + }, + "role-mapping-2": { + "enabled": true, + "roles": [ "role_2" ], + "rules": { "field": { "username": "no_user" } } + }, + "role-mapping-3": { + "enabled": true, + "roles": [ "role_3" ], + "rules": { "field": { "username": "no_user" } }, + "metadata": { + "_read_only" : { "field": 1 }, + "_es_reserved_role_mapping_name": { "still_ignored": true } + } + } + } + } + }"""; + private static final ExpressionRoleMapping clusterStateMapping1 = new ExpressionRoleMapping( + "role-mapping-1-read-only-operator-mapping", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("no_user"))), + List.of("role_1"), + null, + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", "_read_only", true), + true + ); + private static 
final ExpressionRoleMapping clusterStateMapping2 = new ExpressionRoleMapping( + "role-mapping-2-read-only-operator-mapping", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("no_user"))), + List.of("role_2"), + null, + Map.of("_read_only", true), + true + ); + private static final ExpressionRoleMapping clusterStateMapping3 = new ExpressionRoleMapping( + "role-mapping-3-read-only-operator-mapping", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("no_user"))), + List.of("role_3"), + null, + Map.of("_read_only", true), + true + ); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .apply(SecurityOnTrialLicenseRestTestCase.commonTrialSecurityClusterConfig) + .configFile("operator/settings.json", Resource.fromString(settingsJson)) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testGetRoleMappings() throws IOException { + expectMappings(List.of(clusterStateMapping1, clusterStateMapping2, clusterStateMapping3)); + expectMappings(List.of(clusterStateMapping1), "role-mapping-1"); + expectMappings(List.of(clusterStateMapping1, clusterStateMapping3), "role-mapping-1", "role-mapping-3"); + expectMappings(List.of(clusterStateMapping1), clusterStateMapping1.getName()); + expectMappings(List.of(clusterStateMapping1), clusterStateMapping1.getName(), "role-mapping-1"); + + expect404(() -> getMappings("role-mapping-4")); + expect404(() -> getMappings("role-mapping-4-read-only-operator-mapping")); + + ExpressionRoleMapping nativeMapping1 = expressionRoleMapping("role-mapping-1"); + putMapping(nativeMapping1, createOrUpdateWarning(nativeMapping1.getName())); + + ExpressionRoleMapping nativeMapping4 = expressionRoleMapping("role-mapping-4"); + putMapping(nativeMapping4); + + expectMappings(List.of(clusterStateMapping1, clusterStateMapping2, clusterStateMapping3, nativeMapping1, nativeMapping4)); + expectMappings(List.of(clusterStateMapping1, nativeMapping1), "role-mapping-1"); + expectMappings(List.of(clusterStateMapping1, nativeMapping1), "role-mapping-1", clusterStateMapping1.getName()); + expectMappings(List.of(clusterStateMapping1), clusterStateMapping1.getName()); + expectMappings(List.of(nativeMapping4), "role-mapping-4"); + expectMappings(List.of(nativeMapping4), "role-mapping-4", "role-mapping-4-read-only-operator-mapping"); + } + + public void testPutAndDeleteRoleMappings() throws IOException { + { + var ex = expectThrows( + ResponseException.class, + () -> putMapping(expressionRoleMapping("role-mapping-1-read-only-operator-mapping")) + ); + assertThat( + ex.getMessage(), + containsString( + "Invalid mapping name [role-mapping-1-read-only-operator-mapping]. " + + "[-read-only-operator-mapping] is not an allowed suffix" + ) + ); + } + + // Also fails even if a CS role mapping with that name does not exist + { + var ex = expectThrows( + ResponseException.class, + () -> putMapping(expressionRoleMapping("role-mapping-4-read-only-operator-mapping")) + ); + assertThat( + ex.getMessage(), + containsString( + "Invalid mapping name [role-mapping-4-read-only-operator-mapping]. 
" + + "[-read-only-operator-mapping] is not an allowed suffix" + ) + ); + } + + assertOK(putMapping(expressionRoleMapping("role-mapping-1"), createOrUpdateWarning("role-mapping-1"))); + + assertOK(deleteMapping("role-mapping-1", deletionWarning("role-mapping-1"))); + + // 404 without warnings if no native mapping exists + expect404(() -> deleteMapping("role-mapping-1")); + } + + private static void expect404(ThrowingRunnable clientCall) { + var ex = expectThrows(ResponseException.class, clientCall); + assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + + private static Response putMapping(ExpressionRoleMapping roleMapping) throws IOException { + return putMapping(roleMapping, null); + } + + private static Response putMapping(ExpressionRoleMapping roleMapping, @Nullable String warning) throws IOException { + Request request = new Request("PUT", "/_security/role_mapping/" + roleMapping.getName()); + XContentBuilder xContent = roleMapping.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS); + request.setJsonEntity(BytesReference.bytes(xContent).utf8ToString()); + if (warning != null) { + request.setOptions( + RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(List.of(warning)) == false).build() + ); + } + return client().performRequest(request); + } + + private static Response deleteMapping(String name) throws IOException { + return deleteMapping(name, null); + } + + private static Response deleteMapping(String name, @Nullable String warning) throws IOException { + Request request = new Request("DELETE", "/_security/role_mapping/" + name); + if (warning != null) { + request.setOptions( + RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(List.of(warning)) == false).build() + ); + } + return client().performRequest(request); + } + + private static ExpressionRoleMapping expressionRoleMapping(String name) { + return new ExpressionRoleMapping( + name, + new FieldExpression("username", List.of(new FieldExpression.FieldValue(randomAlphaOfLength(10)))), + List.of(randomAlphaOfLength(5)), + null, + Map.of(), + true + ); + } + + @SuppressWarnings("unchecked") + private static void expectMappings(List expectedMappings, String... requestedMappingNames) throws IOException { + Map map = responseAsMap(getMappings(requestedMappingNames)); + assertThat( + map.keySet(), + containsInAnyOrder(expectedMappings.stream().map(ExpressionRoleMapping::getName).toList().toArray(new String[0])) + ); + List actualMappings = new ArrayList<>(); + for (Map.Entry entry : map.entrySet()) { + XContentParser body = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, (Map) entry.getValue()); + ExpressionRoleMapping actual = ExpressionRoleMapping.parse(entry.getKey(), body); + actualMappings.add(actual); + } + assertThat(actualMappings, containsInAnyOrder(expectedMappings.toArray(new ExpressionRoleMapping[0]))); + } + + private static Response getMappings(String... 
requestedMappingNames) throws IOException { + return client().performRequest(new Request("GET", "/_security/role_mapping/" + String.join(",", requestedMappingNames))); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + private static String createOrUpdateWarning(String mappingName) { + return "A read-only role mapping with the same name [" + + mappingName + + "] has been previously defined in a configuration file. " + + "Both role mappings will be used to determine role assignments."; + } + + private static String deletionWarning(String mappingName) { + return "A read-only role mapping with the same name [" + + mappingName + + "] has previously been defined in a configuration file. " + + "The native role mapping was deleted, but the read-only mapping will remain active " + + "and will be used to determine role assignments."; + }; +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index 3b6ffd0698623..fdd854e7a9673 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; @@ -45,6 +46,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -63,7 +65,6 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; @@ -271,21 +272,28 @@ private void assertRoleMappingsSaveOK(CountDownLatch savedClusterState, AtomicLo assertThat(resolveRolesFuture.get(), containsInAnyOrder("kibana_user", "fleet_user")); } - // the role mappings are not retrievable by the role mapping action (which only accesses "native" i.e. 
index-based role mappings) - var request = new GetRoleMappingsRequest(); - request.setNames("everyone_kibana", "everyone_fleet"); - var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertFalse(response.hasMappings()); - assertThat(response.mappings(), emptyArray()); - - // role mappings (with the same names) can also be stored in the "native" store - var putRoleMappingResponse = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_kibana")).actionGet(); - assertTrue(putRoleMappingResponse.isCreated()); - putRoleMappingResponse = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_fleet")).actionGet(); - assertTrue(putRoleMappingResponse.isCreated()); + // the role mappings are retrievable by the role mapping action for BWC + assertGetResponseHasMappings(true, "everyone_kibana", "everyone_fleet"); + + // role mappings (with the same names) can be stored in the "native" store + { + PutRoleMappingResponse response = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_kibana")) + .actionGet(); + assertTrue(response.isCreated()); + response = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_fleet")).actionGet(); + assertTrue(response.isCreated()); + } + { + // deleting role mappings that exist in the native store and in cluster-state should result in success + var response = client().execute(DeleteRoleMappingAction.INSTANCE, deleteRequest("everyone_kibana")).actionGet(); + assertTrue(response.isFound()); + response = client().execute(DeleteRoleMappingAction.INSTANCE, deleteRequest("everyone_fleet")).actionGet(); + assertTrue(response.isFound()); + } + } - public void testRoleMappingsApplied() throws Exception { + public void testClusterStateRoleMappingsAddedThenDeleted() throws Exception { ensureGreen(); var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); @@ -294,6 +302,12 @@ public void testRoleMappingsApplied() throws Exception { assertRoleMappingsSaveOK(savedClusterState.v1(), savedClusterState.v2()); logger.info("---> cleanup cluster settings..."); + { + // Deleting non-existent native role mappings returns not found even if they exist in config file + var response = client().execute(DeleteRoleMappingAction.INSTANCE, deleteRequest("everyone_kibana")).get(); + assertFalse(response.isFound()); + } + savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); @@ -308,48 +322,96 @@ public void testRoleMappingsApplied() throws Exception { clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()) ); - // native role mappings are not affected by the removal of the cluster-state based ones + // cluster-state role mapping was removed and is not returned in the API anymore { var request = new GetRoleMappingsRequest(); request.setNames("everyone_kibana", "everyone_fleet"); var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertTrue(response.hasMappings()); - assertThat( - Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), - containsInAnyOrder("everyone_kibana", "everyone_fleet") - ); + assertFalse(response.hasMappings()); } - // and roles are resolved based on the native role mappings + // no role mappings means no roles are resolved for (UserRoleMapper userRoleMapper : 
internalCluster().getInstances(UserRoleMapper.class)) { PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); userRoleMapper.resolveRoles( new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), resolveRolesFuture ); - assertThat(resolveRolesFuture.get(), contains("kibana_user_native")); + assertThat(resolveRolesFuture.get(), empty()); } + } - { - var request = new DeleteRoleMappingRequest(); - request.setName("everyone_kibana"); - var response = client().execute(DeleteRoleMappingAction.INSTANCE, request).get(); - assertTrue(response.isFound()); - request = new DeleteRoleMappingRequest(); - request.setName("everyone_fleet"); - response = client().execute(DeleteRoleMappingAction.INSTANCE, request).get(); - assertTrue(response.isFound()); + public void testGetRoleMappings() throws Exception { + ensureGreen(); + + final List nativeMappings = List.of("everyone_kibana", "_everyone_kibana", "zzz_mapping", "123_mapping"); + for (var mapping : nativeMappings) { + client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest(mapping)).actionGet(); } - // no roles are resolved now, because both native and cluster-state based stores have been cleared - for (UserRoleMapper userRoleMapper : internalCluster().getInstances(UserRoleMapper.class)) { - PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); - userRoleMapper.resolveRoles( - new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), - resolveRolesFuture - ); - assertThat(resolveRolesFuture.get(), empty()); + var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); + writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + var request = new GetRoleMappingsRequest(); + var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder( + "everyone_kibana", + ExpressionRoleMapping.addReadOnlySuffix("everyone_kibana"), + "_everyone_kibana", + ExpressionRoleMapping.addReadOnlySuffix("everyone_fleet"), + "zzz_mapping", + "123_mapping" + ) + ); + + List readOnlyFlags = new ArrayList<>(); + for (ExpressionRoleMapping mapping : response.mappings()) { + boolean isReadOnly = ExpressionRoleMapping.hasReadOnlySuffix(mapping.getName()) + && mapping.getMetadata().get("_read_only") != null; + readOnlyFlags.add(isReadOnly); } + // assert that cluster-state role mappings come last + assertThat(readOnlyFlags, contains(false, false, false, false, true, true)); + + // it's possible to delete overlapping native role mapping + assertTrue(client().execute(DeleteRoleMappingAction.INSTANCE, deleteRequest("everyone_kibana")).actionGet().isFound()); + + // Fetch a specific file based role + request = new GetRoleMappingsRequest(); + request.setNames(ExpressionRoleMapping.addReadOnlySuffix("everyone_kibana")); + response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder(ExpressionRoleMapping.addReadOnlySuffix("everyone_kibana")) + ); + + savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); + 
writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + final ClusterStateResponse clusterStateResponse = clusterAdmin().state( + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(savedClusterState.v2().get()) + ).get(); + + assertNull( + clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()) + ); + + // Make sure remaining native mappings can still be fetched + request = new GetRoleMappingsRequest(); + response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder("_everyone_kibana", "zzz_mapping", "123_mapping") + ); } public static Tuple setupClusterStateListenerForError( @@ -434,11 +496,8 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - // no native role mappings exist - var request = new GetRoleMappingsRequest(); - request.setNames("everyone_kibana", "everyone_fleet"); - var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertFalse(response.hasMappings()); + // even if index is closed, cluster-state role mappings are still returned + assertGetResponseHasMappings(true, "everyone_kibana", "everyone_fleet"); // cluster state settings are also applied var clusterStateResponse = clusterAdmin().state( @@ -477,6 +536,12 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { } } + private DeleteRoleMappingRequest deleteRequest(String name) { + var request = new DeleteRoleMappingRequest(); + request.setName(name); + return request; + } + private PutRoleMappingRequest sampleRestRequest(String name) throws Exception { var json = """ { @@ -495,4 +560,19 @@ private PutRoleMappingRequest sampleRestRequest(String name) throws Exception { return new PutRoleMappingRequestBuilder(null).source(name, parser).request(); } } + + private static void assertGetResponseHasMappings(boolean readOnly, String... mappings) throws InterruptedException, ExecutionException { + var request = new GetRoleMappingsRequest(); + request.setNames(mappings); + var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder( + Arrays.stream(mappings) + .map(mapping -> readOnly ? 
ExpressionRoleMapping.addReadOnlySuffix(mapping) : mapping) + .toArray(String[]::new) + ) + ); + } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java index 6c6582138ce89..97a5f080cee4e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java @@ -30,6 +30,7 @@ import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListenerForCleanup; import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFile; import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFileWithoutVersionIncrement; +import static org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata.METADATA_NAME_FIELD; import static org.hamcrest.Matchers.containsInAnyOrder; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) @@ -123,7 +124,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("kibana_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), true ), new ExpressionRoleMapping( @@ -131,7 +132,14 @@ public void testReservedStatePersistsOnRestart() throws Exception { new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("fleet_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", "_foo", "something_else"), + Map.of( + "uuid", + "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo", + "something_else", + METADATA_NAME_FIELD, + "everyone_fleet_alone" + ), false ) ); @@ -141,26 +149,29 @@ public void testReservedStatePersistsOnRestart() throws Exception { ensureGreen(); awaitFileSettingsWatcher(); - // assert busy to give mappings time to update after restart; otherwise, the role mapping names might be dummy values - // `name_not_available_after_deserialization` - assertBusy( - () -> assertRoleMappingsInClusterState( - new ExpressionRoleMapping( - "everyone_kibana_alone", - new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), - List.of("kibana_user"), - List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), - true + assertRoleMappingsInClusterState( + new ExpressionRoleMapping( + "everyone_kibana_alone", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), + List.of("kibana_user"), + List.of(), + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), + true + ), + new ExpressionRoleMapping( + "everyone_fleet_alone", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), + List.of("fleet_user"), + List.of(), + Map.of( + "uuid", + "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo", + "something_else", + METADATA_NAME_FIELD, + "everyone_fleet_alone" ), - new ExpressionRoleMapping( - "everyone_fleet_alone", - new FieldExpression("username", List.of(new 
FieldExpression.FieldValue("*"))), - List.of("fleet_user"), - List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", "_foo", "something_else"), - false - ) + false ) ); @@ -197,7 +208,7 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("kibana_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), true ), new ExpressionRoleMapping( @@ -205,7 +216,14 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("fleet_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", "_foo", "something_else"), + Map.of( + "uuid", + "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo", + "something_else", + METADATA_NAME_FIELD, + "everyone_fleet_alone" + ), false ) ); @@ -225,7 +243,7 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("kibana_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), true ), new ExpressionRoleMapping( @@ -233,7 +251,14 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("fleet_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", "_foo", "something_else"), + Map.of( + "uuid", + "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo", + "something_else", + METADATA_NAME_FIELD, + "everyone_fleet_alone" + ), false ) ); @@ -251,7 +276,14 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("kibana_user", "kibana_admin"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), + Map.of( + "uuid", + "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", + "_foo", + "something", + METADATA_NAME_FIELD, + "everyone_kibana_together" + ), true ) ) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 79a00fa1293bd..8f32bcf7ace8a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -899,6 +899,7 @@ Collection createComponents( components.add(nativeUsersStore); components.add(new PluginComponentBinding<>(NativeRoleMappingStore.class, nativeRoleMappingStore)); components.add(new PluginComponentBinding<>(UserRoleMapper.class, userRoleMapper)); + components.add(clusterStateRoleMapper); components.add(reservedRealm); components.add(realms); this.realms.set(realms); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java index 73d1a1abcdb50..837b475dea68f 100644 
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java @@ -43,7 +43,7 @@ public String name() { @Override public TransformState transform(Object source, TransformState prevState) throws Exception { @SuppressWarnings("unchecked") - Set roleMappings = validate((List) source); + Set roleMappings = validateAndTranslate((List) source); RoleMappingMetadata newRoleMappingMetadata = new RoleMappingMetadata(roleMappings); if (newRoleMappingMetadata.equals(RoleMappingMetadata.getFromClusterState(prevState.state()))) { return prevState; @@ -71,7 +71,7 @@ public List fromXContent(XContentParser parser) throws IO return result; } - private Set validate(List roleMappings) { + private Set validateAndTranslate(List roleMappings) { var exceptions = new ArrayList(); for (var roleMapping : roleMappings) { // File based defined role mappings are allowed to use MetadataUtils.RESERVED_PREFIX @@ -85,6 +85,8 @@ private Set validate(List roleMapp exceptions.forEach(illegalArgumentException::addSuppressed); throw illegalArgumentException; } - return roleMappings.stream().map(PutRoleMappingRequest::getMapping).collect(Collectors.toUnmodifiableSet()); + return roleMappings.stream() + .map(r -> RoleMappingMetadata.copyWithNameInMetadata(r.getMapping())) + .collect(Collectors.toUnmodifiableSet()); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java index 74129facae70a..b1fdf2e90dd46 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; @@ -16,17 +17,19 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; public class TransportDeleteRoleMappingAction extends HandledTransportAction { - private final NativeRoleMappingStore roleMappingStore; + private final ClusterStateRoleMapper clusterStateRoleMapper; @Inject public TransportDeleteRoleMappingAction( ActionFilters actionFilters, TransportService transportService, - NativeRoleMappingStore roleMappingStore + NativeRoleMappingStore roleMappingStore, + ClusterStateRoleMapper clusterStateRoleMapper ) { super( DeleteRoleMappingAction.NAME, @@ -36,10 +39,24 @@ public TransportDeleteRoleMappingAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.roleMappingStore = roleMappingStore; + 
this.clusterStateRoleMapper = clusterStateRoleMapper; } @Override protected void doExecute(Task task, DeleteRoleMappingRequest request, ActionListener listener) { - roleMappingStore.deleteRoleMapping(request, listener.safeMap(DeleteRoleMappingResponse::new)); + roleMappingStore.deleteRoleMapping(request, listener.safeMap(found -> { + if (found && clusterStateRoleMapper.hasMapping(request.getName())) { + // Allow to delete a mapping with the same name in the native role mapping store as the file_settings namespace, but + // add a warning header to signal to the caller that this could be a problem. + HeaderWarning.addWarning( + "A read-only role mapping with the same name [" + + request.getName() + + "] has previously been defined in a configuration file. " + + "The native role mapping was deleted, but the read-only mapping will remain active " + + "and will be used to determine role assignments." + ); + } + return new DeleteRoleMappingResponse(found); + })); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java index ac0d3177cca09..5f16b095db0ef 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.security.action.rolemapping; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -17,21 +19,31 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; public class TransportGetRoleMappingsAction extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportGetRoleMappingsAction.class); private final NativeRoleMappingStore roleMappingStore; + private final ClusterStateRoleMapper clusterStateRoleMapper; @Inject public TransportGetRoleMappingsAction( ActionFilters actionFilters, TransportService transportService, - NativeRoleMappingStore nativeRoleMappingStore + NativeRoleMappingStore nativeRoleMappingStore, + ClusterStateRoleMapper clusterStateRoleMapper ) { super( GetRoleMappingsAction.NAME, @@ -41,19 +53,84 @@ public TransportGetRoleMappingsAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.roleMappingStore = nativeRoleMappingStore; + this.clusterStateRoleMapper = clusterStateRoleMapper; } @Override protected void doExecute(Task task, final GetRoleMappingsRequest request, final ActionListener listener) { final Set 
names; if (request.getNames() == null || request.getNames().length == 0) { - names = null; + names = Set.of(); } else { names = new HashSet<>(Arrays.asList(request.getNames())); } - this.roleMappingStore.getRoleMappings(names, ActionListener.wrap(mappings -> { - ExpressionRoleMapping[] array = mappings.toArray(new ExpressionRoleMapping[mappings.size()]); - listener.onResponse(new GetRoleMappingsResponse(array)); + roleMappingStore.getRoleMappings(names, ActionListener.wrap(nativeRoleMappings -> { + final Collection clusterStateRoleMappings = clusterStateRoleMapper.getMappings( + // if the API was queried with a reserved suffix for any of the names, we need to remove it because role mappings are + // stored without it in cluster-state + removeReadOnlySuffixIfPresent(names) + ); + listener.onResponse(buildResponse(clusterStateRoleMappings, nativeRoleMappings)); }, listener::onFailure)); } + + private GetRoleMappingsResponse buildResponse( + Collection clusterStateMappings, + Collection nativeMappings + ) { + Stream translatedClusterStateMappings = clusterStateMappings.stream().filter(roleMapping -> { + if (RoleMappingMetadata.hasFallbackName(roleMapping)) { + logger.warn( + "Role mapping retrieved from cluster-state with an ambiguous name. It will be omitted from the API response." + + "This is likely a transient issue during node start-up." + ); + return false; + } + return true; + }).map(this::translateClusterStateMapping); + return new GetRoleMappingsResponse( + Stream.concat(nativeMappings.stream(), translatedClusterStateMappings).toArray(ExpressionRoleMapping[]::new) + ); + } + + private Set removeReadOnlySuffixIfPresent(Set names) { + return names.stream().map(ExpressionRoleMapping::removeReadOnlySuffixIfPresent).collect(Collectors.toSet()); + } + + /** + * Translator method for ensuring unique API names and marking cluster-state role mappings as read-only. + * Role mappings retrieved from cluster-state are surfaced through both the transport and REST layers, + * along with native role mappings. Unlike native role mappings, cluster-state role mappings are + * read-only and cannot be modified via APIs. It is possible for cluster-state and native role mappings + * to have overlapping names. + * + *

+ * This does the following:
+ * <ol>
+ * <li>Appends a reserved suffix to cluster-state role mapping names to avoid conflicts with native role mappings.</li>
+ * <li>Marks the metadata of cluster-state role mappings with a reserved read-only flag.</li>
+ * <li>Removes internal metadata flag used in processing (see {@link RoleMappingMetadata#METADATA_NAME_FIELD}).</li>
+ * </ol>
+ */ + private ExpressionRoleMapping translateClusterStateMapping(ExpressionRoleMapping mapping) { + Map metadata = new HashMap<>(mapping.getMetadata()); + if (metadata.put(ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true) != null) { + logger.error( + "Metadata field [{}] is reserved and will be overwritten with an internal system value. " + + "Rename this field in your role mapping configuration.", + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG + ); + } + metadata.remove(RoleMappingMetadata.METADATA_NAME_FIELD); + return new ExpressionRoleMapping( + ExpressionRoleMapping.addReadOnlySuffix(mapping.getName()), + mapping.getExpression(), + mapping.getRoles(), + mapping.getRoleTemplates(), + metadata, + mapping.isEnabled() + ); + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java index 82a3b4f000064..682ade925d2ec 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; @@ -16,24 +17,41 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import static org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping.validateNoReadOnlySuffix; + public class TransportPutRoleMappingAction extends HandledTransportAction { private final NativeRoleMappingStore roleMappingStore; + private final ClusterStateRoleMapper clusterStateRoleMapper; @Inject public TransportPutRoleMappingAction( ActionFilters actionFilters, TransportService transportService, - NativeRoleMappingStore roleMappingStore + NativeRoleMappingStore roleMappingStore, + ClusterStateRoleMapper clusterStateRoleMapper ) { super(PutRoleMappingAction.NAME, transportService, actionFilters, PutRoleMappingRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.roleMappingStore = roleMappingStore; + this.clusterStateRoleMapper = clusterStateRoleMapper; } @Override protected void doExecute(Task task, final PutRoleMappingRequest request, final ActionListener listener) { + validateNoReadOnlySuffix(request.getName()); + if (clusterStateRoleMapper.hasMapping(request.getName())) { + // Allow to define a mapping with the same name in the native role mapping store as the file_settings namespace, but add a + // warning header to signal to the caller that this could be a problem. + HeaderWarning.addWarning( + "A read-only role mapping with the same name [" + + request.getName() + + "] has been previously defined in a configuration file. 
" + + "Both role mappings will be used to determine role assignments." + ); + } roleMappingStore.putRoleMapping( request, ActionListener.wrap(created -> listener.onResponse(new PutRoleMappingResponse(created)), listener::onFailure) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java index 5dea6a938263c..99e3311283920 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; import org.elasticsearch.script.ScriptService; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; @@ -21,6 +22,7 @@ import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.security.SecurityExtension.SecurityComponents; @@ -28,8 +30,7 @@ * A role mapper the reads the role mapping rules (i.e. {@link ExpressionRoleMapping}s) from the cluster state * (i.e. {@link RoleMappingMetadata}). This is not enabled by default. */ -public final class ClusterStateRoleMapper extends AbstractRoleMapperClearRealmCache implements ClusterStateListener { - +public class ClusterStateRoleMapper extends AbstractRoleMapperClearRealmCache implements ClusterStateListener { /** * This setting is never registered by the xpack security plugin - in order to disable the * cluster-state based role mapper another plugin must register it as a boolean setting @@ -81,13 +82,26 @@ public void clusterChanged(ClusterChangedEvent event) { } } - private Set getMappings() { + public boolean hasMapping(String name) { + if (enabled == false) { + return false; + } + return false == getMappings(Set.of(name)).isEmpty(); + } + + public Set getMappings() { + return getMappings(null); + } + + public Set getMappings(@Nullable Set names) { if (enabled == false) { return Set.of(); - } else { - final Set mappings = RoleMappingMetadata.getFromClusterState(clusterService.state()).getRoleMappings(); - logger.trace("Retrieved [{}] mapping(s) from cluster state", mappings.size()); + } + final Set mappings = RoleMappingMetadata.getFromClusterState(clusterService.state()).getRoleMappings(); + logger.trace("Retrieved [{}] mapping(s) from cluster state", mappings.size()); + if (names == null || names.isEmpty()) { return mappings; } + return mappings.stream().filter(roleMapping -> names.contains(roleMapping.getName())).collect(Collectors.toSet()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java index 6e8698f095d32..010c19e8cc1b1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; @@ -19,21 +18,26 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; -import org.hamcrest.Matchers; import org.junit.Before; -import java.util.Arrays; +import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; -import static org.hamcrest.Matchers.arrayContaining; +import static org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG; +import static org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -42,8 +46,10 @@ public class TransportGetRoleMappingsActionTests extends ESTestCase { private NativeRoleMappingStore store; private TransportGetRoleMappingsAction action; - private AtomicReference> namesRef; - private List result; + private AtomicReference> nativeNamesRef; + private AtomicReference> clusterStateNamesRef; + private List nativeMappings; + private Set clusterStateMappings; @SuppressWarnings("unchecked") @Before @@ -58,68 +64,219 @@ public void setupMocks() { null, Collections.emptySet() ); - action = new TransportGetRoleMappingsAction(mock(ActionFilters.class), transportService, store); + ClusterStateRoleMapper clusterStateRoleMapper = mock(); + action = new TransportGetRoleMappingsAction(mock(ActionFilters.class), transportService, store, clusterStateRoleMapper); - namesRef = new AtomicReference<>(null); - result = Collections.emptyList(); + nativeNamesRef = new AtomicReference<>(null); + clusterStateNamesRef = new AtomicReference<>(null); + nativeMappings = Collections.emptyList(); + clusterStateMappings = Collections.emptySet(); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 1; + clusterStateNamesRef.set((Set) args[0]); + return clusterStateMappings; + }).when(clusterStateRoleMapper).getMappings(anySet()); doAnswer(invocation -> { Object[] args = invocation.getArguments(); assert args.length == 2; - namesRef.set((Set) args[0]); + nativeNamesRef.set((Set) args[0]); ActionListener> listener = (ActionListener>) args[1]; - listener.onResponse(result); + 
listener.onResponse(nativeMappings); return null; }).when(store).getRoleMappings(nullable(Set.class), any(ActionListener.class)); } - public void testGetSingleRole() throws Exception { - final PlainActionFuture future = new PlainActionFuture<>(); - final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); - request.setNames("everyone"); + public void testGetSingleRoleMappingNativeOnly() throws Exception { + testGetMappings(List.of(mapping("everyone")), Collections.emptySet(), Set.of("everyone"), Set.of("everyone"), "everyone"); + } - final ExpressionRoleMapping mapping = mock(ExpressionRoleMapping.class); - result = Collections.singletonList(mapping); - action.doExecute(mock(Task.class), request, future); - assertThat(future.get(), notNullValue()); - assertThat(future.get().mappings(), arrayContaining(mapping)); - assertThat(namesRef.get(), containsInAnyOrder("everyone")); + public void testGetMultipleNamedRoleMappingsNativeOnly() throws Exception { + testGetMappings( + List.of(mapping("admin"), mapping("engineering"), mapping("sales"), mapping("finance")), + Collections.emptySet(), + Set.of("admin", "engineering", "sales", "finance"), + Set.of("admin", "engineering", "sales", "finance"), + "admin", + "engineering", + "sales", + "finance" + ); } - public void testGetMultipleNamedRoles() throws Exception { - final PlainActionFuture future = new PlainActionFuture<>(); - final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); - request.setNames("admin", "engineering", "sales", "finance"); + public void testGetAllRoleMappingsNativeOnly() throws Exception { + testGetMappings( + List.of(mapping("admin"), mapping("engineering"), mapping("sales"), mapping("finance")), + Collections.emptySet(), + Set.of(), + Set.of() + ); + } - final ExpressionRoleMapping mapping1 = mock(ExpressionRoleMapping.class); - final ExpressionRoleMapping mapping2 = mock(ExpressionRoleMapping.class); - final ExpressionRoleMapping mapping3 = mock(ExpressionRoleMapping.class); - result = Arrays.asList(mapping1, mapping2, mapping3); + public void testGetSingleRoleMappingClusterStateOnly() throws Exception { + testGetMappings(List.of(), Set.of(mapping("everyone")), Set.of("everyone"), Set.of("everyone"), "everyone"); + } - action.doExecute(mock(Task.class), request, future); + public void testGetMultipleNamedRoleMappingsClusterStateOnly() throws Exception { + testGetMappings( + List.of(), + Set.of(mapping("admin"), mapping("engineering"), mapping("sales"), mapping("finance")), + Set.of("admin", "engineering", "sales", "finance"), + Set.of("admin", "engineering", "sales", "finance"), + "admin", + "engineering", + "sales", + "finance" + ); + } + + public void testGetAllRoleMappingsClusterStateOnly() throws Exception { + testGetMappings( + List.of(), + Set.of(mapping("admin"), mapping("engineering"), mapping("sales"), mapping("finance")), + Set.of(), + Set.of() + ); + } + + public void testGetSingleRoleMappingBoth() throws Exception { + testGetMappings(List.of(mapping("everyone")), Set.of(mapping("everyone")), Set.of("everyone"), Set.of("everyone"), "everyone"); + } + + public void testGetMultipleNamedRoleMappingsBoth() throws Exception { + testGetMappings( + List.of(mapping("admin"), mapping("engineering")), + Set.of(mapping("sales"), mapping("finance")), + Set.of("admin", "engineering", "sales", "finance"), + Set.of("admin", "engineering", "sales", "finance"), + "admin", + "engineering", + "sales", + "finance" + ); + } + + public void testGetAllRoleMappingsClusterBoth() throws Exception { + 
testGetMappings(List.of(mapping("admin"), mapping("engineering")), Set.of(mapping("admin"), mapping("sales")), Set.of(), Set.of()); + } + + public void testGetSingleRoleMappingQueryWithReadOnlySuffix() throws Exception { + testGetMappings( + List.of(), + Set.of(mapping("everyone")), + // suffix not stripped for native store query + Set.of("everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX), + // suffix is stripped for cluster state store + Set.of("everyone"), + "everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX + ); + + testGetMappings( + List.of(), + Set.of(mapping("everyoneread-only-operator-mapping")), + Set.of( + "everyoneread-only-operator-mapping", + "everyone-read-only-operator-mapping-", + "everyone-read-only-operator-mapping-more" + ), + // suffix that is similar but not the same is not stripped + Set.of( + "everyoneread-only-operator-mapping", + "everyone-read-only-operator-mapping-", + "everyone-read-only-operator-mapping-more" + ), + "everyoneread-only-operator-mapping", + "everyone-read-only-operator-mapping-", + "everyone-read-only-operator-mapping-more" + ); + + testGetMappings( + List.of(mapping("everyone")), + Set.of(mapping("everyone")), + // suffix not stripped for native store query + Set.of("everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX, "everyone"), + // suffix is stripped for cluster state store + Set.of("everyone"), + "everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX, + "everyone" + ); + } + + public void testClusterStateRoleMappingWithFallbackNameOmitted() throws ExecutionException, InterruptedException { + testGetMappings( + List.of(), + Set.of(mapping("name_not_available_after_deserialization")), + Set.of(), + Set.of("name_not_available_after_deserialization"), + Set.of("name_not_available_after_deserialization"), + "name_not_available_after_deserialization" + ); - final GetRoleMappingsResponse response = future.get(); - assertThat(response, notNullValue()); - assertThat(response.mappings(), arrayContainingInAnyOrder(mapping1, mapping2, mapping3)); - assertThat(namesRef.get(), containsInAnyOrder("admin", "engineering", "sales", "finance")); + testGetMappings( + List.of(mapping("name_not_available_after_deserialization")), + Set.of(mapping("name_not_available_after_deserialization")), + Set.of(), + Set.of("name_not_available_after_deserialization"), + Set.of("name_not_available_after_deserialization"), + "name_not_available_after_deserialization" + ); + } + + private void testGetMappings( + List returnedNativeMappings, + Set returnedClusterStateMappings, + Set expectedNativeNames, + Set expectedClusterStateNames, + String... names + ) throws InterruptedException, ExecutionException { + testGetMappings( + returnedNativeMappings, + returnedClusterStateMappings, + returnedClusterStateMappings.stream().map(this::expectedClusterStateMapping).collect(Collectors.toSet()), + expectedNativeNames, + expectedClusterStateNames, + names + ); } - public void testGetAllRoles() throws Exception { + private void testGetMappings( + List returnedNativeMappings, + Set returnedClusterStateMappings, + Set expectedClusterStateMappings, + Set expectedNativeNames, + Set expectedClusterStateNames, + String... 
names + ) throws InterruptedException, ExecutionException { final PlainActionFuture future = new PlainActionFuture<>(); final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); - request.setNames(Strings.EMPTY_ARRAY); - - final ExpressionRoleMapping mapping1 = mock(ExpressionRoleMapping.class); - final ExpressionRoleMapping mapping2 = mock(ExpressionRoleMapping.class); - final ExpressionRoleMapping mapping3 = mock(ExpressionRoleMapping.class); - result = Arrays.asList(mapping1, mapping2, mapping3); + request.setNames(names); + nativeMappings = returnedNativeMappings; + clusterStateMappings = returnedClusterStateMappings; action.doExecute(mock(Task.class), request, future); + assertThat(future.get(), notNullValue()); + List combined = new ArrayList<>(returnedNativeMappings); + combined.addAll(expectedClusterStateMappings); + ExpressionRoleMapping[] actualMappings = future.get().mappings(); + assertThat(actualMappings, arrayContainingInAnyOrder(combined.toArray(new ExpressionRoleMapping[0]))); + assertThat(nativeNamesRef.get(), containsInAnyOrder(expectedNativeNames.toArray(new String[0]))); + assertThat(clusterStateNamesRef.get(), containsInAnyOrder(expectedClusterStateNames.toArray(new String[0]))); + } - final GetRoleMappingsResponse response = future.get(); - assertThat(response, notNullValue()); - assertThat(response.mappings(), arrayContainingInAnyOrder(mapping1, mapping2, mapping3)); - assertThat(namesRef.get(), Matchers.nullValue(Set.class)); + private ExpressionRoleMapping mapping(String name) { + return new ExpressionRoleMapping(name, null, null, null, Map.of(), true); } + private ExpressionRoleMapping expectedClusterStateMapping(ExpressionRoleMapping mapping) { + return new ExpressionRoleMapping( + mapping.getName() + READ_ONLY_ROLE_MAPPING_SUFFIX, + null, + null, + null, + Map.of(READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true), + true + ); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java index 6f789a10a3a6c..6d1ac864d20fd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.junit.Before; @@ -29,18 +30,21 @@ import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class TransportPutRoleMappingActionTests extends ESTestCase { private NativeRoleMappingStore 
store; private TransportPutRoleMappingAction action; private AtomicReference requestRef; + private ClusterStateRoleMapper clusterStateRoleMapper; @SuppressWarnings("unchecked") @Before @@ -55,7 +59,9 @@ public void setupMocks() { null, Collections.emptySet() ); - action = new TransportPutRoleMappingAction(mock(ActionFilters.class), transportService, store); + clusterStateRoleMapper = mock(); + when(clusterStateRoleMapper.hasMapping(any())).thenReturn(false); + action = new TransportPutRoleMappingAction(mock(ActionFilters.class), transportService, store, clusterStateRoleMapper); requestRef = new AtomicReference<>(null); @@ -85,6 +91,41 @@ public void testPutValidMapping() throws Exception { assertThat(mapping.getMetadata().get("dumb"), equalTo(true)); } + public void testValidMappingClashingClusterStateMapping() throws Exception { + final FieldExpression expression = new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*"))); + final PutRoleMappingResponse response = put("anarchy", expression, "superuser", Collections.singletonMap("dumb", true)); + when(clusterStateRoleMapper.hasMapping(any())).thenReturn(true); + + assertThat(response.isCreated(), equalTo(true)); + + final ExpressionRoleMapping mapping = requestRef.get().getMapping(); + assertThat(mapping.getExpression(), is(expression)); + assertThat(mapping.isEnabled(), equalTo(true)); + assertThat(mapping.getName(), equalTo("anarchy")); + assertThat(mapping.getRoles(), iterableWithSize(1)); + assertThat(mapping.getRoles(), contains("superuser")); + assertThat(mapping.getMetadata(), aMapWithSize(1)); + assertThat(mapping.getMetadata().get("dumb"), equalTo(true)); + } + + public void testInvalidSuffix() { + final FieldExpression expression = new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*"))); + String name = ExpressionRoleMapping.addReadOnlySuffix("anarchy"); + final var ex = expectThrows(IllegalArgumentException.class, () -> { + put(name, expression, "superuser", Collections.singletonMap("dumb", true)); + }); + assertThat( + ex.getMessage(), + containsString( + "Invalid mapping name [" + + name + + "]. [" + + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX + + "] is not an allowed suffix" + ) + ); + } + private PutRoleMappingResponse put(String name, FieldExpression expression, String role, Map metadata) throws Exception { final PutRoleMappingRequest request = new PutRoleMappingRequest(); From ac7db5d11e771674df5957077acae1e855eb6f0c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 22 Oct 2024 10:36:40 -0400 Subject: [PATCH 018/202] ESQL: Skip unsupported grapheme cluster test (#115258) (#115321) This skips the test for reversing grapheme clusters if the node doesn't support reversing grapheme clusters. 
Nodes that are using a jdk before 20 won't support reversing grapheme clusters because they don't have https://bugs.openjdk.org/browse/JDK-8292387

This reworks `EsqlCapabilities` so we can easily register it only if we're on jdk 20:
```
FN_REVERSE_GRAPHEME_CLUSTERS(Runtime.version().feature() >= 20),
```
Closes #114537 Closes #114535 Closes #114536 Closes #114558 Closes #114559 Closes #114560
--- .../src/main/resources/string.csv-spec | 1 + .../xpack/esql/action/EsqlCapabilities.java | 48 ++++++++++--------- .../elasticsearch/xpack/esql/CsvTests.java | 2 +- 3 files changed, 27 insertions(+), 24 deletions(-)
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 5313e6630c75d..dd9d519649c01 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1236,6 +1236,7 @@ off_on_holiday:keyword | back_home_again:keyword reverseGraphemeClusters required_capability: fn_reverse +required_capability: fn_reverse_grapheme_clusters ROW message = "áéíóúàèìòùâêîôû😊👍🏽🎉💖कंठाी" | EVAL message_reversed = REVERSE(message); message:keyword | message_reversed:keyword
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 3c39406198da3..df9f14a6ac227 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -32,6 +32,13 @@ public enum Cap { */ FN_REVERSE, + /** + * Support for reversing whole grapheme clusters. This is not supported + * on JDK versions less than 20 which are not supported in ES 9.0.0+ but this + * exists to keep the {@code 8.x} branch similar to the {@code main} branch. + */ + FN_REVERSE_GRAPHEME_CLUSTERS, + /** * Support for function {@code CBRT}. Done in #108574. */ @@ -133,7 +140,7 @@ public enum Cap { * - fixed variable shadowing * - fixed Join.references(), requiring breaking change to Join serialization */ - LOOKUP_V4(true), + LOOKUP_V4(Build.current().isSnapshot()), /** * Support for requesting the "REPEAT" command. @@ -279,7 +286,7 @@ public enum Cap { /** * Support for match operator */ - MATCH_OPERATOR(true), + MATCH_OPERATOR(Build.current().isSnapshot()), /** * Removing support for the {@code META} keyword. @@ -349,7 +356,7 @@ public enum Cap { /** * Supported the text categorization function "CATEGORIZE". */ - CATEGORIZE(true), + CATEGORIZE(Build.current().isSnapshot()), /** * QSTR function @@ -375,7 +382,7 @@ public enum Cap { /** * Support named parameters for field names. */ - NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES(true), + NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES(Build.current().isSnapshot()), /** * Fix sorting not allowed on _source and counters.
@@ -401,32 +408,22 @@ */ SEMANTIC_TEXT_TYPE(EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG); - private final boolean snapshotOnly; - private final FeatureFlag featureFlag; + private final boolean enabled; Cap() { - this(false, null); + this.enabled = true; }; - Cap(boolean snapshotOnly) { - this(snapshotOnly, null); + Cap(boolean enabled) { + this.enabled = enabled; }; Cap(FeatureFlag featureFlag) { - this(false, featureFlag); - } - - Cap(boolean snapshotOnly, FeatureFlag featureFlag) { - assert featureFlag == null || snapshotOnly == false; - this.snapshotOnly = snapshotOnly; - this.featureFlag = featureFlag; + this.enabled = featureFlag.isEnabled(); } public boolean isEnabled() { - if (featureFlag == null) { - return Build.current().isSnapshot() || this.snapshotOnly == false; - } - return featureFlag.isEnabled(); + return enabled; } public String capabilityName() { @@ -434,12 +431,17 @@ public String capabilityName() { } } - public static final Set CAPABILITIES = capabilities(); + public static final Set CAPABILITIES = capabilities(false); - private static Set capabilities() { + /** + * Get a {@link Set} of all capabilities. If the {@code all} parameter is {@code false} + * then only enabled capabilities are returned - otherwise all + * known capabilities are returned. + */ + public static Set capabilities(boolean all) { List caps = new ArrayList<>(); for (Cap cap : Cap.values()) { - if (cap.isEnabled()) { + if (all || cap.isEnabled()) { caps.add(cap.capabilityName()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index ce072e7b0a438..63233f0c46a0d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -257,7 +257,7 @@ public final void test() throws Throwable { assertThat( "Capability is not included in the enabled list capabilities on a snapshot build. Spelling mistake?", testCase.requiredCapabilities, - everyItem(in(EsqlCapabilities.CAPABILITIES)) + everyItem(in(EsqlCapabilities.capabilities(true))) ); } else { for (EsqlCapabilities.Cap c : EsqlCapabilities.Cap.values()) { From bd54bffab2e421e25e7066b70417d279625fd705 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 22 Oct 2024 16:43:42 +0200 Subject: [PATCH 019/202] Rename methods in o.e.x.c.security.support.Automatons (#114594) Lucene 10 stopped relying on automaton minimization and moved the underlying Hopcroft algorithm to test code (for reasoning see https://github.com/apache/lucene/pull/528). With the upgrade to Lucene 10 we currently also only determinize automata. The security Automatons utility class currently contains several methods that sound like they would minimize the automaton, but this has changed, so this PR also changes the method names accordingly.
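As a quick reference for reviewers, the post-rename contract can be sketched in isolation (illustrative code only, not the production helper; it assumes Lucene's `Operations` and `Automata` APIs): the helpers still union, intersect, or subtract automata and then determinize the result, but no Hopcroft minimization runs afterwards, so the output is deterministic yet not necessarily state-minimal.
```
import java.util.List;

import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.Operations;

class DeterminizeOnlySketch {
    // Union the inputs, then determinize within a work limit. Unlike the old
    // unionAndMinimize, the result may keep redundant states, but it is
    // deterministic, which is what Operations.run/subsetOf-style checks need.
    static Automaton unionAndDeterminize(List<Automaton> automata) {
        Automaton union = automata.size() == 1 ? automata.get(0) : Operations.union(automata);
        return Operations.determinize(union, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
    }

    public static void main(String[] args) {
        Automaton merged = unionAndDeterminize(
            List.of(Automata.makeString("indices:data/read/get"), Automata.makeString("indices:data/read/search"))
        );
        System.out.println("deterministic: " + merged.isDeterministic());
    }
}
```
Minimality was only ever a size optimization; the privilege checks depend on determinism alone, which is why the renamed methods keep determinizing.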
--- .../permission/ApplicationPermission.java | 2 +- .../authz/permission/ClusterPermission.java | 4 ++-- .../authz/permission/FieldPermissions.java | 9 ++++++--- .../permission/FieldPermissionsCache.java | 2 +- .../authz/permission/IndicesPermission.java | 14 ++++++------- .../authz/permission/LimitedRole.java | 2 +- .../authz/privilege/IndexPrivilege.java | 6 +++--- .../core/security/support/Automatons.java | 20 +++++++++---------- 8 files changed, 31 insertions(+), 28 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java index 5ba5c1fd1218a..23c93226d5494 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java @@ -53,7 +53,7 @@ public final class ApplicationPermission { return new PermissionEntry( appPriv, Sets.union(existing.resourceNames, resourceNames), - Automatons.unionAndMinimize(Arrays.asList(existing.resourceAutomaton, patterns)) + Automatons.unionAndDeterminize(Arrays.asList(existing.resourceAutomaton, patterns)) ); } })); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java index 4e608281a7858..5f3da8f73a708 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java @@ -137,7 +137,7 @@ public ClusterPermission build() { } List checks = this.permissionChecks; if (false == actionAutomatons.isEmpty()) { - final Automaton mergedAutomaton = Automatons.unionAndMinimize(this.actionAutomatons); + final Automaton mergedAutomaton = Automatons.unionAndDeterminize(this.actionAutomatons); checks = new ArrayList<>(this.permissionChecks.size() + 1); checks.add(new AutomatonPermissionCheck(mergedAutomaton)); checks.addAll(this.permissionChecks); @@ -156,7 +156,7 @@ private static Automaton createAutomaton(Set allowedActionPatterns, Set< } else { final Automaton allowedAutomaton = Automatons.patterns(allowedActionPatterns); final Automaton excludedAutomaton = Automatons.patterns(excludeActionPatterns); - return Automatons.minusAndMinimize(allowedAutomaton, excludedAutomaton); + return Automatons.minusAndDeterminize(allowedAutomaton, excludedAutomaton); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java index 235d7419d2bf0..ed7bbf9158278 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -147,7 +147,7 @@ public static Automaton initializePermittedFieldsAutomaton(FieldPermissionsDefin List automatonList = groups.stream() .map(g -> FieldPermissions.buildPermittedFieldsAutomaton(g.getGrantedFields(), g.getExcludedFields())) .collect(Collectors.toList()); - return 
Automatons.unionAndMinimize(automatonList); + return Automatons.unionAndDeterminize(automatonList); } /** @@ -189,7 +189,7 @@ public static Automaton buildPermittedFieldsAutomaton(final String[] grantedFiel ); } - grantedFieldsAutomaton = Automatons.minusAndMinimize(grantedFieldsAutomaton, deniedFieldsAutomaton); + grantedFieldsAutomaton = Automatons.minusAndDeterminize(grantedFieldsAutomaton, deniedFieldsAutomaton); return grantedFieldsAutomaton; } @@ -206,7 +206,10 @@ public static Automaton buildPermittedFieldsAutomaton(final String[] grantedFiel public FieldPermissions limitFieldPermissions(FieldPermissions limitedBy) { if (hasFieldLevelSecurity() && limitedBy != null && limitedBy.hasFieldLevelSecurity()) { // TODO: cache the automaton computation with FieldPermissionsCache - Automaton _permittedFieldsAutomaton = Automatons.intersectAndMinimize(getIncludeAutomaton(), limitedBy.getIncludeAutomaton()); + Automaton _permittedFieldsAutomaton = Automatons.intersectAndDeterminize( + getIncludeAutomaton(), + limitedBy.getIncludeAutomaton() + ); return new FieldPermissions( CollectionUtils.concatLists(fieldPermissionsDefinitions, limitedBy.fieldPermissionsDefinitions), _permittedFieldsAutomaton diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java index 46261937a0228..a1e14bfde8aa5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java @@ -107,7 +107,7 @@ FieldPermissions union(Collection fieldPermissionsCollection) List automatonList = fieldPermissionsCollection.stream() .map(FieldPermissions::getIncludeAutomaton) .collect(Collectors.toList()); - return new FieldPermissions(key, Automatons.unionAndMinimize(automatonList)); + return new FieldPermissions(key, Automatons.unionAndDeterminize(automatonList)); }); } catch (ExecutionException e) { throw new ElasticsearchException("unable to compute field permissions", e); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index 558f8e6f22ac1..cdd5a6f6ff72d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -283,14 +283,14 @@ public boolean checkResourcePrivileges( for (String forIndexPattern : checkForIndexPatterns) { Automaton checkIndexAutomaton = Automatons.patterns(forIndexPattern); if (false == allowRestrictedIndices && false == isConcreteRestrictedIndex(forIndexPattern)) { - checkIndexAutomaton = Automatons.minusAndMinimize(checkIndexAutomaton, restrictedIndices.getAutomaton()); + checkIndexAutomaton = Automatons.minusAndDeterminize(checkIndexAutomaton, restrictedIndices.getAutomaton()); } if (false == Operations.isEmpty(checkIndexAutomaton)) { Automaton allowedIndexPrivilegesAutomaton = null; for (var indexAndPrivilegeAutomaton : indexGroupAutomatons.entrySet()) { if (Automatons.subsetOf(checkIndexAutomaton, indexAndPrivilegeAutomaton.getValue())) { if (allowedIndexPrivilegesAutomaton != null) 
{ - allowedIndexPrivilegesAutomaton = Automatons.unionAndMinimize( + allowedIndexPrivilegesAutomaton = Automatons.unionAndDeterminize( Arrays.asList(allowedIndexPrivilegesAutomaton, indexAndPrivilegeAutomaton.getKey()) ); } else { @@ -342,7 +342,7 @@ public Automaton allowedActionsMatcher(String index) { automatonList.add(group.privilege.getAutomaton()); } } - return automatonList.isEmpty() ? Automatons.EMPTY : Automatons.unionAndMinimize(automatonList); + return automatonList.isEmpty() ? Automatons.EMPTY : Automatons.unionAndDeterminize(automatonList); } /** @@ -704,7 +704,7 @@ private Map indexGroupAutomatons(boolean combine) { Automaton indexAutomaton = group.getIndexMatcherAutomaton(); allAutomatons.compute( group.privilege().getAutomaton(), - (key, value) -> value == null ? indexAutomaton : Automatons.unionAndMinimize(List.of(value, indexAutomaton)) + (key, value) -> value == null ? indexAutomaton : Automatons.unionAndDeterminize(List.of(value, indexAutomaton)) ); if (combine) { List> combinedAutomatons = new ArrayList<>(); @@ -714,7 +714,7 @@ private Map indexGroupAutomatons(boolean combine) { group.privilege().getAutomaton() ); if (Operations.isEmpty(intersectingPrivileges) == false) { - Automaton indexPatternAutomaton = Automatons.unionAndMinimize( + Automaton indexPatternAutomaton = Automatons.unionAndDeterminize( List.of(indexAndPrivilegeAutomatons.getValue(), indexAutomaton) ); combinedAutomatons.add(new Tuple<>(intersectingPrivileges, indexPatternAutomaton)); @@ -723,7 +723,7 @@ private Map indexGroupAutomatons(boolean combine) { combinedAutomatons.forEach( automatons -> allAutomatons.compute( automatons.v1(), - (key, value) -> value == null ? automatons.v2() : Automatons.unionAndMinimize(List.of(value, automatons.v2())) + (key, value) -> value == null ? 
automatons.v2() : Automatons.unionAndDeterminize(List.of(value, automatons.v2())) ) ); } @@ -768,7 +768,7 @@ public Group( this.indexNameMatcher = StringMatcher.of(indices).and(name -> restrictedIndices.isRestricted(name) == false); this.indexNameAutomaton = () -> indexNameAutomatonMemo.computeIfAbsent( indices, - k -> Automatons.minusAndMinimize(Automatons.patterns(indices), restrictedIndices.getAutomaton()) + k -> Automatons.minusAndDeterminize(Automatons.patterns(indices), restrictedIndices.getAutomaton()) ); } this.fieldPermissions = Objects.requireNonNull(fieldPermissions); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java index ea32ba13ae576..e4d283aba75a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java @@ -212,7 +212,7 @@ public IsResourceAuthorizedPredicate allowedIndicesMatcher(String action) { public Automaton allowedActionsMatcher(String index) { final Automaton allowedMatcher = baseRole.allowedActionsMatcher(index); final Automaton limitedByMatcher = limitedByRole.allowedActionsMatcher(index); - return Automatons.intersectAndMinimize(allowedMatcher, limitedByMatcher); + return Automatons.intersectAndDeterminize(allowedMatcher, limitedByMatcher); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 7174b2f616c2a..f4df99dcefea4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -57,7 +57,7 @@ import static java.util.Map.entry; import static org.elasticsearch.xpack.core.security.support.Automatons.patterns; -import static org.elasticsearch.xpack.core.security.support.Automatons.unionAndMinimize; +import static org.elasticsearch.xpack.core.security.support.Automatons.unionAndDeterminize; /** * The name of an index related action always being with `indices:` followed by a sequence of slash-separated terms @@ -110,7 +110,7 @@ public final class IndexPrivilege extends Privilege { private static final Automaton DELETE_AUTOMATON = patterns("indices:data/write/delete*", "indices:data/write/bulk*"); private static final Automaton WRITE_AUTOMATON = patterns("indices:data/write/*", TransportAutoPutMappingAction.TYPE.name()); private static final Automaton MONITOR_AUTOMATON = patterns("indices:monitor/*"); - private static final Automaton MANAGE_AUTOMATON = unionAndMinimize( + private static final Automaton MANAGE_AUTOMATON = unionAndDeterminize( Arrays.asList( MONITOR_AUTOMATON, patterns("indices:admin/*", TransportFieldCapabilitiesAction.NAME + "*", GetRollupIndexCapsAction.NAME + "*") @@ -303,7 +303,7 @@ private static IndexPrivilege resolve(Set name) { if (actions.isEmpty() == false) { automata.add(patterns(actions)); } - return new IndexPrivilege(name, unionAndMinimize(automata)); + return new IndexPrivilege(name, unionAndDeterminize(automata)); } static Map values() { diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java index 201cb4b69e472..d3790ea64ba4b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java @@ -112,7 +112,7 @@ public static Automaton patterns(Collection patterns) { private static Automaton buildAutomaton(Collection patterns) { if (patterns.size() == 1) { - return minimize(pattern(patterns.iterator().next())); + return determinize(pattern(patterns.iterator().next())); } final Function, Automaton> build = strings -> { @@ -121,7 +121,7 @@ private static Automaton buildAutomaton(Collection patterns) { final Automaton patternAutomaton = pattern(pattern); automata.add(patternAutomaton); } - return unionAndMinimize(automata); + return unionAndDeterminize(automata); }; // We originally just compiled each automaton separately and then unioned them all. @@ -188,7 +188,7 @@ private static Automaton buildAutomaton(Collection patterns) { if (misc.isEmpty() == false) { automata.add(build.apply(misc)); } - return unionAndMinimize(automata); + return unionAndDeterminize(automata); } /** @@ -277,22 +277,22 @@ static Automaton wildcard(String text) { return Operations.determinize(concatenate(automata), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); } - public static Automaton unionAndMinimize(Collection automata) { + public static Automaton unionAndDeterminize(Collection automata) { Automaton res = automata.size() == 1 ? automata.iterator().next() : union(automata); - return minimize(res); + return determinize(res); } - public static Automaton minusAndMinimize(Automaton a1, Automaton a2) { + public static Automaton minusAndDeterminize(Automaton a1, Automaton a2) { Automaton res = minus(a1, a2, maxDeterminizedStates); - return minimize(res); + return determinize(res); } - public static Automaton intersectAndMinimize(Automaton a1, Automaton a2) { + public static Automaton intersectAndDeterminize(Automaton a1, Automaton a2) { Automaton res = intersection(a1, a2); - return minimize(res); + return determinize(res); } - private static Automaton minimize(Automaton automaton) { + private static Automaton determinize(Automaton automaton) { return Operations.determinize(automaton, maxDeterminizedStates); } From cf3c77a9c4dc89d2070cd84d12cfc1426632358e Mon Sep 17 00:00:00 2001 From: Dan Rubinstein Date: Tue, 22 Oct 2024 10:57:15 -0400 Subject: [PATCH 020/202] Add upper and lower max chunk size limits to ChunkingSettings (#115130) * Add upper and lower max chunk size limits to ChunkingSettings * Fix ServiceUtils tests --------- Co-authored-by: Elastic Machine --- .../SentenceBoundaryChunkingSettings.java | 6 ++- .../WordBoundaryChunkingSettings.java | 6 ++- .../inference/services/ServiceUtils.java | 26 +++++++++ .../ChunkingSettingsBuilderTests.java | 14 ++--- .../chunking/ChunkingSettingsTests.java | 8 +-- .../SentenceBoundaryChunkerTests.java | 3 +- ...SentenceBoundaryChunkingSettingsTests.java | 9 ++-- .../chunking/WordBoundaryChunkerTests.java | 2 +- .../WordBoundaryChunkingSettingsTests.java | 31 +++-------- .../inference/services/ServiceUtilsTests.java | 54 +++++++++++++++++++ 10 files changed, 117 insertions(+), 42 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java index 04a07eeb984ec..def52e97666f9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java @@ -29,6 +29,8 @@ public class SentenceBoundaryChunkingSettings implements ChunkingSettings { public static final String NAME = "SentenceBoundaryChunkingSettings"; private static final ChunkingStrategy STRATEGY = ChunkingStrategy.SENTENCE; + private static final int MAX_CHUNK_SIZE_LOWER_LIMIT = 20; + private static final int MAX_CHUNK_SIZE_UPPER_LIMIT = 300; private static final Set VALID_KEYS = Set.of( ChunkingSettingsOptions.STRATEGY.toString(), ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), @@ -62,9 +64,11 @@ public static SentenceBoundaryChunkingSettings fromMap(Map map) ); } - Integer maxChunkSize = ServiceUtils.extractRequiredPositiveInteger( + Integer maxChunkSize = ServiceUtils.extractRequiredPositiveIntegerBetween( map, ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), + MAX_CHUNK_SIZE_LOWER_LIMIT, + MAX_CHUNK_SIZE_UPPER_LIMIT, ModelConfigurations.CHUNKING_SETTINGS, validationException ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java index 5b91e122b9c80..7fb0fdc91bf72 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java @@ -28,6 +28,8 @@ public class WordBoundaryChunkingSettings implements ChunkingSettings { public static final String NAME = "WordBoundaryChunkingSettings"; private static final ChunkingStrategy STRATEGY = ChunkingStrategy.WORD; + private static final int MAX_CHUNK_SIZE_LOWER_LIMIT = 10; + private static final int MAX_CHUNK_SIZE_UPPER_LIMIT = 300; private static final Set VALID_KEYS = Set.of( ChunkingSettingsOptions.STRATEGY.toString(), ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), @@ -56,9 +58,11 @@ public static WordBoundaryChunkingSettings fromMap(Map map) { ); } - Integer maxChunkSize = ServiceUtils.extractRequiredPositiveInteger( + Integer maxChunkSize = ServiceUtils.extractRequiredPositiveIntegerBetween( map, ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), + MAX_CHUNK_SIZE_LOWER_LIMIT, + MAX_CHUNK_SIZE_UPPER_LIMIT, ModelConfigurations.CHUNKING_SETTINGS, validationException ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java index c0e3c78b12f13..9e7f8712b4087 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java @@ -435,6 +435,32 @@ public static Integer extractRequiredPositiveIntegerLessThanOrEqualToMax( return field; } + public static Integer extractRequiredPositiveIntegerBetween( + Map map, + String settingName, + int minValue, + int maxValue, + String scope, + ValidationException validationException + ) { + Integer field = 
extractRequiredPositiveInteger(map, settingName, scope, validationException); + + if (field != null && field < minValue) { + validationException.addValidationError( + ServiceUtils.mustBeGreaterThanOrEqualNumberErrorMessage(settingName, scope, field, minValue) + ); + return null; + } + if (field != null && field > maxValue) { + validationException.addValidationError( + ServiceUtils.mustBeLessThanOrEqualNumberErrorMessage(settingName, scope, field, maxValue) + ); + return null; + } + + return field; + } + public static Integer extractOptionalPositiveInteger( Map map, String settingName, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsBuilderTests.java index 5b9625073e6c6..235a3730ce4f6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsBuilderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsBuilderTests.java @@ -38,25 +38,27 @@ public void testValidChunkingSettingsMap() { } private Map, ChunkingSettings> chunkingSettingsMapToChunkingSettings() { - var maxChunkSize = randomNonNegativeInt(); - var overlap = randomIntBetween(1, maxChunkSize / 2); + var maxChunkSizeWordBoundaryChunkingSettings = randomIntBetween(10, 300); + var overlap = randomIntBetween(1, maxChunkSizeWordBoundaryChunkingSettings / 2); + var maxChunkSizeSentenceBoundaryChunkingSettings = randomIntBetween(20, 300); + return Map.of( Map.of( ChunkingSettingsOptions.STRATEGY.toString(), ChunkingStrategy.WORD.toString(), ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), - maxChunkSize, + maxChunkSizeWordBoundaryChunkingSettings, ChunkingSettingsOptions.OVERLAP.toString(), overlap ), - new WordBoundaryChunkingSettings(maxChunkSize, overlap), + new WordBoundaryChunkingSettings(maxChunkSizeWordBoundaryChunkingSettings, overlap), Map.of( ChunkingSettingsOptions.STRATEGY.toString(), ChunkingStrategy.SENTENCE.toString(), ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), - maxChunkSize + maxChunkSizeSentenceBoundaryChunkingSettings ), - new SentenceBoundaryChunkingSettings(maxChunkSize, 1) + new SentenceBoundaryChunkingSettings(maxChunkSizeSentenceBoundaryChunkingSettings, 1) ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsTests.java index 8373ae93354b1..2832c2f64e0e6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsTests.java @@ -21,11 +21,11 @@ public static ChunkingSettings createRandomChunkingSettings() { switch (randomStrategy) { case WORD -> { - var maxChunkSize = randomNonNegativeInt(); + var maxChunkSize = randomIntBetween(10, 300); return new WordBoundaryChunkingSettings(maxChunkSize, randomIntBetween(1, maxChunkSize / 2)); } case SENTENCE -> { - return new SentenceBoundaryChunkingSettings(randomNonNegativeInt(), randomBoolean() ? 0 : 1); + return new SentenceBoundaryChunkingSettings(randomIntBetween(20, 300), randomBoolean() ? 
0 : 1); } default -> throw new IllegalArgumentException("Unsupported random strategy [" + randomStrategy + "]"); } @@ -38,13 +38,13 @@ public static Map createRandomChunkingSettingsMap() { switch (randomStrategy) { case WORD -> { - var maxChunkSize = randomNonNegativeInt(); + var maxChunkSize = randomIntBetween(10, 300); chunkingSettingsMap.put(ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), maxChunkSize); chunkingSettingsMap.put(ChunkingSettingsOptions.OVERLAP.toString(), randomIntBetween(1, maxChunkSize / 2)); } case SENTENCE -> { - chunkingSettingsMap.put(ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), randomNonNegativeInt()); + chunkingSettingsMap.put(ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), randomIntBetween(20, 300)); } default -> { } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java index 5687ebc4dbae7..afce8c57e0350 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java @@ -318,7 +318,8 @@ public void testChunkSplitLargeChunkSizesWithChunkingSettings() { } public void testInvalidChunkingSettingsProvided() { - ChunkingSettings chunkingSettings = new WordBoundaryChunkingSettings(randomNonNegativeInt(), randomNonNegativeInt()); + var maxChunkSize = randomIntBetween(10, 300); + ChunkingSettings chunkingSettings = new WordBoundaryChunkingSettings(maxChunkSize, randomIntBetween(1, maxChunkSize / 2)); assertThrows(IllegalArgumentException.class, () -> { new SentenceBoundaryChunker().chunk(TEST_TEXT, chunkingSettings); }); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettingsTests.java index fe97d7eb3af54..47a1a116ba21e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettingsTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.inference.ChunkingStrategy; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.HashMap; @@ -28,14 +27,14 @@ public void testMaxChunkSizeNotProvided() { } public void testInvalidInputsProvided() { - var chunkingSettingsMap = buildChunkingSettingsMap(Optional.of(randomNonNegativeInt())); + var chunkingSettingsMap = buildChunkingSettingsMap(Optional.of(randomIntBetween(20, 300))); chunkingSettingsMap.put(randomAlphaOfLength(10), randomNonNegativeInt()); assertThrows(ValidationException.class, () -> { SentenceBoundaryChunkingSettings.fromMap(chunkingSettingsMap); }); } public void testValidInputsProvided() { - int maxChunkSize = randomNonNegativeInt(); + int maxChunkSize = randomIntBetween(20, 300); SentenceBoundaryChunkingSettings settings = SentenceBoundaryChunkingSettings.fromMap( buildChunkingSettingsMap(Optional.of(maxChunkSize)) ); @@ -59,12 +58,12 @@ protected Writeable.Reader instanceReader() { @Override protected 
SentenceBoundaryChunkingSettings createTestInstance() { - return new SentenceBoundaryChunkingSettings(randomNonNegativeInt(), randomBoolean() ? 0 : 1); + return new SentenceBoundaryChunkingSettings(randomIntBetween(20, 300), randomBoolean() ? 0 : 1); } @Override protected SentenceBoundaryChunkingSettings mutateInstance(SentenceBoundaryChunkingSettings instance) throws IOException { - var chunkSize = randomValueOtherThan(instance.maxChunkSize, ESTestCase::randomNonNegativeInt); + var chunkSize = randomValueOtherThan(instance.maxChunkSize, () -> randomIntBetween(20, 300)); return new SentenceBoundaryChunkingSettings(chunkSize, instance.sentenceOverlap); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java index 08c0724f36270..ef643a4b36fdc 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java @@ -136,7 +136,7 @@ public void testNumberOfChunksWithWordBoundaryChunkingSettings() { } public void testInvalidChunkingSettingsProvided() { - ChunkingSettings chunkingSettings = new SentenceBoundaryChunkingSettings(randomNonNegativeInt(), 0); + ChunkingSettings chunkingSettings = new SentenceBoundaryChunkingSettings(randomIntBetween(20, 300), 0); assertThrows(IllegalArgumentException.class, () -> { new WordBoundaryChunker().chunk(TEST_TEXT, chunkingSettings); }); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettingsTests.java index c5515f7bf0512..dd91a3c7a947e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettingsTests.java @@ -14,7 +14,6 @@ import java.io.IOException; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Optional; @@ -28,19 +27,20 @@ public void testMaxChunkSizeNotProvided() { public void testOverlapNotProvided() { assertThrows(ValidationException.class, () -> { - WordBoundaryChunkingSettings.fromMap(buildChunkingSettingsMap(Optional.of(randomNonNegativeInt()), Optional.empty())); + WordBoundaryChunkingSettings.fromMap(buildChunkingSettingsMap(Optional.of(randomIntBetween(10, 300)), Optional.empty())); }); } public void testInvalidInputsProvided() { - var chunkingSettingsMap = buildChunkingSettingsMap(Optional.of(randomNonNegativeInt()), Optional.of(randomNonNegativeInt())); + var maxChunkSize = randomIntBetween(10, 300); + var chunkingSettingsMap = buildChunkingSettingsMap(Optional.of(maxChunkSize), Optional.of(randomIntBetween(1, maxChunkSize / 2))); chunkingSettingsMap.put(randomAlphaOfLength(10), randomNonNegativeInt()); assertThrows(ValidationException.class, () -> { WordBoundaryChunkingSettings.fromMap(chunkingSettingsMap); }); } public void testOverlapGreaterThanHalfMaxChunkSize() { - var maxChunkSize = randomNonNegativeInt(); + var maxChunkSize = randomIntBetween(10, 300); var overlap = randomIntBetween((maxChunkSize / 2) + 1, maxChunkSize); assertThrows(ValidationException.class, () -> { 
WordBoundaryChunkingSettings.fromMap(buildChunkingSettingsMap(Optional.of(maxChunkSize), Optional.of(overlap))); @@ -48,7 +48,7 @@ public void testOverlapGreaterThanHalfMaxChunkSize() { } public void testValidInputsProvided() { - int maxChunkSize = randomNonNegativeInt(); + int maxChunkSize = randomIntBetween(10, 300); int overlap = randomIntBetween(1, maxChunkSize / 2); WordBoundaryChunkingSettings settings = WordBoundaryChunkingSettings.fromMap( buildChunkingSettingsMap(Optional.of(maxChunkSize), Optional.of(overlap)) @@ -75,29 +75,14 @@ protected Writeable.Reader instanceReader() { @Override protected WordBoundaryChunkingSettings createTestInstance() { - var maxChunkSize = randomNonNegativeInt(); + var maxChunkSize = randomIntBetween(10, 300); return new WordBoundaryChunkingSettings(maxChunkSize, randomIntBetween(1, maxChunkSize / 2)); } @Override protected WordBoundaryChunkingSettings mutateInstance(WordBoundaryChunkingSettings instance) throws IOException { - var valueToMutate = randomFrom(List.of(ChunkingSettingsOptions.MAX_CHUNK_SIZE, ChunkingSettingsOptions.OVERLAP)); - var maxChunkSize = instance.maxChunkSize; - var overlap = instance.overlap; - - if (valueToMutate.equals(ChunkingSettingsOptions.MAX_CHUNK_SIZE)) { - while (maxChunkSize == instance.maxChunkSize) { - maxChunkSize = randomNonNegativeInt(); - } - - if (overlap > maxChunkSize / 2) { - overlap = randomIntBetween(1, maxChunkSize / 2); - } - } else if (valueToMutate.equals(ChunkingSettingsOptions.OVERLAP)) { - while (overlap == instance.overlap) { - overlap = randomIntBetween(1, maxChunkSize / 2); - } - } + var maxChunkSize = randomValueOtherThan(instance.maxChunkSize, () -> randomIntBetween(10, 300)); + var overlap = randomValueOtherThan(instance.overlap, () -> randomIntBetween(1, maxChunkSize / 2)); return new WordBoundaryChunkingSettings(maxChunkSize, overlap); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java index ca48d5427d18b..e3df0f0b5a2e1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java @@ -605,6 +605,60 @@ public void testExtractRequiredPositiveIntegerLessThanOrEqualToMax_AddsErrorWhen assertThat(validation.validationErrors().get(1), is("[scope] does not contain the required setting [not_key]")); } + public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsBetweenMinAndMax() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue + 2, minValue + 10); + testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, randomIntBetween(minValue + 1, maxValue - 1)); + } + + public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsEqualToMin() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue + 1, minValue + 10); + testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, minValue); + } + + public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsEqualToMax() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue + 1, minValue + 10); + testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, maxValue); + } + + private void testExtractRequiredPositiveIntegerBetween_Successful(int 
minValue, int maxValue, int actualValue) { + var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map<String, Object> map = modifiableMap(Map.of("key", actualValue)); + var parsedInt = ServiceUtils.extractRequiredPositiveIntegerBetween(map, "key", minValue, maxValue, "scope", validation); + + assertThat(validation.validationErrors(), hasSize(1)); + assertNotNull(parsedInt); + assertThat(parsedInt, is(actualValue)); + assertTrue(map.isEmpty()); + } + + public void testExtractRequiredIntBetween_AddsErrorForValueBelowMin() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue, minValue + 10); + testExtractRequiredIntBetween_Unsuccessful(minValue, maxValue, minValue - 1); + } + + public void testExtractRequiredIntBetween_AddsErrorForValueAboveMax() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue, minValue + 10); + testExtractRequiredIntBetween_Unsuccessful(minValue, maxValue, maxValue + 1); + } + + private void testExtractRequiredIntBetween_Unsuccessful(int minValue, int maxValue, int actualValue) { + var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map<String, Object> map = modifiableMap(Map.of("key", actualValue)); + var parsedInt = ServiceUtils.extractRequiredPositiveIntegerBetween(map, "key", minValue, maxValue, "scope", validation); + + assertThat(validation.validationErrors(), hasSize(2)); + assertNull(parsedInt); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(1), containsString("Invalid value")); + } + public void testExtractOptionalEnum_ReturnsNull_WhenFieldDoesNotExist() { var validation = new ValidationException(); Map<String, Object> map = modifiableMap(Map.of("key", "value")); From aee76c5123df08705eec24bee5cb0158b47267ed Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 23 Oct 2024 02:03:14 +1100 Subject: [PATCH 021/202] Mute org.elasticsearch.xpack.analytics.rate.TimeSeriesRateAggregatorTests org.elasticsearch.xpack.analytics.rate.TimeSeriesRateAggregatorTests #115334 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 1cb8baa96a942..847f9af13801c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -326,6 +326,8 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)} issue: https://github.com/elastic/elasticsearch/issues/115231 +- class: org.elasticsearch.xpack.analytics.rate.TimeSeriesRateAggregatorTests + issue: https://github.com/elastic/elasticsearch/issues/115334 # Examples: # From 2f695ef1cca152873911ebaf9eaa6abfaee4d1e7 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 22 Oct 2024 17:10:44 +0200 Subject: [PATCH 022/202] Relax condition for H3 bins crossing the dateline (#115290) Just check, for H3 bins crossing the dateline, that the center-child relationship returns something other than Disjoint.
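For context: the "polygon splitting" referred to in this message (and in the test comment in the diff below) cuts a shape whose longitude span crosses the dateline into two non-wrapping pieces, so a child shape centered on the dateline straddles the cut and no single piece can witness an INSIDE relation. What follows is a minimal, self-contained 1-D sketch of that effect, not part of the actual change; it uses plain longitude ranges rather than the real GeoHexVisitor/Lucene geometry code, and every name in it is invented for illustration.

import java.util.List;

class DatelineSplitSketch {

    // A longitude interval in degrees within [-180, 180]; min > max means the
    // interval wraps across the dateline (e.g. [170, -170] covers 170..180 and -180..-170).
    record LonRange(double min, double max) {

        // The non-wrapping halves of this range: a 1-D analogue of the polygon
        // splitting applied to dateline-crossing shapes.
        List<LonRange> split() {
            return min > max ? List.of(new LonRange(min, 180), new LonRange(-180, max)) : List.of(this);
        }

        // True when this non-wrapping range fully contains the other non-wrapping range.
        boolean contains(LonRange other) {
            return min <= other.min && other.max <= max;
        }
    }

    public static void main(String[] args) {
        LonRange hex = new LonRange(170, -170);   // stands in for a dateline-crossing H3 cell
        LonRange child = new LonRange(175, -175); // centered inside it, also dateline-crossing
        for (LonRange childHalf : child.split()) {
            for (LonRange hexHalf : hex.split()) {
                System.out.printf("%s inside %s: %b%n", childHalf, hexHalf, hexHalf.contains(childHalf));
            }
        }
    }
}

Running the sketch shows each child half is inside exactly one hex half and disjoint from the other, so once the pieces are aggregated the strongest relation that survives is "not disjoint", which is what the relaxed assertion in the test below checks.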
--- .../aggregations/bucket/geogrid/GeoHexVisitorTests.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexVisitorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexVisitorTests.java index 8e2f713e6ed3e..d9cbbaafa1779 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexVisitorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexVisitorTests.java @@ -83,9 +83,10 @@ private void doTestGeometry(LongFunction h3ToGeometry, boolean hasArea visitor.reset(centerChild); reader.visit(visitor); if (hasArea) { - if (h3CrossesDateline && visitor.getLeftX() > visitor.getRightX()) { - // if both polygons crosses the dateline it cannot be inside due to the polygon splitting technique - assertEquals("failing h3: " + h3, GeoRelation.QUERY_CROSSES, visitor.relation()); + if (h3CrossesDateline) { + // if the h3 crosses the dateline, we might get CROSSES due to the polygon splitting technique. We can't + // be sure which one is the correct one, so we just check that it is not DISJOINT + assertNotSame("failing h3: " + h3, GeoRelation.QUERY_DISJOINT, visitor.relation()); } else { assertEquals("failing h3: " + h3, GeoRelation.QUERY_INSIDE, visitor.relation()); } From fab22654bddb364dcf75db15b1d77ee5faba92fd Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 23 Oct 2024 02:15:19 +1100 Subject: [PATCH 023/202] Mute org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval #115339 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 847f9af13801c..a7fb95691283b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -328,6 +328,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/115231 - class: org.elasticsearch.xpack.analytics.rate.TimeSeriesRateAggregatorTests issue: https://github.com/elastic/elasticsearch/issues/115334 +- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests + method: testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval + issue: https://github.com/elastic/elasticsearch/issues/115339 # Examples: # From 14c25fed66d80bc1b55106cc48a225f3449e5810 Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Tue, 22 Oct 2024 17:25:32 +0200 Subject: [PATCH 024/202] Unmute MultiVersionRepositoryAccessIT testCreateAndRestoreSnapshot (#115326) These are either falsely marked as failure, or all related to a test setup issue that has been fixed. 
closes https://github.com/elastic/elasticsearch/pull/114998 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index a7fb95691283b..62140d05c9e4a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -308,9 +308,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 -- class: org.elasticsearch.upgrades.MultiVersionRepositoryAccessIT - method: testCreateAndRestoreSnapshot - issue: https://github.com/elastic/elasticsearch/issues/114998 - class: org.elasticsearch.index.mapper.TextFieldMapperTests method: testBlockLoaderFromRowStrideReaderWithSyntheticSource issue: https://github.com/elastic/elasticsearch/issues/115066 From d9baf6f9db1761b5a777c98e4bac82abf93598c0 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 22 Oct 2024 17:43:31 +0200 Subject: [PATCH 025/202] Revert "Add ResolvedExpression wrapper (#114592)" (#115317) This reverts commit 4c15cc077887d00ecf0e02c39b42cf01874ab6c4. This commit introduced an orders of magnitude regression when searching many shards. --- docs/changelog/115317.yaml | 5 + .../TransportClusterSearchShardsAction.java | 3 +- .../indices/resolve/ResolveIndexAction.java | 9 +- .../query/TransportValidateQueryAction.java | 3 +- .../explain/TransportExplainAction.java | 3 +- .../action/search/TransportSearchAction.java | 24 +- .../search/TransportSearchShardsAction.java | 6 +- .../metadata/IndexNameExpressionResolver.java | 196 +++++------- .../elasticsearch/indices/IndicesService.java | 3 +- .../elasticsearch/search/SearchService.java | 3 +- .../indices/resolve/ResolveIndexTests.java | 15 +- .../DateMathExpressionResolverTests.java | 89 +++--- .../cluster/metadata/ExpressionListTests.java | 108 +++---- .../IndexNameExpressionResolverTests.java | 65 ++-- .../WildcardExpressionResolverTests.java | 299 ++++++++---------- .../indices/IndicesServiceTests.java | 34 +- 16 files changed, 361 insertions(+), 504 deletions(-) create mode 100644 docs/changelog/115317.yaml diff --git a/docs/changelog/115317.yaml b/docs/changelog/115317.yaml new file mode 100644 index 0000000000000..153f7a52f0674 --- /dev/null +++ b/docs/changelog/115317.yaml @@ -0,0 +1,5 @@ +pr: 115317 +summary: Revert "Add `ResolvedExpression` wrapper" +area: Indices APIs +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index b855f2cee7613..9ffef1f178f44 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; @@ -85,7 +84,7 @@ protected void masterOperation( String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); Map> routingMap = 
indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices()); Map indicesAndFilters = new HashMap<>(); - Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); + Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); for (String index : concreteIndices) { final AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, indicesAndAliases); final String[] aliases = indexNameExpressionResolver.indexAliases( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index f5c100b7884bb..5c5c71bc002b3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -566,8 +565,8 @@ static void resolveIndices( if (names.length == 1 && (Metadata.ALL.equals(names[0]) || Regex.isMatchAllPattern(names[0]))) { names = new String[] { "**" }; } - Set resolvedIndexAbstractions = resolver.resolveExpressions(clusterState, indicesOptions, true, names); - for (ResolvedExpression s : resolvedIndexAbstractions) { + Set resolvedIndexAbstractions = resolver.resolveExpressions(clusterState, indicesOptions, true, names); + for (String s : resolvedIndexAbstractions) { enrichIndexAbstraction(clusterState, s, indices, aliases, dataStreams); } indices.sort(Comparator.comparing(ResolvedIndexAbstraction::getName)); @@ -598,12 +597,12 @@ private static void mergeResults( private static void enrichIndexAbstraction( ClusterState clusterState, - ResolvedExpression indexAbstraction, + String indexAbstraction, List indices, List aliases, List dataStreams ) { - IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction.resource()); + IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction); if (ia != null) { switch (ia.getType()) { case CONCRETE_INDEX -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index e01f364712676..4e9830fe0d14e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import 
org.elasticsearch.cluster.routing.ShardRouting; @@ -134,7 +133,7 @@ protected void doExecute(Task task, ValidateQueryRequest request, ActionListener @Override protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) { final ClusterState clusterState = clusterService.state(); - final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); final AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, shard.getIndexName(), indicesAndAliases); return new ShardValidateQueryRequest(shard.shardId(), aliasFilter, request); } diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 84c6df7b8a66f..9c82d032014f2 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; @@ -110,7 +109,7 @@ protected boolean resolveIndex(ExplainRequest request) { @Override protected void resolveRequest(ClusterState state, InternalRequest request) { - final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(state, request.request().index()); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(state, request.request().index()); final AliasFilter aliasFilter = searchService.buildAliasFilter(state, request.concreteIndex(), indicesAndAliases); request.request().filteringAlias(aliasFilter); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index b5864f64a7824..1645a378446a4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -37,7 +37,6 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; @@ -111,7 +110,6 @@ import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.LongSupplier; -import java.util.stream.Collectors; import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH; import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; @@ -205,7 +203,7 @@ public TransportSearchAction( private Map buildPerIndexOriginalIndices( ClusterState clusterState, - Set indicesAndAliases, + Set indicesAndAliases, String[] indices, 
IndicesOptions indicesOptions ) { @@ -213,9 +211,6 @@ private Map buildPerIndexOriginalIndices( var blocks = clusterState.blocks(); // optimization: mostly we do not have any blocks so there's no point in the expensive per-index checking boolean hasBlocks = blocks.global().isEmpty() == false || blocks.indices().isEmpty() == false; - // Get a distinct set of index abstraction names present from the resolved expressions to help with the reverse resolution from - // concrete index to the expression that produced it. - Set indicesAndAliasesResources = indicesAndAliases.stream().map(ResolvedExpression::resource).collect(Collectors.toSet()); for (String index : indices) { if (hasBlocks) { blocks.indexBlockedRaiseException(ClusterBlockLevel.READ, index); @@ -232,8 +227,8 @@ private Map buildPerIndexOriginalIndices( String[] finalIndices = Strings.EMPTY_ARRAY; if (aliases == null || aliases.length == 0 - || indicesAndAliasesResources.contains(index) - || hasDataStreamRef(clusterState, indicesAndAliasesResources, index)) { + || indicesAndAliases.contains(index) + || hasDataStreamRef(clusterState, indicesAndAliases, index)) { finalIndices = new String[] { index }; } if (aliases != null) { @@ -252,11 +247,7 @@ private static boolean hasDataStreamRef(ClusterState clusterState, Set i return indicesAndAliases.contains(ret.getParentDataStream().getName()); } - Map buildIndexAliasFilters( - ClusterState clusterState, - Set indicesAndAliases, - Index[] concreteIndices - ) { + Map buildIndexAliasFilters(ClusterState clusterState, Set indicesAndAliases, Index[] concreteIndices) { final Map aliasFilterMap = new HashMap<>(); for (Index index : concreteIndices) { clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index.getName()); @@ -1246,10 +1237,7 @@ private void executeSearch( } else { final Index[] indices = resolvedIndices.getConcreteLocalIndices(); concreteLocalIndices = Arrays.stream(indices).map(Index::getName).toArray(String[]::new); - final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions( - clusterState, - searchRequest.indices() - ); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, searchRequest.indices()); aliasFilter = buildIndexAliasFilters(clusterState, indicesAndAliases, indices); aliasFilter.putAll(remoteAliasMap); localShardIterators = getLocalShardsIterator( @@ -1822,7 +1810,7 @@ List getLocalShardsIterator( ClusterState clusterState, SearchRequest searchRequest, String clusterAlias, - Set indicesAndAliases, + Set indicesAndAliases, String[] concreteIndices ) { var routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index b94bd95c93d8a..f418b5617b2a1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.index.Index; @@ -128,10 +127,7 @@ public void searchShards(Task task, SearchShardsRequest searchShardsRequest, Act searchService.getRewriteContext(timeProvider::absoluteStartMillis, resolvedIndices, null), listener.delegateFailureAndWrap((delegate, searchRequest) -> { Index[] concreteIndices = resolvedIndices.getConcreteLocalIndices(); - final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions( - clusterState, - searchRequest.indices() - ); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, searchRequest.indices()); final Map aliasFilters = transportSearchAction.buildIndexAliasFilters( clusterState, indicesAndAliases, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index eaf54034b22e0..2229166a2d779 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -74,15 +74,6 @@ public IndexNameExpressionResolver(ThreadContext threadContext, SystemIndices sy this.systemIndices = Objects.requireNonNull(systemIndices, "System Indices must not be null"); } - /** - * This contains the resolved expression in the form of the resource. - * Soon it will facilitate the index component selector. - * @param resource the resolved resolvedExpression - */ - public record ResolvedExpression(String resource) { - - } - /** * Same as {@link #concreteIndexNames(ClusterState, IndicesOptions, String...)}, but the index expressions and options * are encapsulated in the specified request. @@ -200,9 +191,8 @@ public List dataStreamNames(ClusterState state, IndicesOptions options, getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressions(context, indexExpressions); + final Collection expressions = resolveExpressions(context, indexExpressions); return expressions.stream() - .map(ResolvedExpression::resource) .map(x -> state.metadata().getIndicesLookup().get(x)) .filter(Objects::nonNull) .filter(ia -> ia.getType() == Type.DATA_STREAM) @@ -231,11 +221,10 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressions(context, request.index()); + final Collection expressions = resolveExpressions(context, request.index()); if (expressions.size() == 1) { - ResolvedExpression resolvedExpression = expressions.iterator().next(); - IndexAbstraction ia = state.metadata().getIndicesLookup().get(resolvedExpression.resource()); + IndexAbstraction ia = state.metadata().getIndicesLookup().get(expressions.iterator().next()); if (ia.getType() == Type.ALIAS) { Index writeIndex = ia.getWriteIndex(); if (writeIndex == null) { @@ -257,14 +246,14 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit } } - protected static Collection resolveExpressions(Context context, String... expressions) { + protected static Collection resolveExpressions(Context context, String... 
expressions) { if (context.getOptions().expandWildcardExpressions() == false) { if (expressions == null || expressions.length == 0 || expressions.length == 1 && Metadata.ALL.equals(expressions[0])) { return List.of(); } else { return ExplicitResourceNameFilter.filterUnavailable( context, - DateMathExpressionResolver.resolve(context, Arrays.stream(expressions).map(ResolvedExpression::new).toList()) + DateMathExpressionResolver.resolve(context, List.of(expressions)) ); } } else { @@ -275,10 +264,7 @@ protected static Collection resolveExpressions(Context conte } else { return WildcardExpressionResolver.resolve( context, - ExplicitResourceNameFilter.filterUnavailable( - context, - DateMathExpressionResolver.resolve(context, Arrays.stream(expressions).map(ResolvedExpression::new).toList()) - ) + ExplicitResourceNameFilter.filterUnavailable(context, DateMathExpressionResolver.resolve(context, List.of(expressions))) ); } } @@ -353,12 +339,12 @@ String[] concreteIndexNames(Context context, String... indexExpressions) { } Index[] concreteIndices(Context context, String... indexExpressions) { - final Collection expressions = resolveExpressions(context, indexExpressions); + final Collection expressions = resolveExpressions(context, indexExpressions); final Set concreteIndicesResult = Sets.newLinkedHashSetWithExpectedSize(expressions.size()); final Map indicesLookup = context.getState().metadata().getIndicesLookup(); - for (ResolvedExpression resolvedExpression : expressions) { - final IndexAbstraction indexAbstraction = indicesLookup.get(resolvedExpression.resource()); + for (String expression : expressions) { + final IndexAbstraction indexAbstraction = indicesLookup.get(expression); assert indexAbstraction != null; if (indexAbstraction.getType() == Type.ALIAS && context.isResolveToWriteIndex()) { Index writeIndex = indexAbstraction.getWriteIndex(); @@ -392,7 +378,7 @@ Index[] concreteIndices(Context context, String... indexExpressions) { throw new IllegalArgumentException( indexAbstraction.getType().getDisplayName() + " [" - + resolvedExpression.resource() + + expression + "] has more than one index associated with it " + Arrays.toString(indexNames) + ", can't execute a single index op" @@ -656,7 +642,7 @@ public Index concreteSingleIndex(ClusterState state, IndicesRequest request) { * Utility method that allows to resolve an index expression to its corresponding single write index. * * @param state the cluster state containing all the data to resolve to expression to a concrete index - * @param request The request that defines how an alias or an index need to be resolved to a concrete index + * @param request The request that defines how the an alias or an index need to be resolved to a concrete index * and the expression that can be resolved to an alias or an index name. * @throws IllegalArgumentException if the index resolution does not lead to an index, or leads to more than one index * @return the write index obtained as a result of the index resolution @@ -748,7 +734,7 @@ public static String resolveDateMathExpression(String dateExpression, long time) /** * Resolve an array of expressions to the set of indices and aliases that these expressions match. */ - public Set resolveExpressions(ClusterState state, String... expressions) { + public Set resolveExpressions(ClusterState state, String... expressions) { return resolveExpressions(state, IndicesOptions.lenientExpandOpen(), false, expressions); } @@ -757,7 +743,7 @@ public Set resolveExpressions(ClusterState state, String... 
* If {@param preserveDataStreams} is {@code true}, datastreams that are covered by the wildcards from the * {@param expressions} are returned as-is, without expanding them further to their respective backing indices. */ - public Set resolveExpressions( + public Set resolveExpressions( ClusterState state, IndicesOptions indicesOptions, boolean preserveDataStreams, @@ -774,10 +760,10 @@ public Set resolveExpressions( getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - Collection resolved = resolveExpressions(context, expressions); - if (resolved instanceof Set) { + Collection resolved = resolveExpressions(context, expressions); + if (resolved instanceof Set) { // unmodifiable without creating a new collection as it might contain many items - return Collections.unmodifiableSet((Set) resolved); + return Collections.unmodifiableSet((Set) resolved); } else { return Set.copyOf(resolved); } @@ -790,7 +776,7 @@ public Set resolveExpressions( * the index itself - null is returned. Returns {@code null} if no filtering is required. * NOTE: The provided expressions must have been resolved already via {@link #resolveExpressions}. */ - public String[] filteringAliases(ClusterState state, String index, Set resolvedExpressions) { + public String[] filteringAliases(ClusterState state, String index, Set resolvedExpressions) { return indexAliases(state, index, AliasMetadata::filteringRequired, DataStreamAlias::filteringRequired, false, resolvedExpressions); } @@ -816,39 +802,39 @@ public String[] indexAliases( Predicate requiredAlias, Predicate requiredDataStreamAlias, boolean skipIdentity, - Set resolvedExpressions + Set resolvedExpressions ) { - if (isAllIndicesExpression(resolvedExpressions)) { + if (isAllIndices(resolvedExpressions)) { return null; } - Set resources = resolvedExpressions.stream().map(ResolvedExpression::resource).collect(Collectors.toSet()); + final IndexMetadata indexMetadata = state.metadata().getIndices().get(index); if (indexMetadata == null) { // Shouldn't happen throw new IndexNotFoundException(index); } - if (skipIdentity == false && resources.contains(index)) { + if (skipIdentity == false && resolvedExpressions.contains(index)) { return null; } IndexAbstraction ia = state.metadata().getIndicesLookup().get(index); DataStream dataStream = ia.getParentDataStream(); if (dataStream != null) { - if (skipIdentity == false && resources.contains(dataStream.getName())) { + if (skipIdentity == false && resolvedExpressions.contains(dataStream.getName())) { // skip the filters when the request targets the data stream name return null; } Map dataStreamAliases = state.metadata().dataStreamAliases(); List aliasesForDataStream; - if (iterateIndexAliases(dataStreamAliases.size(), resources.size())) { + if (iterateIndexAliases(dataStreamAliases.size(), resolvedExpressions.size())) { aliasesForDataStream = dataStreamAliases.values() .stream() - .filter(dataStreamAlias -> resources.contains(dataStreamAlias.getName())) + .filter(dataStreamAlias -> resolvedExpressions.contains(dataStreamAlias.getName())) .filter(dataStreamAlias -> dataStreamAlias.getDataStreams().contains(dataStream.getName())) .toList(); } else { - aliasesForDataStream = resources.stream() + aliasesForDataStream = resolvedExpressions.stream() .map(dataStreamAliases::get) .filter(dataStreamAlias -> dataStreamAlias != null && dataStreamAlias.getDataStreams().contains(dataStream.getName())) .toList(); @@ -873,15 +859,18 @@ public String[] indexAliases( } else { final Map indexAliases = indexMetadata.getAliases(); final 
AliasMetadata[] aliasCandidates; - if (iterateIndexAliases(indexAliases.size(), resources.size())) { + if (iterateIndexAliases(indexAliases.size(), resolvedExpressions.size())) { // faster to iterate indexAliases aliasCandidates = indexAliases.values() .stream() - .filter(aliasMetadata -> resources.contains(aliasMetadata.alias())) + .filter(aliasMetadata -> resolvedExpressions.contains(aliasMetadata.alias())) .toArray(AliasMetadata[]::new); } else { // faster to iterate resolvedExpressions - aliasCandidates = resources.stream().map(indexAliases::get).filter(Objects::nonNull).toArray(AliasMetadata[]::new); + aliasCandidates = resolvedExpressions.stream() + .map(indexAliases::get) + .filter(Objects::nonNull) + .toArray(AliasMetadata[]::new); } List aliases = null; for (AliasMetadata aliasMetadata : aliasCandidates) { @@ -920,7 +909,12 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection resolvedExpressions = resolveExpressions(context, expressions); + final Collection resolvedExpressions = resolveExpressions(context, expressions); + + // TODO: it appears that this can never be true? + if (isAllIndices(resolvedExpressions)) { + return resolveSearchRoutingAllIndices(state.metadata(), routing); + } Map> routings = null; Set paramRouting = null; @@ -930,8 +924,8 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab paramRouting = Sets.newHashSet(Strings.splitStringByCommaToArray(routing)); } - for (ResolvedExpression resolvedExpression : resolvedExpressions) { - IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(resolvedExpression.resource); + for (String expression : resolvedExpressions) { + IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(expression); if (indexAbstraction != null && indexAbstraction.getType() == Type.ALIAS) { for (Index index : indexAbstraction.getIndices()) { String concreteIndex = index.getName(); @@ -969,7 +963,7 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab } } else { // Index - routings = collectRoutings(routings, paramRouting, norouting, resolvedExpression.resource()); + routings = collectRoutings(routings, paramRouting, norouting, expression); } } @@ -1015,17 +1009,6 @@ public static Map> resolveSearchRoutingAllIndices(Metadata m return null; } - /** - * Identifies whether the array containing index names given as argument refers to all indices - * The empty or null array identifies all indices - * - * @param aliasesOrIndices the array containing index names - * @return true if the provided array maps to all indices, false otherwise - */ - public static boolean isAllIndicesExpression(Collection aliasesOrIndices) { - return isAllIndices(aliasesOrIndices.stream().map(ResolvedExpression::resource).toList()); - } - /** * Identifies whether the array containing index names given as argument refers to all indices * The empty or null array identifies all indices @@ -1266,8 +1249,8 @@ private WildcardExpressionResolver() { * Returns all the indices, datastreams, and aliases, considering the open/closed, system, and hidden context parameters. * Depending on the context, returns the names of the datastreams themselves or their backing indices. 
*/ - public static Collection resolveAll(Context context) { - List concreteIndices = resolveEmptyOrTrivialWildcard(context); + public static Collection resolveAll(Context context) { + List concreteIndices = resolveEmptyOrTrivialWildcard(context); if (context.includeDataStreams() == false && context.getOptions().ignoreAliases()) { return concreteIndices; @@ -1282,7 +1265,7 @@ public static Collection resolveAll(Context context) { .filter(ia -> shouldIncludeIfDataStream(ia, context) || shouldIncludeIfAlias(ia, context)) .filter(ia -> ia.isSystem() == false || context.systemIndexAccessPredicate.test(ia.getName())); - Set resolved = expandToOpenClosed(context, ias).collect(Collectors.toSet()); + Set resolved = expandToOpenClosed(context, ias).collect(Collectors.toSet()); resolved.addAll(concreteIndices); return resolved; } @@ -1310,17 +1293,17 @@ private static boolean shouldIncludeIfAlias(IndexAbstraction ia, IndexNameExpres * ultimately returned, instead of the alias or datastream name * */ - public static Collection resolve(Context context, List expressions) { + public static Collection resolve(Context context, List expressions) { ExpressionList expressionList = new ExpressionList(context, expressions); // fast exit if there are no wildcards to evaluate if (expressionList.hasWildcard() == false) { return expressions; } - Set result = new HashSet<>(); + Set result = new HashSet<>(); for (ExpressionList.Expression expression : expressionList) { if (expression.isWildcard()) { Stream matchingResources = matchResourcesToWildcard(context, expression.get()); - Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); + Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); AtomicBoolean emptyWildcardExpansion = new AtomicBoolean(false); if (context.getOptions().allowNoIndices() == false) { emptyWildcardExpansion.set(true); @@ -1336,9 +1319,9 @@ public static Collection resolve(Context context, List filterIndicesLookupForSuffixWildcar * Data streams and aliases are interpreted to refer to multiple indices, * then all index resources are filtered by their open/closed status. 
*/ - private static Stream expandToOpenClosed(Context context, Stream resources) { + private static Stream expandToOpenClosed(Context context, Stream resources) { final IndexMetadata.State excludeState = excludeState(context.getOptions()); return resources.flatMap(indexAbstraction -> { if (context.isPreserveAliases() && indexAbstraction.getType() == Type.ALIAS) { - return Stream.of(new ResolvedExpression(indexAbstraction.getName())); + return Stream.of(indexAbstraction.getName()); } else if (context.isPreserveDataStreams() && indexAbstraction.getType() == Type.DATA_STREAM) { - return Stream.of(new ResolvedExpression(indexAbstraction.getName())); + return Stream.of(indexAbstraction.getName()); } else { Stream indicesStateStream = Stream.of(); if (shouldIncludeRegularIndices(context.getOptions())) { @@ -1451,20 +1434,18 @@ private static Stream expandToOpenClosed(Context context, St if (excludeState != null) { indicesStateStream = indicesStateStream.filter(indexMeta -> indexMeta.getState() != excludeState); } - return indicesStateStream.map(indexMeta -> new ResolvedExpression(indexMeta.getIndex().getName())); + return indicesStateStream.map(indexMeta -> indexMeta.getIndex().getName()); } }); } - private static List resolveEmptyOrTrivialWildcard(Context context) { + private static List resolveEmptyOrTrivialWildcard(Context context) { final String[] allIndices = resolveEmptyOrTrivialWildcardToAllIndices(context.getOptions(), context.getState().metadata()); - Stream result; if (context.systemIndexAccessLevel == SystemIndexAccessLevel.ALL) { - result = Arrays.stream(allIndices); + return List.of(allIndices); } else { - result = resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(context, allIndices).stream(); + return resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(context, allIndices); } - return result.map(ResolvedExpression::new).toList(); } private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(Context context, String[] allIndices) { @@ -1526,8 +1507,8 @@ private DateMathExpressionResolver() { // utility class } - public static List resolve(Context context, List expressions) { - List result = new ArrayList<>(expressions.size()); + public static List resolve(Context context, List expressions) { + List result = new ArrayList<>(expressions.size()); for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { result.add(resolveExpression(expression, context::getStartTime)); } @@ -1538,15 +1519,13 @@ static String resolveExpression(String expression) { return resolveExpression(expression, System::currentTimeMillis); } - static ResolvedExpression resolveExpression(ExpressionList.Expression expression, LongSupplier getTime) { - String result; + static String resolveExpression(ExpressionList.Expression expression, LongSupplier getTime) { if (expression.isExclusion()) { // accepts date-math exclusions that are of the form "-<...{}>", i.e. the "-" is outside the "<>" date-math template - result = "-" + resolveExpression(expression.get(), getTime); + return "-" + resolveExpression(expression.get(), getTime); } else { - result = resolveExpression(expression.get(), getTime); + return resolveExpression(expression.get(), getTime); } - return new ResolvedExpression(result); } static String resolveExpression(String expression, LongSupplier getTime) { @@ -1708,26 +1687,25 @@ private ExplicitResourceNameFilter() { * Returns an expression list with "unavailable" (missing or not acceptable) resource names filtered out. 
* Only explicit resource names are considered for filtering. Wildcard and exclusion expressions are kept in. */ - public static List filterUnavailable(Context context, List expressions) { + public static List filterUnavailable(Context context, List expressions) { ensureRemoteIndicesRequireIgnoreUnavailable(context.getOptions(), expressions); - List result = new ArrayList<>(expressions.size()); + List result = new ArrayList<>(expressions.size()); for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { validateAliasOrIndex(expression); - if (expression.isWildcard() || expression.isExclusion() || ensureAliasOrIndexExists(context, expression)) { - result.add(expression.resolvedExpression()); + if (expression.isWildcard() || expression.isExclusion() || ensureAliasOrIndexExists(context, expression.get())) { + result.add(expression.expression()); } } return result; } /** - * This returns `true` if the given {@param resolvedExpression} is of a resource that exists. - * Otherwise, it returns `false` if the `ignore_unavailable` option is `true`, or, if `false`, it throws a "not found" type of + * This returns `true` if the given {@param name} is of a resource that exists. + * Otherwise, it returns `false` if the `ignore_unvailable` option is `true`, or, if `false`, it throws a "not found" type of * exception. */ @Nullable - private static boolean ensureAliasOrIndexExists(Context context, ExpressionList.Expression expression) { - String name = expression.get(); + private static boolean ensureAliasOrIndexExists(Context context, String name) { boolean ignoreUnavailable = context.getOptions().ignoreUnavailable(); IndexAbstraction indexAbstraction = context.getState().getMetadata().getIndicesLookup().get(name); if (indexAbstraction == null) { @@ -1759,37 +1737,32 @@ private static boolean ensureAliasOrIndexExists(Context context, ExpressionList. } private static void validateAliasOrIndex(ExpressionList.Expression expression) { - if (Strings.isEmpty(expression.resolvedExpression().resource())) { - throw notFoundException(expression.get()); + if (Strings.isEmpty(expression.expression())) { + throw notFoundException(expression.expression()); } // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown // if the expression can't be found. 
- if (expression.resolvedExpression().resource().charAt(0) == '_') { - throw new InvalidIndexNameException(expression.get(), "must not start with '_'."); + if (expression.expression().charAt(0) == '_') { + throw new InvalidIndexNameException(expression.expression(), "must not start with '_'."); } } - private static void ensureRemoteIndicesRequireIgnoreUnavailable( - IndicesOptions options, - List resolvedExpressions - ) { + private static void ensureRemoteIndicesRequireIgnoreUnavailable(IndicesOptions options, List indexExpressions) { if (options.ignoreUnavailable()) { return; } - for (ResolvedExpression resolvedExpression : resolvedExpressions) { - var index = resolvedExpression.resource(); + for (String index : indexExpressions) { if (RemoteClusterAware.isRemoteIndexName(index)) { - failOnRemoteIndicesNotIgnoringUnavailable(resolvedExpressions); + failOnRemoteIndicesNotIgnoringUnavailable(indexExpressions); } } } - private static void failOnRemoteIndicesNotIgnoringUnavailable(List resolvedExpressions) { + private static void failOnRemoteIndicesNotIgnoringUnavailable(List indexExpressions) { List crossClusterIndices = new ArrayList<>(); - for (ResolvedExpression resolvedExpression : resolvedExpressions) { - String index = resolvedExpression.resource(); + for (String index : indexExpressions) { if (RemoteClusterAware.isRemoteIndexName(index)) { crossClusterIndices.add(index); } @@ -1807,13 +1780,13 @@ public static final class ExpressionList implements Iterable expressionsList; private final boolean hasWildcard; - public record Expression(ResolvedExpression resolvedExpression, boolean isWildcard, boolean isExclusion) { + public record Expression(String expression, boolean isWildcard, boolean isExclusion) { public String get() { if (isExclusion()) { // drop the leading "-" if exclusion because it is easier for callers to handle it like this - return resolvedExpression().resource().substring(1); + return expression().substring(1); } else { - return resolvedExpression().resource(); + return expression(); } } } @@ -1822,17 +1795,16 @@ public String get() { * Creates the expression iterable that can be used to easily check which expression item is a wildcard or an exclusion (or both). * The {@param context} is used to check if wildcards ought to be considered or not. 
*/ - public ExpressionList(Context context, List resolvedExpressions) { - List expressionsList = new ArrayList<>(resolvedExpressions.size()); + public ExpressionList(Context context, List expressionStrings) { + List expressionsList = new ArrayList<>(expressionStrings.size()); boolean wildcardSeen = false; - for (ResolvedExpression resolvedExpression : resolvedExpressions) { - var expressionString = resolvedExpression.resource(); + for (String expressionString : expressionStrings) { boolean isExclusion = expressionString.startsWith("-") && wildcardSeen; if (context.getOptions().expandWildcardExpressions() && isWildcard(expressionString)) { wildcardSeen = true; - expressionsList.add(new Expression(resolvedExpression, true, isExclusion)); + expressionsList.add(new Expression(expressionString, true, isExclusion)); } else { - expressionsList.add(new Expression(resolvedExpression, false, isExclusion)); + expressionsList.add(new Expression(expressionString, false, isExclusion)); } } this.expressionsList = expressionsList; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 2dc5e7c28ad0b..706f788e8a310 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; @@ -1714,7 +1713,7 @@ interface IndexDeletionAllowedPredicate { IndexSettings indexSettings) -> canDeleteIndexContents(index); private final IndexDeletionAllowedPredicate ALWAYS_TRUE = (Index index, IndexSettings indexSettings) -> true; - public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { + public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch * of dependencies we pass in a function that can perform the parsing. 
*/ CheckedFunction filterParser = bytes -> { diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 3a900a8a9b8a6..be96b4e25d841 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedSupplier; @@ -1619,7 +1618,7 @@ public boolean isForceExecution() { } } - public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { + public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { return indicesService.buildAliasFilter(state, index, resolvedExpressions); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java index 1faeabb6acbf7..834bacd9e6a04 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -230,19 +229,9 @@ public void testResolveHiddenProperlyWithDateMath() { .metadata(buildMetadata(new Object[][] {}, indices)) .build(); String[] requestedIndex = new String[] { "" }; - Set resolvedIndices = resolver.resolveExpressions( - clusterState, - IndicesOptions.LENIENT_EXPAND_OPEN, - true, - requestedIndex - ); + Set resolvedIndices = resolver.resolveExpressions(clusterState, IndicesOptions.LENIENT_EXPAND_OPEN, true, requestedIndex); assertThat(resolvedIndices.size(), is(1)); - assertThat( - resolvedIndices, - contains( - oneOf(new ResolvedExpression("logs-pgsql-prod-" + todaySuffix), new ResolvedExpression("logs-pgsql-prod-" + tomorrowSuffix)) - ) - ); + assertThat(resolvedIndices, contains(oneOf("logs-pgsql-prod-" + todaySuffix, "logs-pgsql-prod-" + tomorrowSuffix))); } public void testSystemIndexAccess() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java index fe0b7926229cb..6be5b48f9d723 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.DateMathExpressionResolver; -import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -27,6 +26,7 @@ import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Locale; @@ -52,11 +52,11 @@ private static String formatDate(String pattern, ZonedDateTime zonedDateTime) { public void testNormal() throws Exception { int numIndexExpressions = randomIntBetween(1, 9); - List indexExpressions = new ArrayList<>(numIndexExpressions); + List indexExpressions = new ArrayList<>(numIndexExpressions); for (int i = 0; i < numIndexExpressions; i++) { - indexExpressions.add(new ResolvedExpression(randomAlphaOfLength(10))); + indexExpressions.add(randomAlphaOfLength(10)); } - List result = DateMathExpressionResolver.resolve(context, indexExpressions); + List result = DateMathExpressionResolver.resolve(context, indexExpressions); assertThat(result.size(), equalTo(indexExpressions.size())); for (int i = 0; i < indexExpressions.size(); i++) { assertThat(result.get(i), equalTo(indexExpressions.get(i))); @@ -64,25 +64,25 @@ public void testNormal() throws Exception { } public void testExpression() throws Exception { - List indexExpressions = resolvedExpressions("<.marvel-{now}>", "<.watch_history-{now}>", ""); - List result = DateMathExpressionResolver.resolve(context, indexExpressions); + List indexExpressions = Arrays.asList("<.marvel-{now}>", "<.watch_history-{now}>", ""); + List result = DateMathExpressionResolver.resolve(context, indexExpressions); assertThat(result.size(), equalTo(3)); - assertThat(result.get(0).resource(), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); - assertThat(result.get(1).resource(), equalTo(".watch_history-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); - assertThat(result.get(2).resource(), equalTo("logstash-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(1), equalTo(".watch_history-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(2), equalTo("logstash-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); } public void testExpressionWithWildcardAndExclusions() { - List indexExpressions = resolvedExpressions( + List indexExpressions = Arrays.asList( "<-before-inner-{now}>", "-", "", "<-after-inner-{now}>", "-" ); - List result = DateMathExpressionResolver.resolve(context, indexExpressions); + List result = DateMathExpressionResolver.resolve(context, indexExpressions); assertThat( - result.stream().map(ResolvedExpression::resource).toList(), + result, Matchers.contains( equalTo("-before-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))), equalTo("-"), // doesn't evaluate because it doesn't start with "<" and it is not an exclusion @@ -98,7 +98,7 @@ public void testExpressionWithWildcardAndExclusions() { ); result = DateMathExpressionResolver.resolve(noWildcardExpandContext, indexExpressions); assertThat( - result.stream().map(ResolvedExpression::resource).toList(), + result, Matchers.contains( equalTo("-before-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))), // doesn't evaluate because it doesn't 
start with "<" and there can't be exclusions without wildcard expansion @@ -112,24 +112,21 @@ public void testExpressionWithWildcardAndExclusions() { } public void testEmpty() throws Exception { - List result = DateMathExpressionResolver.resolve(context, List.of()); + List result = DateMathExpressionResolver.resolve(context, Collections.emptyList()); assertThat(result.size(), equalTo(0)); } public void testExpression_Static() throws Exception { - List result = DateMathExpressionResolver.resolve(context, resolvedExpressions("<.marvel-test>")); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-test>")); assertThat(result.size(), equalTo(1)); - assertThat(result.get(0).resource(), equalTo(".marvel-test")); + assertThat(result.get(0), equalTo(".marvel-test")); } public void testExpression_MultiParts() throws Exception { - List result = DateMathExpressionResolver.resolve( - context, - resolvedExpressions("<.text1-{now/d}-text2-{now/M}>") - ); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.text1-{now/d}-text2-{now/M}>")); assertThat(result.size(), equalTo(1)); assertThat( - result.get(0).resource(), + result.get(0), equalTo( ".text1-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())) @@ -140,42 +137,33 @@ public void testExpression_MultiParts() throws Exception { } public void testExpression_CustomFormat() throws Exception { - List results = DateMathExpressionResolver.resolve( - context, - resolvedExpressions("<.marvel-{now/d{yyyy.MM.dd}}>") - ); + List results = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd}}>")); assertThat(results.size(), equalTo(1)); - assertThat(results.get(0).resource(), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(results.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); } public void testExpression_EscapeStatic() throws Exception { - List result = DateMathExpressionResolver.resolve(context, resolvedExpressions("<.mar\\{v\\}el-{now/d}>")); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.mar\\{v\\}el-{now/d}>")); assertThat(result.size(), equalTo(1)); - assertThat(result.get(0).resource(), equalTo(".mar{v}el-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(0), equalTo(".mar{v}el-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); } public void testExpression_EscapeDateFormat() throws Exception { - List result = DateMathExpressionResolver.resolve( - context, - resolvedExpressions("<.marvel-{now/d{'\\{year\\}'yyyy}}>") - ); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'yyyy}}>")); assertThat(result.size(), equalTo(1)); - assertThat(result.get(0).resource(), equalTo(".marvel-" + formatDate("'{year}'yyyy", dateFromMillis(context.getStartTime())))); + assertThat(result.get(0), equalTo(".marvel-" + formatDate("'{year}'yyyy", dateFromMillis(context.getStartTime())))); } public void testExpression_MixedArray() throws Exception { - List result = DateMathExpressionResolver.resolve( + List result = DateMathExpressionResolver.resolve( context, - resolvedExpressions("name1", "<.marvel-{now/d}>", "name2", "<.logstash-{now/M{uuuu.MM}}>") + Arrays.asList("name1", "<.marvel-{now/d}>", "name2", "<.logstash-{now/M{uuuu.MM}}>") ); assertThat(result.size(), equalTo(4)); - assertThat(result.get(0).resource(), 
equalTo("name1")); - assertThat(result.get(1).resource(), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); - assertThat(result.get(2).resource(), equalTo("name2")); - assertThat( - result.get(3).resource(), - equalTo(".logstash-" + formatDate("uuuu.MM", dateFromMillis(context.getStartTime()).withDayOfMonth(1))) - ); + assertThat(result.get(0), equalTo("name1")); + assertThat(result.get(1), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(2), equalTo("name2")); + assertThat(result.get(3), equalTo(".logstash-" + formatDate("uuuu.MM", dateFromMillis(context.getStartTime()).withDayOfMonth(1)))); } public void testExpression_CustomTimeZoneInIndexName() throws Exception { @@ -214,19 +202,19 @@ public void testExpression_CustomTimeZoneInIndexName() throws Exception { name -> false, name -> false ); - List results = DateMathExpressionResolver.resolve( + List results = DateMathExpressionResolver.resolve( context, - resolvedExpressions("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getId() + "}}>") + Arrays.asList("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getId() + "}}>") ); assertThat(results.size(), equalTo(1)); logger.info("timezone: [{}], now [{}], name: [{}]", timeZone, now, results.get(0)); - assertThat(results.get(0).resource(), equalTo(".marvel-" + formatDate("uuuu.MM.dd", now.withZoneSameInstant(timeZone)))); + assertThat(results.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", now.withZoneSameInstant(timeZone)))); } public void testExpressionInvalidUnescaped() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, resolvedExpressions("<.mar}vel-{now/d}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("invalid character at position [")); @@ -235,7 +223,7 @@ public void testExpressionInvalidUnescaped() throws Exception { public void testExpressionInvalidDateMathFormat() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, resolvedExpressions("<.marvel-{now/d{}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("date math placeholder is open ended")); @@ -244,7 +232,7 @@ public void testExpressionInvalidDateMathFormat() throws Exception { public void testExpressionInvalidEmptyDateMathFormat() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, resolvedExpressions("<.marvel-{now/d{}}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("missing date format")); @@ -253,13 +241,10 @@ public void testExpressionInvalidEmptyDateMathFormat() throws Exception { public void testExpressionInvalidOpenEnded() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, resolvedExpressions("<.marvel-{now/d>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")) ); 
assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("date math placeholder is open ended")); } - private List resolvedExpressions(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).toList(); - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java index 1df3bf4132b60..1ca59ff402bd8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java @@ -13,12 +13,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList.Expression; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.function.Supplier; @@ -41,13 +39,10 @@ public void testEmpty() { public void testExplicitSingleNameExpression() { for (IndicesOptions indicesOptions : List.of(getExpandWildcardsIndicesOptions(), getNoExpandWildcardsIndicesOptions())) { for (String expressionString : List.of("non_wildcard", "-non_exclusion")) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(indicesOptions), - resolvedExpressions(expressionString) - ); + ExpressionList expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); assertThat(expressionList.hasWildcard(), is(false)); if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(indicesOptions), resolvedExpressions((expressionString))); + expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); } Iterator expressionIterator = expressionList.iterator(); assertThat(expressionIterator.hasNext(), is(true)); @@ -67,14 +62,11 @@ public void testWildcardSingleExpression() { for (String wildcardTest : List.of("*", "a*", "*b", "a*b", "a-*b", "a*-b", "-*", "-a*", "-*b", "**", "*-*")) { ExpressionList expressionList = new ExpressionList( getContextWithOptions(getExpandWildcardsIndicesOptions()), - resolvedExpressions(wildcardTest) + List.of(wildcardTest) ); assertThat(expressionList.hasWildcard(), is(true)); if (randomBoolean()) { - expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - resolvedExpressions(wildcardTest) - ); + expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), List.of(wildcardTest)); } Iterator expressionIterator = expressionList.iterator(); assertThat(expressionIterator.hasNext(), is(true)); @@ -90,13 +82,13 @@ public void testWildcardSingleExpression() { } public void testWildcardLongerExpression() { - List onlyExplicits = randomList(7, () -> new ResolvedExpression(randomAlphaOfLengthBetween(0, 5))); - ResolvedExpression wildcard = new ResolvedExpression(randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**")); - List expressionList = new ArrayList<>(onlyExplicits.size() + 1); + List onlyExplicits = randomList(7, () -> randomAlphaOfLengthBetween(0, 5)); + String wildcard = 
randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**"); + List expressionList = new ArrayList<>(onlyExplicits.size() + 1); expressionList.addAll(randomSubsetOf(onlyExplicits)); int wildcardPos = expressionList.size(); expressionList.add(wildcard); - for (ResolvedExpression item : onlyExplicits) { + for (String item : onlyExplicits) { if (expressionList.contains(item) == false) { expressionList.add(item); } @@ -114,18 +106,18 @@ public void testWildcardLongerExpression() { } else { assertThat(expression.isWildcard(), is(true)); } - assertThat(expression.get(), is(expressionList.get(i++).resource())); + assertThat(expression.get(), is(expressionList.get(i++))); } } public void testWildcardsNoExclusionExpressions() { - for (List wildcardExpression : List.of( - resolvedExpressions("*"), - resolvedExpressions("a", "*"), - resolvedExpressions("-b", "*c"), - resolvedExpressions("-", "a", "c*"), - resolvedExpressions("*", "a*", "*b"), - resolvedExpressions("-*", "a", "b*") + for (List wildcardExpression : List.of( + List.of("*"), + List.of("a", "*"), + List.of("-b", "*c"), + List.of("-", "a", "c*"), + List.of("*", "a*", "*b"), + List.of("-*", "a", "b*") )) { ExpressionList expressionList = new ExpressionList( getContextWithOptions(getExpandWildcardsIndicesOptions()), @@ -138,25 +130,25 @@ public void testWildcardsNoExclusionExpressions() { int i = 0; for (Expression expression : expressionList) { assertThat(expression.isExclusion(), is(false)); - if (wildcardExpression.get(i).resource().contains("*")) { + if (wildcardExpression.get(i).contains("*")) { assertThat(expression.isWildcard(), is(true)); } else { assertThat(expression.isWildcard(), is(false)); } - assertThat(expression.get(), is(wildcardExpression.get(i++).resource())); + assertThat(expression.get(), is(wildcardExpression.get(i++))); } } } public void testWildcardExpressionNoExpandOptions() { - for (List wildcardExpression : List.of( - resolvedExpressions("*"), - resolvedExpressions("a", "*"), - resolvedExpressions("-b", "*c"), - resolvedExpressions("*d", "-"), - resolvedExpressions("*", "-*"), - resolvedExpressions("-", "a", "c*"), - resolvedExpressions("*", "a*", "*b") + for (List wildcardExpression : List.of( + List.of("*"), + List.of("a", "*"), + List.of("-b", "*c"), + List.of("*d", "-"), + List.of("*", "-*"), + List.of("-", "a", "c*"), + List.of("*", "a*", "*b") )) { ExpressionList expressionList = new ExpressionList( getContextWithOptions(getNoExpandWildcardsIndicesOptions()), @@ -170,7 +162,7 @@ public void testWildcardExpressionNoExpandOptions() { for (Expression expression : expressionList) { assertThat(expression.isWildcard(), is(false)); assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(wildcardExpression.get(i++).resource())); + assertThat(expression.get(), is(wildcardExpression.get(i++))); } } } @@ -180,17 +172,17 @@ public void testSingleExclusionExpression() { int wildcardPos = randomIntBetween(0, 3); String exclusion = randomFrom("-*", "-", "-c*", "-ab", "--"); int exclusionPos = randomIntBetween(wildcardPos + 1, 7); - List exclusionExpression = new ArrayList<>(); + List exclusionExpression = new ArrayList<>(); for (int i = 0; i < wildcardPos; i++) { - exclusionExpression.add(new ResolvedExpression(randomAlphaOfLengthBetween(0, 5))); + exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); } - exclusionExpression.add(new ResolvedExpression(wildcard)); + exclusionExpression.add(wildcard); for (int i = wildcardPos + 1; i < exclusionPos; i++) { - exclusionExpression.add(new 
ResolvedExpression(randomAlphaOfLengthBetween(0, 5))); + exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); } - exclusionExpression.add(new ResolvedExpression(exclusion)); + exclusionExpression.add(exclusion); for (int i = 0; i < randomIntBetween(0, 3); i++) { - exclusionExpression.add(new ResolvedExpression(randomAlphaOfLengthBetween(0, 5))); + exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); } ExpressionList expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), exclusionExpression); if (randomBoolean()) { @@ -201,28 +193,28 @@ public void testSingleExclusionExpression() { if (i == wildcardPos) { assertThat(expression.isWildcard(), is(true)); assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++).resource())); + assertThat(expression.get(), is(exclusionExpression.get(i++))); } else if (i == exclusionPos) { assertThat(expression.isExclusion(), is(true)); - assertThat(expression.isWildcard(), is(exclusionExpression.get(i).resource().contains("*"))); - assertThat(expression.get(), is(exclusionExpression.get(i++).resource().substring(1))); + assertThat(expression.isWildcard(), is(exclusionExpression.get(i).contains("*"))); + assertThat(expression.get(), is(exclusionExpression.get(i++).substring(1))); } else { assertThat(expression.isWildcard(), is(false)); assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++).resource())); + assertThat(expression.get(), is(exclusionExpression.get(i++))); } } } public void testExclusionsExpression() { - for (Tuple, List> exclusionExpression : List.of( - new Tuple<>(resolvedExpressions("-a", "*", "-a"), List.of(false, false, true)), - new Tuple<>(resolvedExpressions("-b*", "c", "-a"), List.of(false, false, true)), - new Tuple<>(resolvedExpressions("*d", "-", "*b"), List.of(false, true, false)), - new Tuple<>(resolvedExpressions("-", "--", "-*", "", "-*"), List.of(false, false, false, false, true)), - new Tuple<>(resolvedExpressions("*-", "-*", "a", "-b"), List.of(false, true, false, true)), - new Tuple<>(resolvedExpressions("a", "-b", "-*", "-b", "*", "-b"), List.of(false, false, false, true, false, true)), - new Tuple<>(resolvedExpressions("-a", "*d", "-a", "-*b", "-b", "--"), List.of(false, false, true, true, true, true)) + for (Tuple, List> exclusionExpression : List.of( + new Tuple<>(List.of("-a", "*", "-a"), List.of(false, false, true)), + new Tuple<>(List.of("-b*", "c", "-a"), List.of(false, false, true)), + new Tuple<>(List.of("*d", "-", "*b"), List.of(false, true, false)), + new Tuple<>(List.of("-", "--", "-*", "", "-*"), List.of(false, false, false, false, true)), + new Tuple<>(List.of("*-", "-*", "a", "-b"), List.of(false, true, false, true)), + new Tuple<>(List.of("a", "-b", "-*", "-b", "*", "-b"), List.of(false, false, false, true, false, true)), + new Tuple<>(List.of("-a", "*d", "-a", "-*b", "-b", "--"), List.of(false, false, true, true, true, true)) )) { ExpressionList expressionList = new ExpressionList( getContextWithOptions(getExpandWildcardsIndicesOptions()), @@ -235,11 +227,11 @@ public void testExclusionsExpression() { for (Expression expression : expressionList) { boolean isExclusion = exclusionExpression.v2().get(i); assertThat(expression.isExclusion(), is(isExclusion)); - assertThat(expression.isWildcard(), is(exclusionExpression.v1().get(i).resource().contains("*"))); + assertThat(expression.isWildcard(), is(exclusionExpression.v1().get(i).contains("*"))); if 
(isExclusion) { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++).resource().substring(1))); + assertThat(expression.get(), is(exclusionExpression.v1().get(i++).substring(1))); } else { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++).resource())); + assertThat(expression.get(), is(exclusionExpression.v1().get(i++))); } } } @@ -314,8 +306,4 @@ private Context getContextWithOptions(IndicesOptions indicesOptions) { when(context.getOptions()).thenReturn(indicesOptions); return context; } - - private List resolvedExpressions(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).toList(); - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index da19bd68e288a..d58de5ca65ea0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata.State; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -1581,27 +1580,16 @@ public void testResolveExpressions() { .put(indexBuilder("test-1").state(State.OPEN).putAlias(AliasMetadata.builder("alias-1"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); + assertEquals(new HashSet<>(Arrays.asList("alias-0", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "alias-*")); assertEquals( - Set.of(new ResolvedExpression("alias-0"), new ResolvedExpression("alias-1")), - indexNameExpressionResolver.resolveExpressions(state, "alias-*") - ); - assertEquals( - Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("alias-0"), new ResolvedExpression("alias-1")), + new HashSet<>(Arrays.asList("test-0", "alias-0", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "test-0", "alias-*") ); assertEquals( - Set.of( - new ResolvedExpression("test-0"), - new ResolvedExpression("test-1"), - new ResolvedExpression("alias-0"), - new ResolvedExpression("alias-1") - ), + new HashSet<>(Arrays.asList("test-0", "test-1", "alias-0", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "test-*", "alias-*") ); - assertEquals( - Set.of(new ResolvedExpression("test-1"), new ResolvedExpression("alias-1")), - indexNameExpressionResolver.resolveExpressions(state, "*-1") - ); + assertEquals(new HashSet<>(Arrays.asList("test-1", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "*-1")); } public void testFilteringAliases() { @@ -1610,25 +1598,16 @@ public void testFilteringAliases() { .put(indexBuilder("test-1").state(State.OPEN).putAlias(AliasMetadata.builder("alias-1"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - Set resolvedExpressions = Set.of(new ResolvedExpression("alias-0"), new ResolvedExpression("alias-1")); + Set resolvedExpressions = new HashSet<>(Arrays.asList("alias-0", "alias-1")); String[] strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); 
assertArrayEquals(new String[] { "alias-0" }, strings); // concrete index supersedes filtering alias - resolvedExpressions = Set.of( - new ResolvedExpression("test-0"), - new ResolvedExpression("alias-0"), - new ResolvedExpression("alias-1") - ); + resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "alias-0", "alias-1")); strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertNull(strings); - resolvedExpressions = Set.of( - new ResolvedExpression("test-0"), - new ResolvedExpression("test-1"), - new ResolvedExpression("alias-0"), - new ResolvedExpression("alias-1") - ); + resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "test-1", "alias-0", "alias-1")); strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertNull(strings); } @@ -1642,7 +1621,7 @@ public void testIndexAliases() { .putAlias(AliasMetadata.builder("test-alias-non-filtering")) ); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "test-*"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "test-*"); String[] strings = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, true, resolvedExpressions); Arrays.sort(strings); @@ -1677,28 +1656,28 @@ public void testIndexAliasesDataStreamAliases() { ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); { // Only resolve aliases with with that refer to dataStreamName1 - Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases(state, index, x -> true, x -> true, true, resolvedExpressions); assertThat(result, arrayContainingInAnyOrder("logs_foo", "logs", "logs_bar")); } { // Only resolve aliases with with that refer to dataStreamName2 - Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); String index = backingIndex2.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases(state, index, x -> true, x -> true, true, resolvedExpressions); assertThat(result, arrayContainingInAnyOrder("logs_baz", "logs_baz2")); } { // Null is returned, because skipping identity check and resolvedExpressions contains the backing index name - Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); String index = backingIndex2.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases(state, index, x -> true, x -> true, false, resolvedExpressions); assertThat(result, nullValue()); } { // Null is returned, because the wildcard expands to a list of aliases containing an unfiltered alias for dataStreamName1 - Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases( state, @@ -1712,7 +1691,7 @@ public void testIndexAliasesDataStreamAliases() { } { // 
Null is returned, because an unfiltered alias is targeting the same data stream - Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "logs_bar", "logs"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "logs_bar", "logs"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases( state, @@ -1726,7 +1705,7 @@ public void testIndexAliasesDataStreamAliases() { } { // The filtered alias is returned because although we target the data stream name, skipIdentity is true - Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, dataStreamName1, "logs"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, dataStreamName1, "logs"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases( state, @@ -1740,7 +1719,7 @@ public void testIndexAliasesDataStreamAliases() { } { // Null is returned because we target the data stream name and skipIdentity is false - Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, dataStreamName1, "logs"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, dataStreamName1, "logs"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases( state, @@ -1763,13 +1742,13 @@ public void testIndexAliasesSkipIdentity() { ); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - Set resolvedExpressions = Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("test-alias")); + Set resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "test-alias")); String[] aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, false, resolvedExpressions); assertNull(aliases); aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, true, resolvedExpressions); assertArrayEquals(new String[] { "test-alias" }, aliases); - resolvedExpressions = Collections.singleton(new ResolvedExpression("other-alias")); + resolvedExpressions = Collections.singleton("other-alias"); aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, false, resolvedExpressions); assertArrayEquals(new String[] { "other-alias" }, aliases); aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, true, resolvedExpressions); @@ -1790,7 +1769,7 @@ public void testConcreteWriteIndexSuccessful() { x -> true, x -> true, true, - Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("test-alias")) + new HashSet<>(Arrays.asList("test-0", "test-alias")) ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); @@ -1872,7 +1851,7 @@ public void testConcreteWriteIndexWithWildcardExpansion() { x -> true, x -> true, true, - Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("test-1"), new ResolvedExpression("test-alias")) + new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias")) ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); @@ -1910,7 +1889,7 @@ public void testConcreteWriteIndexWithNoWriteIndexWithSingleIndex() { x -> true, x -> true, true, - Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("test-alias")) + new HashSet<>(Arrays.asList("test-0", "test-alias")) ); Arrays.sort(strings); assertArrayEquals(new String[] { 
"test-alias" }, strings); @@ -1946,7 +1925,7 @@ public void testConcreteWriteIndexWithNoWriteIndexWithMultipleIndices() { x -> true, x -> true, true, - Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("test-1"), new ResolvedExpression("test-alias")) + new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias")) ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); @@ -1987,7 +1966,7 @@ public void testAliasResolutionNotAllowingMultipleIndices() { x -> true, x -> true, true, - Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("test-1"), new ResolvedExpression("test-alias")) + new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias")) ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index 25ed5fb2bdab2..982394ca31b1c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata.State; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; @@ -21,13 +20,13 @@ import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; -import java.util.Set; import java.util.function.Predicate; -import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createBackingIndex; import static org.elasticsearch.common.util.set.Sets.newHashSet; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -51,52 +50,50 @@ public void testConvertWildcardsJustIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX"))), - equalTo(resolvedExpressionsSet("testXXX")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testXXX"))), + equalTo(newHashSet("testXXX")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX", "testYYY"))), - equalTo(resolvedExpressionsSet("testXXX", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), + equalTo(newHashSet("testXXX", "testYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX", "ku*"))), - equalTo(resolvedExpressionsSet("testXXX", "kuku")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "ku*"))), + equalTo(newHashSet("testXXX", "kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*"))), - 
equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*", "kuku"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "kuku")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testX*", "kuku"))), + equalTo(newHashSet("testXXX", "testXYY", "kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY", "kuku")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("*", "-kuku"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("*", "-kuku"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( newHashSet( IndexNameExpressionResolver.WildcardExpressionResolver.resolve( context, - resolvedExpressions("testX*", "-doe", "-testXXX", "-testYYY") + Arrays.asList("testX*", "-doe", "-testXXX", "-testYYY") ) ), - equalTo(resolvedExpressionsSet("testXYY")) + equalTo(newHashSet("testXYY")) ); if (indicesOptions == IndicesOptions.lenientExpandOpen()) { assertThat( - newHashSet( - IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX", "-testXXX")) - ), - equalTo(resolvedExpressionsSet("testXXX", "-testXXX")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), + equalTo(newHashSet("testXXX", "-testXXX")) ); } else if (indicesOptions == IndicesOptions.strictExpandOpen()) { IndexNotFoundException infe = expectThrows( @@ -106,8 +103,8 @@ public void testConvertWildcardsJustIndicesTests() { assertEquals("-testXXX", infe.getIndex().getName()); } assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX", "-testX*"))), - equalTo(resolvedExpressionsSet("testXXX")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "-testX*"))), + equalTo(newHashSet("testXXX")) ); } @@ -125,24 +122,24 @@ public void testConvertWildcardsTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testYY*", "alias*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testYY*", "alias*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); 
assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("-kuku"))), - equalTo(resolvedExpressionsSet("-kuku")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("-kuku"))), + equalTo(newHashSet("-kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*", "-testYYY"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("test*", "-testYYY"))), + equalTo(newHashSet("testXXX", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*", "testYYY"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testX*", "testYYY"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testYYY", "testX*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testYYY", "testX*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); } @@ -162,8 +159,8 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXXY", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXXY", "testXYY")) ); context = new IndexNameExpressionResolver.Context( state, @@ -171,8 +168,8 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*"))), - equalTo(resolvedExpressionsSet("testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXYY")) ); context = new IndexNameExpressionResolver.Context( state, @@ -180,8 +177,8 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXXY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXXY")) ); context = new IndexNameExpressionResolver.Context( state, @@ -220,27 +217,28 @@ public void testMultipleWildcards() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*X*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXXY", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*X*"))), + equalTo(newHashSet("testXXX", "testXXY", "testXYY")) ); assertThat( - 
newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*X*Y"))), - equalTo(resolvedExpressionsSet("testXXY", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*X*Y"))), + equalTo(newHashSet("testXXY", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("kuku*Y*"))), - equalTo(resolvedExpressionsSet("kukuYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("kuku*Y*"))), + equalTo(newHashSet("kukuYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("*Y*"))), - equalTo(resolvedExpressionsSet("testXXY", "testXYY", "testYYY", "kukuYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*Y*"))), + equalTo(newHashSet("testXXY", "testXYY", "testYYY", "kukuYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*Y*X"))).size(), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*Y*X"))) + .size(), equalTo(0) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("*Y*X"))).size(), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*Y*X"))).size(), equalTo(0) ); } @@ -259,11 +257,11 @@ public void testAll() { ); assertThat( newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( newHashSet(IndexNameExpressionResolver.resolveExpressions(context, "_all")), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); IndicesOptions noExpandOptions = IndicesOptions.fromOptions( randomBoolean(), @@ -300,7 +298,7 @@ public void testAllAliases() { IndicesOptions.lenientExpandOpen(), // don't include hidden SystemIndexAccessLevel.NONE ); - assertThat(newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(Set.of())); + assertThat(newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(newHashSet())); } { @@ -321,7 +319,7 @@ public void testAllAliases() { ); assertThat( newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), - equalTo(resolvedExpressionsSet("index-visible-alias")) + equalTo(newHashSet("index-visible-alias")) ); } } @@ -364,7 +362,7 @@ public void testAllDataStreams() { assertThat( newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), - equalTo(resolvedExpressionsSet(DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis))) + equalTo(newHashSet(DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis))) ); } @@ -387,7 +385,7 @@ public void testAllDataStreams() { NONE ); - assertThat(newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(Set.of())); + assertThat(newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(newHashSet())); } } @@ -508,16 +506,16 @@ 
public void testResolveAliases() { ); { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("foo_a*") + Collections.singletonList("foo_a*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_index", "bar_index"))); + assertThat(indices, containsInAnyOrder("foo_index", "bar_index")); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesLenientContext, - resolvedExpressions("foo_a*") + Collections.singletonList("foo_a*") ); assertEquals(0, indices.size()); } @@ -526,45 +524,45 @@ public void testResolveAliases() { IndexNotFoundException.class, () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesStrictContext, - resolvedExpressions("foo_a*") + Collections.singletonList("foo_a*") ) ); assertEquals("foo_a*", infe.getIndex().getName()); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("foo*") + Collections.singletonList("foo*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_foo", "foo_index", "bar_index"))); + assertThat(indices, containsInAnyOrder("foo_foo", "foo_index", "bar_index")); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesLenientContext, - resolvedExpressions("foo*") + Collections.singletonList("foo*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_foo", "foo_index"))); + assertThat(indices, containsInAnyOrder("foo_foo", "foo_index")); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesStrictContext, - resolvedExpressions("foo*") + Collections.singletonList("foo*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_foo", "foo_index"))); + assertThat(indices, containsInAnyOrder("foo_foo", "foo_index")); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("foo_alias") + Collections.singletonList("foo_alias") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_alias"))); + assertThat(indices, containsInAnyOrder("foo_alias")); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesLenientContext, - resolvedExpressions("foo_alias") + Collections.singletonList("foo_alias") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_alias"))); + assertThat(indices, containsInAnyOrder("foo_alias")); } { IllegalArgumentException iae = expectThrows( @@ -583,11 +581,11 @@ public void testResolveAliases() { SystemIndexAccessLevel.NONE ); { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = 
IndexNameExpressionResolver.WildcardExpressionResolver.resolve( noExpandNoAliasesContext, - resolvedExpressions("foo_alias") + List.of("foo_alias") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_alias"))); + assertThat(indices, containsInAnyOrder("foo_alias")); } IndicesOptions strictNoExpandNoAliasesIndicesOptions = IndicesOptions.fromOptions( false, @@ -656,18 +654,18 @@ public void testResolveDataStreams() { ); // data streams are not included but expression matches the data stream - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("foo_*") + Collections.singletonList("foo_*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_index", "foo_foo", "bar_index"))); + assertThat(indices, containsInAnyOrder("foo_index", "foo_foo", "bar_index")); // data streams are not included and expression doesn't match the data steram indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("bar_*") + Collections.singletonList("bar_*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("bar_bar", "bar_index"))); + assertThat(indices, containsInAnyOrder("bar_bar", "bar_index")); } { @@ -693,39 +691,35 @@ public void testResolveDataStreams() { ); // data stream's corresponding backing indices are resolved - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAliasesAndDataStreamsContext, - resolvedExpressions("foo_*") + Collections.singletonList("foo_*") ); assertThat( - newHashSet(indices), - equalTo( - resolvedExpressionsSet( - "foo_index", - "bar_index", - "foo_foo", - DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), - DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) - ) + indices, + containsInAnyOrder( + "foo_index", + "bar_index", + "foo_foo", + DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), + DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) ) ); // include all wildcard adds the data stream's backing indices indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAliasesAndDataStreamsContext, - resolvedExpressions("*") + Collections.singletonList("*") ); assertThat( - newHashSet(indices), - equalTo( - resolvedExpressionsSet( - "foo_index", - "bar_index", - "foo_foo", - "bar_bar", - DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), - DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) - ) + indices, + containsInAnyOrder( + "foo_index", + "bar_index", + "foo_foo", + "bar_bar", + DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), + DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) ) ); } @@ -754,39 +748,35 @@ public void testResolveDataStreams() { ); // data stream's corresponding backing indices are resolved - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAliasesDataStreamsAndHiddenIndices, - resolvedExpressions("foo_*") + Collections.singletonList("foo_*") ); assertThat( - newHashSet(indices), - equalTo( - resolvedExpressionsSet( - "foo_index", - "bar_index", - "foo_foo", - 
DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), - DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) - ) + indices, + containsInAnyOrder( + "foo_index", + "bar_index", + "foo_foo", + DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), + DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) ) ); // include all wildcard adds the data stream's backing indices indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAliasesDataStreamsAndHiddenIndices, - resolvedExpressions("*") + Collections.singletonList("*") ); assertThat( - newHashSet(indices), - equalTo( - resolvedExpressionsSet( - "foo_index", - "bar_index", - "foo_foo", - "bar_bar", - DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), - DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) - ) + indices, + containsInAnyOrder( + "foo_index", + "bar_index", + "foo_foo", + "bar_bar", + DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), + DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) ) ); } @@ -818,28 +808,16 @@ public void testMatchesConcreteIndicesWildcardAndAliases() { SystemIndexAccessLevel.NONE ); - Collection matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - indicesAndAliasesContext, - List.of(new ResolvedExpression("*")) - ); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("bar_bar", "foo_foo", "foo_index", "bar_index"))); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(onlyIndicesContext, List.of(new ResolvedExpression("*"))); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("bar_bar", "foo_foo", "foo_index", "bar_index"))); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - indicesAndAliasesContext, - List.of(new ResolvedExpression("foo*")) - ); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("foo_foo", "foo_index", "bar_index"))); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - onlyIndicesContext, - List.of(new ResolvedExpression("foo*")) - ); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("foo_foo", "foo_index"))); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - indicesAndAliasesContext, - List.of(new ResolvedExpression("foo_alias")) - ); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("foo_alias"))); + Collection matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("*")); + assertThat(matches, containsInAnyOrder("bar_bar", "foo_foo", "foo_index", "bar_index")); + matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(onlyIndicesContext, List.of("*")); + assertThat(matches, containsInAnyOrder("bar_bar", "foo_foo", "foo_index", "bar_index")); + matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("foo*")); + assertThat(matches, containsInAnyOrder("foo_foo", "foo_index", "bar_index")); + matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(onlyIndicesContext, List.of("foo*")); + assertThat(matches, containsInAnyOrder("foo_foo", "foo_index")); + matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("foo_alias")); + assertThat(matches, containsInAnyOrder("foo_alias")); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, () -> 
IndexNameExpressionResolver.resolveExpressions(onlyIndicesContext, "foo_alias") @@ -862,19 +840,8 @@ private static IndexMetadata.Builder indexBuilder(String index) { private static void assertWildcardResolvesToEmpty(IndexNameExpressionResolver.Context context, String wildcardExpression) { IndexNotFoundException infe = expectThrows( IndexNotFoundException.class, - () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - context, - List.of(new ResolvedExpression(wildcardExpression)) - ) + () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of(wildcardExpression)) ); assertEquals(wildcardExpression, infe.getIndex().getName()); } - - private List resolvedExpressions(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).toList(); - } - - private Set resolvedExpressionsSet(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).collect(Collectors.toSet()); - } } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 17975b7d18dd8..36f7355a541c1 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -78,7 +77,6 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.stream.Collectors; import java.util.stream.Stream; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -679,27 +677,27 @@ public void testBuildAliasFilter() { ); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); { - AliasFilter result = indicesService.buildAliasFilter(state, "test-0", resolvedExpressions("test-alias-0")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-0", Set.of("test-alias-0")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-0")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "bar"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-1", resolvedExpressions("test-alias-0")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-1", Set.of("test-alias-0")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-0")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "bar"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-0", resolvedExpressions("test-alias-1")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-0", Set.of("test-alias-1")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-1")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "baz"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-1", resolvedExpressions("test-alias-1")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-1", Set.of("test-alias-1")); 
assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-1")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "bax"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-0", resolvedExpressions("test-alias-0", "test-alias-1")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-0", Set.of("test-alias-0", "test-alias-1")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-0", "test-alias-1")); BoolQueryBuilder filter = (BoolQueryBuilder) result.getQueryBuilder(); assertThat(filter.filter(), empty()); @@ -708,7 +706,7 @@ public void testBuildAliasFilter() { assertThat(filter.should(), containsInAnyOrder(QueryBuilders.termQuery("foo", "baz"), QueryBuilders.termQuery("foo", "bar"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-1", resolvedExpressions("test-alias-0", "test-alias-1")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-1", Set.of("test-alias-0", "test-alias-1")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-0", "test-alias-1")); BoolQueryBuilder filter = (BoolQueryBuilder) result.getQueryBuilder(); assertThat(filter.filter(), empty()); @@ -720,7 +718,7 @@ public void testBuildAliasFilter() { AliasFilter result = indicesService.buildAliasFilter( state, "test-0", - resolvedExpressions("test-alias-0", "test-alias-1", "test-alias-non-filtering") + Set.of("test-alias-0", "test-alias-1", "test-alias-non-filtering") ); assertThat(result.getAliases(), emptyArray()); assertThat(result.getQueryBuilder(), nullValue()); @@ -729,7 +727,7 @@ public void testBuildAliasFilter() { AliasFilter result = indicesService.buildAliasFilter( state, "test-1", - resolvedExpressions("test-alias-0", "test-alias-1", "test-alias-non-filtering") + Set.of("test-alias-0", "test-alias-1", "test-alias-non-filtering") ); assertThat(result.getAliases(), emptyArray()); assertThat(result.getQueryBuilder(), nullValue()); @@ -756,19 +754,19 @@ public void testBuildAliasFilterDataStreamAliases() { ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); { String index = backingIndex1.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo")); assertThat(result.getAliases(), arrayContainingInAnyOrder("logs_foo")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "bar"))); } { String index = backingIndex2.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo")); assertThat(result.getAliases(), arrayContainingInAnyOrder("logs_foo")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "baz"))); } { String index = backingIndex1.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo", "logs")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo", "logs")); assertThat(result.getAliases(), arrayContainingInAnyOrder("logs_foo", "logs")); BoolQueryBuilder filter = (BoolQueryBuilder) result.getQueryBuilder(); assertThat(filter.filter(), empty()); @@ -778,7 +776,7 @@ public void testBuildAliasFilterDataStreamAliases() { } { String index = 
backingIndex2.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo", "logs")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo", "logs")); assertThat(result.getAliases(), arrayContainingInAnyOrder("logs_foo", "logs")); BoolQueryBuilder filter = (BoolQueryBuilder) result.getQueryBuilder(); assertThat(filter.filter(), empty()); @@ -789,13 +787,13 @@ public void testBuildAliasFilterDataStreamAliases() { { // querying an unfiltered and a filtered alias for the same data stream should drop the filters String index = backingIndex1.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo", "logs", "logs_bar")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo", "logs", "logs_bar")); assertThat(result, is(AliasFilter.EMPTY)); } { // similarly, querying the data stream name and a filtered alias should drop the filter String index = backingIndex1.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs", dataStreamName1)); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs", dataStreamName1)); assertThat(result, is(AliasFilter.EMPTY)); } } @@ -848,8 +846,4 @@ public void testWithTempIndexServiceHandlesExistingIndex() throws Exception { return null; }); } - - private Set resolvedExpressions(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).collect(Collectors.toSet()); - } } From 0782efa3c0e417f071fb4820d5a6e913d3043a0a Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 22 Oct 2024 11:50:17 -0400 Subject: [PATCH 026/202] [ML] Unmute ServerSentEventsRestActionListenerTests (#113382) Relate #113148 Co-authored-by: Elastic Machine --- muted-tests.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 62140d05c9e4a..b84e86ddd7f55 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -146,18 +146,12 @@ tests: - class: org.elasticsearch.action.admin.cluster.node.stats.NodeStatsTests method: testChunking issue: https://github.com/elastic/elasticsearch/issues/113139 -- class: org.elasticsearch.xpack.inference.rest.ServerSentEventsRestActionListenerTests - method: testResponse - issue: https://github.com/elastic/elasticsearch/issues/113148 - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test30StartStop issue: https://github.com/elastic/elasticsearch/issues/113160 - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test33JavaChanged issue: https://github.com/elastic/elasticsearch/issues/113177 -- class: org.elasticsearch.xpack.inference.rest.ServerSentEventsRestActionListenerTests - method: testErrorMidStream - issue: https://github.com/elastic/elasticsearch/issues/113179 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113054 From 9c923db8f61576701a3dfd8b7ee4b0152a37091e Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Tue, 22 Oct 2024 12:15:44 -0400 Subject: [PATCH 027/202] [ESQL] Support date_nanos on functions that take "any" type (#114056) Resolves #109998 For the most part, this is just adding tests. Greatest and Least have actual production code changes - notably toEvaluator is modified to map date nanos to the long evaluator. This parallels the work done in #113961.
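For reference, the production change in both classes amounts to one extra branch in the toEvaluator type dispatch: date nanos values are encoded as longs, so they can reuse the existing long evaluator. The Greatest.java hunk below reduces to:

    // DATE_NANOS is new here; it shares the long representation with LONG and DATETIME
    if (dataType == DataType.LONG || dataType == DataType.DATETIME || dataType == DataType.DATE_NANOS) {
        return new GreatestLongEvaluator.Factory(source(), factories);
    }

Least gets the equivalent branch routing to LeastLongEvaluator.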
I've added CSV tests and unit tests for all the functions listed in the original ticket. --------- Co-authored-by: Elastic Machine --- .../src/main/resources/date_nanos.csv | 1 + .../src/main/resources/date_nanos.csv-spec | 72 ++++++++++++++++++- .../xpack/esql/action/EsqlCapabilities.java | 5 ++ .../function/scalar/conditional/Greatest.java | 8 +-- .../function/scalar/conditional/Least.java | 8 +-- .../function/scalar/multivalue/MvDedupe.java | 2 + .../function/scalar/multivalue/MvSlice.java | 2 + .../function/scalar/multivalue/MvSort.java | 4 +- .../function/scalar/nulls/Coalesce.java | 3 + .../function/AbstractFunctionTestCase.java | 12 ++-- .../scalar/conditional/CaseTests.java | 1 + .../scalar/conditional/GreatestTests.java | 15 ++++ .../scalar/conditional/LeastTests.java | 15 ++++ .../scalar/multivalue/MvDedupeTests.java | 1 + .../scalar/multivalue/MvSliceTests.java | 17 +++++ .../scalar/multivalue/MvSortTests.java | 14 ++++ .../function/scalar/nulls/CoalesceTests.java | 13 ++++ 17 files changed, 177 insertions(+), 16 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv index 83a2f3cb1c281..029c3baf3cbfb 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv @@ -7,3 +7,4 @@ millis:date,nanos:date_nanos,num:long 2023-10-23T12:27:28.948Z,2023-10-23T12:27:28.948000000Z,1698064048948000000 2023-10-23T12:15:03.360Z,2023-10-23T12:15:03.360103847Z,1698063303360103847 1999-10-23T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-02-23T13:33:34.937193000Z, 2023-01-23T13:55:01.543123456Z], 0 +1999-10-22T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z], 0 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index ad7149b0f742f..515e2c9c6587f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -38,9 +38,10 @@ nanos:date_nanos mv_min on date nanos required_capability: date_nanos_type -FROM date_nanos | SORT millis ASC | EVAL nanos = MV_MIN(nanos) | KEEP nanos | LIMIT 1; +FROM date_nanos | SORT millis ASC | WHERE millis < "2000-01-01" | EVAL nanos = MV_MIN(nanos) | KEEP nanos; nanos:date_nanos +2023-03-23T12:15:03.360103847Z 2023-01-23T13:55:01.543123456Z ; @@ -56,9 +57,10 @@ ct:integer mv_first on date nanos required_capability: date_nanos_type -FROM date_nanos | SORT millis ASC | EVAL nanos = MV_FIRST(nanos) | KEEP nanos | LIMIT 1; +FROM date_nanos | SORT millis ASC | WHERE millis < "2000-01-01" | EVAL nanos = MV_FIRST(nanos) | KEEP nanos; nanos:date_nanos +2023-03-23T12:15:03.360103847Z 2023-01-23T13:55:01.543123456Z ; @@ -263,3 +265,69 @@ ROW a = TO_DATE_NANOS(null), b = TO_DATE_NANOS(null + 1::long), c = TO_DATE_NANO a:date_nanos | b:date_nanos | c:date_nanos null | null | null ; + +Coalesce date nanos +required_capability: to_date_nanos + +ROW a = COALESCE(null, TO_DATE_NANOS(1698069301543123456)); + +a:date_nanos +2023-10-23T13:55:01.543123456Z +; + +Case date nanos result +required_capability: to_date_nanos + +ROW a = CASE(false, TO_DATE_NANOS(0::long), TO_DATE_NANOS(1698069301543123456)); + +a:date_nanos +2023-10-23T13:55:01.543123456Z +; + +Greatest date nanos +required_capability: 
least_greatest_for_datenanos + +ROW a = GREATEST(TO_DATE_NANOS("2023-10-23T13:55:01.543123456"), TO_DATE_NANOS("2023-10-23T13:53:55.832987654")); + +a:date_nanos +2023-10-23T13:55:01.543123456Z +; + +Least date nanos +required_capability: least_greatest_for_datenanos + +ROW a = LEAST(TO_DATE_NANOS("2023-10-23T13:55:01.543123456"), TO_DATE_NANOS("2023-10-23T13:53:55.832987654")); + +a:date_nanos +2023-10-23T13:53:55.832987654Z +; + +mv_dedup over date nanos +required_capability: date_nanos_type + +FROM date_nanos | WHERE millis < "2000-01-01" | EVAL a = MV_DEDUPE(nanos) | SORT millis DESC | KEEP a; + +a:date_nanos +[2023-01-23T13:55:01.543123456Z, 2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] +2023-03-23T12:15:03.360103847Z +; + +mv_sort over date nanos +required_capability: date_nanos_type + +FROM date_nanos | WHERE millis < "2000-01-01" | EVAL a = MV_SORT(nanos, "asc") | SORT millis DESC | KEEP a; + +a:date_nanos +[2023-01-23T13:55:01.543123456Z, 2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] +[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z] +; + +mv_slice over date nanos +required_capability: date_nanos_type + +FROM date_nanos | WHERE millis < "2000-01-01" | EVAL a = MV_SLICE(MV_SORT(nanos, "asc"), 1, 2) | SORT millis DESC | KEEP a; + +a:date_nanos +[2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] +[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z] +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index df9f14a6ac227..94211e4726a2c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -308,6 +308,11 @@ public enum Cap { */ TO_DATE_NANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** + * Support Least and Greatest functions on Date Nanos type + */ + LEAST_GREATEST_FOR_DATENANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** * Support for datetime in least and greatest functions */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java index d47ebeab4ca6c..aad2d37d414b8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java @@ -43,7 +43,7 @@ public class Greatest extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Returns the maximum value from multiple columns. This is similar to <>\n" + "except it is intended to run on multiple columns at once.", note = "When run on `keyword` or `text` fields, this returns the last string in alphabetical order. 
" @@ -54,12 +54,12 @@ public Greatest( Source source, @Param( name = "first", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "First of the columns to evaluate." ) Expression first, @Param( name = "rest", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "The rest of the columns to evaluate.", optional = true ) List rest @@ -152,7 +152,7 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { if (dataType == DataType.INTEGER) { return new GreatestIntEvaluator.Factory(source(), factories); } - if (dataType == DataType.LONG || dataType == DataType.DATETIME) { + if (dataType == DataType.LONG || dataType == DataType.DATETIME || dataType == DataType.DATE_NANOS) { return new GreatestLongEvaluator.Factory(source(), factories); } if (DataType.isString(dataType) || dataType == DataType.IP || dataType == DataType.VERSION || dataType == DataType.UNSUPPORTED) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java index 81c1419dcf788..70ba9319385f3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java @@ -43,7 +43,7 @@ public class Least extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Returns the minimum value from multiple columns. " + "This is similar to <> except it is intended to run on multiple columns at once.", examples = @Example(file = "math", tag = "least") @@ -52,12 +52,12 @@ public Least( Source source, @Param( name = "first", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "First of the columns to evaluate." 
) Expression first, @Param( name = "rest", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "The rest of the columns to evaluate.", optional = true ) List rest @@ -151,7 +151,7 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { if (dataType == DataType.INTEGER) { return new LeastIntEvaluator.Factory(source(), factories); } - if (dataType == DataType.LONG || dataType == DataType.DATETIME) { + if (dataType == DataType.LONG || dataType == DataType.DATETIME || dataType == DataType.DATE_NANOS) { return new LeastLongEvaluator.Factory(source(), factories); } if (DataType.isString(dataType) || dataType == DataType.IP || dataType == DataType.VERSION || dataType == DataType.UNSUPPORTED) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java index b17ddddb422ce..34b89b4f78997 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java @@ -38,6 +38,7 @@ public class MvDedupe extends AbstractMultivalueFunction { "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", @@ -60,6 +61,7 @@ public MvDedupe( "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java index a829b6f1417b9..ef562c339dfd9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -59,6 +59,7 @@ public class MvSlice extends EsqlScalarFunction implements OptionalArgument, Eva "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", @@ -87,6 +88,7 @@ public MvSlice( "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java index d9e41233952de..5ca5618bf2a54 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -69,7 +69,7 @@ public class MvSort extends EsqlScalarFunction implements OptionalArgument, Vali private static final String INVALID_ORDER_ERROR = "Invalid order value in [{}], expected one of [{}, {}] but got [{}]"; @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = 
"Sorts a multivalued field in lexicographical order.", examples = @Example(file = "ints", tag = "mv_sort") ) @@ -77,7 +77,7 @@ public MvSort( Source source, @Param( name = "field", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Multivalue expression. If `null`, the function returns `null`." ) Expression field, @Param( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index 575bb085c41f7..6b9c8d0da025b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -53,6 +53,7 @@ public class Coalesce extends EsqlScalarFunction implements OptionalArgument { "boolean", "cartesian_point", "cartesian_shape", + "date_nanos", "date", "geo_point", "geo_shape", @@ -73,6 +74,7 @@ public Coalesce( "boolean", "cartesian_point", "cartesian_shape", + "date_nanos", "date", "geo_point", "geo_shape", @@ -90,6 +92,7 @@ public Coalesce( "boolean", "cartesian_point", "cartesian_shape", + "date_nanos", "date", "geo_point", "geo_shape", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 84a41ef040c8e..397c269ae49ff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -722,17 +722,19 @@ public static void testFunctionInfo() { for (int i = 0; i < args.size() && i < types.size(); i++) { typesFromSignature.get(i).add(types.get(i).esNameIfPossible()); } - returnFromSignature.add(entry.getValue().esNameIfPossible()); + if (DataType.UNDER_CONSTRUCTION.containsKey(entry.getValue()) == false) { + returnFromSignature.add(entry.getValue().esNameIfPossible()); + } } for (int i = 0; i < args.size(); i++) { EsqlFunctionRegistry.ArgSignature arg = args.get(i); Set annotationTypes = Arrays.stream(arg.type()) - .filter(DataType.UNDER_CONSTRUCTION::containsKey) + .filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false) .collect(Collectors.toCollection(TreeSet::new)); Set signatureTypes = typesFromSignature.get(i) .stream() - .filter(DataType.UNDER_CONSTRUCTION::containsKey) + .filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false) .collect(Collectors.toCollection(TreeSet::new)); if (signatureTypes.isEmpty()) { log.info("{}: skipping", arg.name()); @@ -746,7 +748,9 @@ public static void testFunctionInfo() { ); } - Set returnTypes = Arrays.stream(description.returnType()).collect(Collectors.toCollection(TreeSet::new)); + Set returnTypes = Arrays.stream(description.returnType()) + .filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false) + .collect(Collectors.toCollection(TreeSet::new)); assertEquals(returnFromSignature, returnTypes); } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index db3fce244c9a8..fbb7c691b1d94 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -46,6 +46,7 @@ public class CaseTests extends AbstractScalarFunctionTestCase { DataType.TEXT, DataType.BOOLEAN, DataType.DATETIME, + DataType.DATE_NANOS, DataType.DOUBLE, DataType.INTEGER, DataType.LONG, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java index 311e3e3d89149..07d6ae34dc1e7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java @@ -115,6 +115,21 @@ public static Iterable parameters() { ) ) ); + suppliers.add( + new TestCaseSupplier( + "(a, b)", + List.of(DataType.DATE_NANOS, DataType.DATE_NANOS), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(1727877348000123456L, DataType.DATE_NANOS, "a"), + new TestCaseSupplier.TypedData(1727790948000987654L, DataType.DATE_NANOS, "b") + ), + "GreatestLongEvaluator[values=[MvMax[field=Attribute[channel=0]], MvMax[field=Attribute[channel=1]]]]", + DataType.DATE_NANOS, + equalTo(1727877348000123456L) + ) + ) + ); return parameterSuppliersFromTypedData(anyNullIsNull(false, suppliers)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java index 69842fde90312..d95cc79dd22e0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java @@ -114,6 +114,21 @@ public static Iterable parameters() { ) ) ); + suppliers.add( + new TestCaseSupplier( + "(a, b)", + List.of(DataType.DATE_NANOS, DataType.DATE_NANOS), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(1727877348000123456L, DataType.DATE_NANOS, "a"), + new TestCaseSupplier.TypedData(1727790948000987654L, DataType.DATE_NANOS, "b") + ), + "LeastLongEvaluator[values=[MvMin[field=Attribute[channel=0]], MvMin[field=Attribute[channel=1]]]]", + DataType.DATE_NANOS, + equalTo(1727790948000987654L) + ) + ) + ); return parameterSuppliersFromTypedData(anyNullIsNull(false, suppliers)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java index d8d3b607efcc0..f3b44274f3ade 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java @@ -39,6 +39,7 @@ public static Iterable parameters() { booleans(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values)); bytesRefs(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values)); dateTimes(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values.mapToObj(Long::valueOf))); + dateNanos(cases, "mv_dedupe", "MvDedupe", DataType.DATE_NANOS, (size, values) -> getMatcher(values.mapToObj(Long::valueOf))); doubles(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values.mapToObj(Double::valueOf))); ints(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values.mapToObj(Integer::valueOf))); longs(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values.mapToObj(Long::valueOf))); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java index e5bac422805af..859c79090d62f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java @@ -182,6 +182,23 @@ private static void longs(List suppliers) { equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) ); })); + + suppliers.add(new TestCaseSupplier(List.of(DataType.DATE_NANOS, DataType.INTEGER, DataType.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLong()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataType.DATE_NANOS, "field"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataType.INTEGER, "end") + ), + "MvSliceLongEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataType.DATE_NANOS, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); } private static void doubles(List suppliers) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java index d07ed2aeae887..63f538059dddf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java @@ -110,6 +110,20 @@ private static void longs(List suppliers) { equalTo(field.size() == 1 ? 
field.iterator().next() : field.stream().sorted(Collections.reverseOrder()).toList()) ); })); + + suppliers.add(new TestCaseSupplier(List.of(DataType.DATE_NANOS, DataType.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLong()); + BytesRef order = new BytesRef("DESC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataType.DATE_NANOS, "field"), + new TestCaseSupplier.TypedData(order, DataType.KEYWORD, "order").forceLiteral() + ), + "MvSortLong[field=Attribute[channel=0], order=false]", + DataType.DATE_NANOS, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted(Collections.reverseOrder()).toList()) + ); + })); } private static void doubles(List suppliers) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java index c9b6de64e079d..797c99992815e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java @@ -96,6 +96,19 @@ public static Iterable parameters() { equalTo(firstDate == null ? secondDate : firstDate) ); })); + noNullsSuppliers.add(new TestCaseSupplier(List.of(DataType.DATE_NANOS, DataType.DATE_NANOS), () -> { + Long firstDate = randomBoolean() ? null : randomNonNegativeLong(); + Long secondDate = randomNonNegativeLong(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(firstDate, DataType.DATE_NANOS, "first"), + new TestCaseSupplier.TypedData(secondDate, DataType.DATE_NANOS, "second") + ), + "CoalesceEvaluator[values=[Attribute[channel=0], Attribute[channel=1]]]", + DataType.DATE_NANOS, + equalTo(firstDate == null ? 
secondDate : firstDate) + ); + })); List suppliers = new ArrayList<>(noNullsSuppliers); for (TestCaseSupplier s : noNullsSuppliers) { From 157c98360730421d86ff43d774a815d59f6cfb54 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 23 Oct 2024 03:46:37 +1100 Subject: [PATCH 028/202] Mute org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests testWatchWithLastCheckedTimeExecutesBeforeInitialInterval #115354 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index b84e86ddd7f55..15bd6096c71bd 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -322,6 +322,9 @@ tests: - class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests method: testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval issue: https://github.com/elastic/elasticsearch/issues/115339 +- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests + method: testWatchWithLastCheckedTimeExecutesBeforeInitialInterval + issue: https://github.com/elastic/elasticsearch/issues/115354 # Examples: # From 57fbbcb9e7f58640b6bd98927307731b118d5000 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 22 Oct 2024 20:05:48 +0300 Subject: [PATCH 029/202] Fix leak in TimeSeriesRateAggregatorTests (#115345) Fixes #115334 --- muted-tests.yml | 2 -- .../analytics/rate/TimeSeriesRateAggregatorTests.java | 8 ++++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 15bd6096c71bd..7bad160f8d5ba 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -317,8 +317,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)} issue: https://github.com/elastic/elasticsearch/issues/115231 -- class: org.elasticsearch.xpack.analytics.rate.TimeSeriesRateAggregatorTests - issue: https://github.com/elastic/elasticsearch/issues/115334 - class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests method: testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval issue: https://github.com/elastic/elasticsearch/issues/115339 diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java index f517c03468bc2..753ce8283afca 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java @@ -156,8 +156,12 @@ public void testNestedWithinAutoDateHistogram() throws IOException { AggTestConfig aggTestConfig = new AggTestConfig(tsBuilder, timeStampField(), counterField("counter_field")) .withSplitLeavesIntoSeperateAggregators(false); expectThrows(IllegalArgumentException.class, () -> testCase(iw -> { - iw.addDocuments(docs(2000, "1", 15, 37, 60, /*reset*/ 14)); - iw.addDocuments(docs(2000, "2", 74, 150, /*reset*/ 50, 90, /*reset*/ 40)); + for (Document document : docs(2000, "1", 15, 37, 60, /*reset*/ 14)) { + iw.addDocument(document); + } + for (Document document : docs(2000, "2", 74, 150, /*reset*/ 50, 90, /*reset*/ 40)) { + iw.addDocument(document); + } }, verifier, 
aggTestConfig)); } From 82f2fb554e3a5bb849becb0a0df411b832735451 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Tue, 22 Oct 2024 13:41:17 -0400 Subject: [PATCH 030/202] fix test to not run when the FF is disabled (#114260) Fixes #113661 Don't run the tests when the feature is disabled. Co-authored-by: Elastic Machine --- .../esql/functions/kibana/definition/to_date_nanos.json | 3 ++- muted-tests.yml | 2 -- .../expression/function/scalar/convert/ToDateNanosTests.java | 4 ++++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json index bafbcf2bc2038..07ffe84444f02 100644 --- a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json +++ b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json @@ -5,5 +5,6 @@ "description" : "Converts an input to a nanosecond-resolution date value (aka date_nanos).", "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.", "signatures" : [ ], - "preview" : true + "preview" : true, + "snapshot_only" : false } diff --git a/muted-tests.yml b/muted-tests.yml index 7bad160f8d5ba..316ff67c691a4 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -206,8 +206,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113722 -- class: org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDateNanosTests - issue: https://github.com/elastic/elasticsearch/issues/113661 - class: org.elasticsearch.ingest.geoip.DatabaseNodeServiceIT method: testNonGzippedDatabase issue: https://github.com/elastic/elasticsearch/issues/113821 diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java index e91a5cc1ebca4..485073d1a91d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.common.time.DateUtils; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -29,6 +30,9 @@ public ToDateNanosTests(@Name("TestCase") Supplier te @ParametersFactory public static Iterable parameters() { + if (EsqlCapabilities.Cap.TO_DATE_NANOS.isEnabled() == false) { + return List.of(); + } final String read = "Attribute[channel=0]"; final List suppliers = new ArrayList<>(); From f8e931d6b5e4e17ef43ac3b39e4c7c40cbc24111 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 23 Oct 2024 04:43:08 +1100 Subject: [PATCH 031/202] Mute org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests testAddWithLastCheckedTimeExecutesBeforeInitialInterval #115356 --- 
muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 316ff67c691a4..6a575c9058363 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -321,6 +321,9 @@ tests: - class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests method: testWatchWithLastCheckedTimeExecutesBeforeInitialInterval issue: https://github.com/elastic/elasticsearch/issues/115354 +- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests + method: testAddWithLastCheckedTimeExecutesBeforeInitialInterval + issue: https://github.com/elastic/elasticsearch/issues/115356 # Examples: # From d65a0309faa63a591845844130ba905088cd4fab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Tue, 22 Oct 2024 19:53:09 +0200 Subject: [PATCH 032/202] Fix AvgTests error on -0.0 avg (#113272) Fixes https://github.com/elastic/elasticsearch/issues/113225 Fixes https://github.com/elastic/elasticsearch/issues/114175 --- muted-tests.yml | 6 - .../function/AbstractAggregationTestCase.java | 30 +- .../function/AbstractFunctionTestCase.java | 30 + .../AbstractScalarFunctionTestCase.java | 11 +- .../function/MultiRowTestCaseSupplier.java | 521 ++++++++---------- .../function/aggregate/AvgTests.java | 38 +- 6 files changed, 276 insertions(+), 360 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 6a575c9058363..325fcbf04ce59 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -164,9 +164,6 @@ tests: - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test80JavaOptsInEnvVar issue: https://github.com/elastic/elasticsearch/issues/113219 -- class: org.elasticsearch.xpack.esql.expression.function.aggregate.AvgTests - method: "testFold {TestCase= #2}" - issue: https://github.com/elastic/elasticsearch/issues/113225 - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test81JavaOptsInJvmOptions issue: https://github.com/elastic/elasticsearch/issues/113313 @@ -236,9 +233,6 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testGet issue: https://github.com/elastic/elasticsearch/issues/114135 -- class: org.elasticsearch.xpack.esql.expression.function.aggregate.AvgTests - method: "testFold {TestCase= #7}" - issue: https://github.com/elastic/elasticsearch/issues/114175 - class: org.elasticsearch.xpack.ilm.ExplainLifecycleIT method: testStepInfoPreservedOnAutoRetry issue: https://github.com/elastic/elasticsearch/issues/114220 diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index 1c917a961a343..db5d8e03458ea 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -163,15 +163,7 @@ private void aggregateSingleMode(Expression expression) { result = extractResultFromAggregator(aggregator, PlannerUtils.toElementType(testCase.expectedType())); } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - 
assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + assertTestCaseResultAndWarnings(result); } private void aggregateGroupingSingleMode(Expression expression) { @@ -263,15 +255,7 @@ private void aggregateWithIntermediates(Expression expression) { result = extractResultFromAggregator(aggregator, PlannerUtils.toElementType(testCase.expectedType())); } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + assertTestCaseResultAndWarnings(result); } private void evaluate(Expression evaluableExpression) { @@ -288,15 +272,7 @@ private void evaluate(Expression evaluableExpression) { if (testCase.expectedType() == DataType.UNSIGNED_LONG && result != null) { result = NumericUtils.unsignedLongAsBigInteger((Long) result); } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + assertTestCaseResultAndWarnings(result); } private void resolveExpression(Expression expression, Consumer onAggregator, Consumer onEvaluableExpression) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 397c269ae49ff..c05f8e0990b3c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -99,8 +99,10 @@ import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; /** @@ -754,6 +756,34 @@ public static void testFunctionInfo() { assertEquals(returnFromSignature, returnTypes); } + /** + * Asserts the result of a test case matches the expected result and warnings. + *
<p>
+ * The {@code result} parameter should be an object as returned by {@link #toJavaObjectUnsignedLongAware}. + *
</p>
+ */ + @SuppressWarnings("unchecked") + protected final void assertTestCaseResultAndWarnings(Object result) { + if (result instanceof Iterable) { + var collectionResult = (Iterable) result; + assertThat(collectionResult, not(hasItem(Double.NaN))); + assertThat(collectionResult, not(hasItem(Double.POSITIVE_INFINITY))); + assertThat(collectionResult, not(hasItem(Double.NEGATIVE_INFINITY))); + } + + assert testCase.getMatcher().matches(Double.NaN) == false; + assertThat(result, not(equalTo(Double.NaN))); + assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); + assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); + assertThat(result, testCase.getMatcher()); + + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } + protected final void assertTypeResolutionFailure(Expression expression) { assertTrue("expected unresolved", expression.typeResolved().unresolved()); assertThat(expression.typeResolved().message(), equalTo(testCase.getExpectedTypeError())); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java index 85db73901352b..65e8a53fc05c5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java @@ -41,7 +41,6 @@ import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @@ -127,15 +126,7 @@ public final void testEvaluate() { result = toJavaObjectUnsignedLongAware(block, 0); } } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + assertTestCaseResultAndWarnings(result); } /** diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java index e740533462746..7fe67707a7976 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java @@ -19,6 +19,7 @@ import java.math.BigInteger; import java.util.ArrayList; import java.util.List; +import java.util.function.Supplier; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomList; @@ -37,56 +38,36 @@ public static List intCases(int minRows, int maxRows, int min List cases = new ArrayList<>(); if (0 <= max && 0 >= min && 
includeZero) { - cases.add(new TypedDataSupplier("<0 ints>", () -> randomList(minRows, maxRows, () -> 0), DataType.INTEGER, false, true)); + addSuppliers(cases, minRows, maxRows, "0 int", DataType.INTEGER, () -> 0); } if (max != 0) { - cases.add( - new TypedDataSupplier("<" + max + " ints>", () -> randomList(minRows, maxRows, () -> max), DataType.INTEGER, false, true) - ); + addSuppliers(cases, minRows, maxRows, max + " int", DataType.INTEGER, () -> max); } if (min != 0 && min != max) { - cases.add( - new TypedDataSupplier("<" + min + " ints>", () -> randomList(minRows, maxRows, () -> min), DataType.INTEGER, false, true) - ); + addSuppliers(cases, minRows, maxRows, min + " int", DataType.INTEGER, () -> min); } int lower = Math.max(min, 1); int upper = Math.min(max, Integer.MAX_VALUE); if (lower < upper) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomIntBetween(lower, upper)), - DataType.INTEGER, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "positive int", DataType.INTEGER, () -> ESTestCase.randomIntBetween(lower, upper)); } int lower1 = Math.max(min, Integer.MIN_VALUE); int upper1 = Math.min(max, -1); if (lower1 < upper1) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomIntBetween(lower1, upper1)), - DataType.INTEGER, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "negative int", DataType.INTEGER, () -> ESTestCase.randomIntBetween(lower1, upper1)); } if (min < 0 && max > 0) { - cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> { + addSuppliers(cases, minRows, maxRows, "random int", DataType.INTEGER, () -> { if (includeZero) { return ESTestCase.randomIntBetween(min, max); } return randomBoolean() ? 
ESTestCase.randomIntBetween(min, -1) : ESTestCase.randomIntBetween(1, max); - }), DataType.INTEGER, false, true)); + }); } return cases; @@ -96,56 +77,36 @@ public static List longCases(int minRows, int maxRows, long m List cases = new ArrayList<>(); if (0 <= max && 0 >= min && includeZero) { - cases.add(new TypedDataSupplier("<0 longs>", () -> randomList(minRows, maxRows, () -> 0L), DataType.LONG, false, true)); + addSuppliers(cases, minRows, maxRows, "0 long", DataType.LONG, () -> 0L); } if (max != 0) { - cases.add( - new TypedDataSupplier("<" + max + " longs>", () -> randomList(minRows, maxRows, () -> max), DataType.LONG, false, true) - ); + addSuppliers(cases, minRows, maxRows, max + " long", DataType.LONG, () -> max); } if (min != 0 && min != max) { - cases.add( - new TypedDataSupplier("<" + min + " longs>", () -> randomList(minRows, maxRows, () -> min), DataType.LONG, false, true) - ); + addSuppliers(cases, minRows, maxRows, min + " long", DataType.LONG, () -> min); } long lower = Math.max(min, 1); long upper = Math.min(max, Long.MAX_VALUE); if (lower < upper) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(lower, upper)), - DataType.LONG, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "positive long", DataType.LONG, () -> ESTestCase.randomLongBetween(lower, upper)); } long lower1 = Math.max(min, Long.MIN_VALUE); long upper1 = Math.min(max, -1); if (lower1 < upper1) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(lower1, upper1)), - DataType.LONG, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "negative long", DataType.LONG, () -> ESTestCase.randomLongBetween(lower1, upper1)); } if (min < 0 && max > 0) { - cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> { + addSuppliers(cases, minRows, maxRows, "random long", DataType.LONG, () -> { if (includeZero) { return ESTestCase.randomLongBetween(min, max); } return randomBoolean() ? 
ESTestCase.randomLongBetween(min, -1) : ESTestCase.randomLongBetween(1, max); - }), DataType.LONG, false, true)); + }); } return cases; @@ -156,29 +117,20 @@ public static List ulongCases(int minRows, int maxRows, BigIn // Zero if (BigInteger.ZERO.compareTo(max) <= 0 && BigInteger.ZERO.compareTo(min) >= 0 && includeZero) { - cases.add( - new TypedDataSupplier( - "<0 unsigned longs>", - () -> randomList(minRows, maxRows, () -> BigInteger.ZERO), - DataType.UNSIGNED_LONG, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "0 unsigned long", DataType.UNSIGNED_LONG, () -> BigInteger.ZERO); } // Small values, less than Long.MAX_VALUE BigInteger lower1 = min.max(BigInteger.ONE); BigInteger upper1 = max.min(BigInteger.valueOf(Long.MAX_VALUE)); if (lower1.compareTo(upper1) < 0) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomUnsignedLongBetween(lower1, upper1)), - DataType.UNSIGNED_LONG, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "small unsigned long", + DataType.UNSIGNED_LONG, + () -> ESTestCase.randomUnsignedLongBetween(lower1, upper1) ); } @@ -186,14 +138,13 @@ public static List ulongCases(int minRows, int maxRows, BigIn BigInteger lower2 = min.max(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE)); BigInteger upper2 = max.min(ESTestCase.UNSIGNED_LONG_MAX); if (lower2.compareTo(upper2) < 0) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomUnsignedLongBetween(lower2, upper2)), - DataType.UNSIGNED_LONG, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "big unsigned long", + DataType.UNSIGNED_LONG, + () -> ESTestCase.randomUnsignedLongBetween(lower2, upper2) ); } @@ -204,85 +155,77 @@ public static List doubleCases(int minRows, int maxRows, doub List cases = new ArrayList<>(); if (0d <= max && 0d >= min && includeZero) { - cases.add(new TypedDataSupplier("<0 doubles>", () -> randomList(minRows, maxRows, () -> 0d), DataType.DOUBLE, false, true)); - cases.add(new TypedDataSupplier("<-0 doubles>", () -> randomList(minRows, maxRows, () -> -0d), DataType.DOUBLE, false, true)); + addSuppliers(cases, minRows, maxRows, "0 double", DataType.DOUBLE, () -> 0d); + addSuppliers(cases, minRows, maxRows, "-0 double", DataType.DOUBLE, () -> -0d); } if (max != 0d) { - cases.add( - new TypedDataSupplier("<" + max + " doubles>", () -> randomList(minRows, maxRows, () -> max), DataType.DOUBLE, false, true) - ); + addSuppliers(cases, minRows, maxRows, max + " double", DataType.DOUBLE, () -> max); } if (min != 0d && min != max) { - cases.add( - new TypedDataSupplier("<" + min + " doubles>", () -> randomList(minRows, maxRows, () -> min), DataType.DOUBLE, false, true) - ); + addSuppliers(cases, minRows, maxRows, min + " double", DataType.DOUBLE, () -> min); } double lower1 = Math.max(min, 0d); double upper1 = Math.min(max, 1d); if (lower1 < upper1) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower1, upper1, true)), - DataType.DOUBLE, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "small positive double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower1, upper1, true) ); } double lower2 = Math.max(min, -1d); double upper2 = Math.min(max, 0d); if (lower2 < upper2) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower2, upper2, true)), - DataType.DOUBLE, - false, - 
true - ) + addSuppliers( + cases, + minRows, + maxRows, + "small negative double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower2, upper2, true) ); } double lower3 = Math.max(min, 1d); double upper3 = Math.min(max, Double.MAX_VALUE); if (lower3 < upper3) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower3, upper3, true)), - DataType.DOUBLE, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "big positive double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower3, upper3, true) ); } double lower4 = Math.max(min, -Double.MAX_VALUE); double upper4 = Math.min(max, -1d); if (lower4 < upper4) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower4, upper4, true)), - DataType.DOUBLE, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "big negative double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower4, upper4, true) ); } if (min < 0 && max > 0) { - cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> { + addSuppliers(cases, minRows, maxRows, "random double", DataType.DOUBLE, () -> { if (includeZero) { return ESTestCase.randomDoubleBetween(min, max, true); } return randomBoolean() ? ESTestCase.randomDoubleBetween(min, -1, true) : ESTestCase.randomDoubleBetween(1, max, true); - }), DataType.DOUBLE, false, true)); + }); } return cases; @@ -291,149 +234,126 @@ public static List doubleCases(int minRows, int maxRows, doub public static List dateCases(int minRows, int maxRows) { List cases = new ArrayList<>(); - cases.add( - new TypedDataSupplier( - "<1970-01-01T00:00:00Z dates>", - () -> randomList(minRows, maxRows, () -> 0L), - DataType.DATETIME, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "1970-01-01T00:00:00Z date", DataType.DATETIME, () -> 0L); - cases.add( - new TypedDataSupplier( - "", - // 1970-01-01T00:00:00Z - 2286-11-20T17:46:40Z - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(0, 10 * (long) 10e11)), - DataType.DATETIME, - false, - true - ) - ); + // 1970-01-01T00:00:00Z - 2286-11-20T17:46:40Z + addSuppliers(cases, minRows, maxRows, "random date", DataType.DATETIME, () -> ESTestCase.randomLongBetween(0, 10 * (long) 10e11)); - cases.add( - new TypedDataSupplier( - "", - // 2286-11-20T17:46:40Z - +292278994-08-17T07:12:55.807Z - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(10 * (long) 10e11, Long.MAX_VALUE)), - DataType.DATETIME, - false, - true - ) + // 2286-11-20T17:46:40Z - +292278994-08-17T07:12:55.807Z + addSuppliers( + cases, + minRows, + maxRows, + "far future date", + DataType.DATETIME, + () -> ESTestCase.randomLongBetween(10 * (long) 10e11, Long.MAX_VALUE) ); - cases.add( - new TypedDataSupplier( - "", - // very close to +292278994-08-17T07:12:55.807Z, the maximum supported millis since epoch - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(Long.MAX_VALUE / 100 * 99, Long.MAX_VALUE)), - DataType.DATETIME, - false, - true - ) + // Very close to +292278994-08-17T07:12:55.807Z, the maximum supported millis since epoch + addSuppliers( + cases, + minRows, + maxRows, + "near the end of time date", + DataType.DATETIME, + () -> ESTestCase.randomLongBetween(Long.MAX_VALUE / 100 * 99, Long.MAX_VALUE) ); return cases; } public static List booleanCases(int minRows, int maxRows) { - return List.of( - new TypedDataSupplier("", () -> randomList(minRows, 
maxRows, () -> true), DataType.BOOLEAN, false, true), - new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> false), DataType.BOOLEAN, false, true), - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, ESTestCase::randomBoolean), - DataType.BOOLEAN, - false, - true - ) - ); + List cases = new ArrayList<>(); + + addSuppliers(cases, minRows, maxRows, "true boolean", DataType.BOOLEAN, () -> true); + addSuppliers(cases, minRows, maxRows, "false boolean", DataType.BOOLEAN, () -> false); + addSuppliers(cases, minRows, maxRows, "random boolean", DataType.BOOLEAN, ESTestCase::randomBoolean); + + return cases; } public static List ipCases(int minRows, int maxRows) { - return List.of( - new TypedDataSupplier( - "<127.0.0.1 ips>", - () -> randomList(minRows, maxRows, () -> new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1")))), - DataType.IP, - false, - true - ), - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> new BytesRef(InetAddressPoint.encode(ESTestCase.randomIp(true)))), - DataType.IP, - false, - true - ), - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> new BytesRef(InetAddressPoint.encode(ESTestCase.randomIp(false)))), - DataType.IP, - false, - true - ) + List cases = new ArrayList<>(); + + addSuppliers( + cases, + minRows, + maxRows, + "127.0.0.1 ip", + DataType.IP, + () -> new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1"))) + ); + addSuppliers( + cases, + minRows, + maxRows, + "random v4 ip", + DataType.IP, + () -> new BytesRef(InetAddressPoint.encode(ESTestCase.randomIp(true))) + ); + addSuppliers( + cases, + minRows, + maxRows, + "random v6 ip", + DataType.IP, + () -> new BytesRef(InetAddressPoint.encode(ESTestCase.randomIp(false))) ); + + return cases; } public static List versionCases(int minRows, int maxRows) { - return List.of( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> new Version(Integer.toString(ESTestCase.between(0, 100))).toBytesRef()), - DataType.VERSION, - false, - true - ), - new TypedDataSupplier( - "", - () -> randomList( - minRows, - maxRows, - () -> new Version(ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100)).toBytesRef() - ), - DataType.VERSION, - false, - true - ), - new TypedDataSupplier( - "", - () -> randomList( - minRows, - maxRows, - () -> new Version(ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100)) - .toBytesRef() - ), - DataType.VERSION, - false, - true - ) + List cases = new ArrayList<>(); + + addSuppliers( + cases, + minRows, + maxRows, + "major version", + DataType.VERSION, + () -> new Version(Integer.toString(ESTestCase.between(0, 100))).toBytesRef() ); + addSuppliers( + cases, + minRows, + maxRows, + "major.minor version", + DataType.VERSION, + () -> new Version(ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100)).toBytesRef() + ); + addSuppliers( + cases, + minRows, + maxRows, + "major.minor.patch version", + DataType.VERSION, + () -> new Version(ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100) + "." 
+ ESTestCase.between(0, 100)).toBytesRef() + ); + + return cases; } public static List geoPointCases(int minRows, int maxRows, boolean withAltitude) { List cases = new ArrayList<>(); - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> GEO.asWkb(GeometryTestUtils.randomPoint(false))), - DataType.GEO_POINT, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "", + DataType.GEO_POINT, + () -> GEO.asWkb(GeometryTestUtils.randomPoint(false)) ); if (withAltitude) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> GEO.asWkb(GeometryTestUtils.randomPoint(false))), - DataType.GEO_POINT, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "", + DataType.GEO_POINT, + () -> GEO.asWkb(GeometryTestUtils.randomPoint(true)) ); } @@ -443,25 +363,23 @@ public static List geoPointCases(int minRows, int maxRows, bo public static List cartesianPointCases(int minRows, int maxRows, boolean withAltitude) { List cases = new ArrayList<>(); - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint(false))), - DataType.CARTESIAN_POINT, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "", + DataType.CARTESIAN_POINT, + () -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint(false)) ); if (withAltitude) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint(true))), - DataType.CARTESIAN_POINT, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "", + DataType.CARTESIAN_POINT, + () -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint(true)) ); } @@ -471,59 +389,64 @@ public static List cartesianPointCases(int minRows, int maxRo public static List stringCases(int minRows, int maxRows, DataType type) { List cases = new ArrayList<>(); - cases.addAll( - List.of( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> new BytesRef("")), - type, - false, - true - ), - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> new BytesRef(ESTestCase.randomAlphaOfLengthBetween(1, 30))), - type, - false, - true - ), - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> new BytesRef(ESTestCase.randomRealisticUnicodeOfLengthBetween(1, 30))), - type, - false, - true - ) - ) + addSuppliers(cases, minRows, maxRows, "empty " + type, type, () -> new BytesRef("")); + addSuppliers( + cases, + minRows, + maxRows, + "short alpha " + type, + type, + () -> new BytesRef(ESTestCase.randomAlphaOfLengthBetween(1, 30)) + ); + addSuppliers( + cases, + minRows, + maxRows, + "short unicode " + type, + type, + () -> new BytesRef(ESTestCase.randomRealisticUnicodeOfLengthBetween(1, 30)) ); if (minRows <= 100) { var longStringsMaxRows = Math.min(maxRows, 100); - cases.addAll( - List.of( - new TypedDataSupplier( - "", - () -> randomList(minRows, longStringsMaxRows, () -> new BytesRef(ESTestCase.randomAlphaOfLengthBetween(300, 1000))), - type, - false, - true - ), - new TypedDataSupplier( - "", - () -> randomList( - minRows, - longStringsMaxRows, - () -> new BytesRef(ESTestCase.randomRealisticUnicodeOfLengthBetween(300, 1000)) - ), - type, - false, - true - ) - ) + addSuppliers( + cases, + minRows, + longStringsMaxRows, + "long alpha " + type, + type, + () -> new BytesRef(ESTestCase.randomAlphaOfLengthBetween(300, 1000)) + ); + addSuppliers( + cases, + minRows, + longStringsMaxRows, + "long unicode " + type, + type, + () -> new 
BytesRef(ESTestCase.randomRealisticUnicodeOfLengthBetween(300, 1000)) ); } return cases; } + + private static void addSuppliers( + List cases, + int minRows, + int maxRows, + String name, + DataType type, + Supplier valueSupplier + ) { + if (minRows <= 1 && maxRows >= 1) { + cases.add(new TypedDataSupplier("<" + name + ">", () -> randomList(1, 1, valueSupplier), type, false, true)); + } + + if (maxRows > 1) { + cases.add( + new TypedDataSupplier("<" + name + "s>", () -> randomList(Math.max(2, minRows), maxRows, valueSupplier), type, false, true) + ); + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java index 80737dac1aa58..ac599c7ff05f8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java @@ -64,25 +64,27 @@ protected Expression build(Source source, List args) { private static TestCaseSupplier makeSupplier(TestCaseSupplier.TypedDataSupplier fieldSupplier) { return new TestCaseSupplier(List.of(fieldSupplier.type()), () -> { var fieldTypedData = fieldSupplier.get(); + var fieldData = fieldTypedData.multiRowData(); - Object expected = switch (fieldTypedData.type().widenSmallNumeric()) { - case INTEGER -> fieldTypedData.multiRowData() - .stream() - .map(v -> (Integer) v) - .collect(Collectors.summarizingInt(Integer::intValue)) - .getAverage(); - case LONG -> fieldTypedData.multiRowData() - .stream() - .map(v -> (Long) v) - .collect(Collectors.summarizingLong(Long::longValue)) - .getAverage(); - case DOUBLE -> fieldTypedData.multiRowData() - .stream() - .map(v -> (Double) v) - .collect(Collectors.summarizingDouble(Double::doubleValue)) - .getAverage(); - default -> throw new IllegalStateException("Unexpected value: " + fieldTypedData.type()); - }; + Object expected = null; + + if (fieldData.size() == 1) { + // For single elements, we directly return them to avoid precision issues + expected = ((Number) fieldData.get(0)).doubleValue(); + } else if (fieldData.size() > 1) { + expected = switch (fieldTypedData.type().widenSmallNumeric()) { + case INTEGER -> fieldData.stream() + .map(v -> (Integer) v) + .collect(Collectors.summarizingInt(Integer::intValue)) + .getAverage(); + case LONG -> fieldData.stream().map(v -> (Long) v).collect(Collectors.summarizingLong(Long::longValue)).getAverage(); + case DOUBLE -> fieldData.stream() + .map(v -> (Double) v) + .collect(Collectors.summarizingDouble(Double::doubleValue)) + .getAverage(); + default -> throw new IllegalStateException("Unexpected value: " + fieldTypedData.type()); + }; + } return new TestCaseSupplier.TestCase( List.of(fieldTypedData), From 0ab79db1831255323225dd581c67f33de3cfd084 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 23 Oct 2024 05:42:41 +1100 Subject: [PATCH 033/202] Mute org.elasticsearch.xpack.inference.DefaultEndPointsIT testInferDeploysDefaultE5 #115361 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 325fcbf04ce59..a3fd1fa395f5d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -318,6 +318,9 @@ tests: - class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests method:
testAddWithLastCheckedTimeExecutesBeforeInitialInterval issue: https://github.com/elastic/elasticsearch/issues/115356 +- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT + method: testInferDeploysDefaultE5 + issue: https://github.com/elastic/elasticsearch/issues/115361 # Examples: # From c9e57700af992ca3e2f3c85825f3ed6be822db2e Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Tue, 22 Oct 2024 16:05:01 -0400 Subject: [PATCH 034/202] Optimize downloader task executor (#115355) --- .../geoip/GeoIpDownloaderTaskExecutor.java | 67 +++++++++++-------- .../ingest/ConfigurationUtils.java | 3 +- 2 files changed, 40 insertions(+), 30 deletions(-) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index eacf2e5a2ee57..e4150005ed1ae 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -43,13 +43,14 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteTransportException; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER; @@ -238,14 +239,11 @@ public void clusterChanged(ClusterChangedEvent event) { } static boolean hasAtLeastOneGeoipProcessor(ClusterState clusterState) { - if (pipelineConfigurationsWithGeoIpProcessor(clusterState, true).isEmpty() == false) { + if (pipelinesWithGeoIpProcessor(clusterState, true).isEmpty() == false) { return true; } - Set checkReferencedPipelines = pipelineConfigurationsWithGeoIpProcessor(clusterState, false).stream() - .map(PipelineConfiguration::getId) - .collect(Collectors.toSet()); - + final Set checkReferencedPipelines = pipelinesWithGeoIpProcessor(clusterState, false); if (checkReferencedPipelines.isEmpty()) { return false; } @@ -258,22 +256,24 @@ static boolean hasAtLeastOneGeoipProcessor(ClusterState clusterState) { } /** - * Retrieve list of pipelines that have at least one geoip processor. + * Retrieve the set of pipeline ids that have at least one geoip processor. * @param clusterState Cluster state. * @param downloadDatabaseOnPipelineCreation Filter the list to include only pipeline with the download_database_on_pipeline_creation * matching the param. - * @return A list of {@link PipelineConfiguration} matching criteria. + * @return A set of pipeline ids matching criteria. 
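+     * For example, a pipeline defined along the lines of {@code {"processors": [{"geoip": {"field": "source.ip"}}]}} (an illustrative
+     * config, not taken from this patch) would have its id included in the returned set, while a pipeline with no geoip processor
+     * anywhere in its definition would not.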
*/ @SuppressWarnings("unchecked") - private static List pipelineConfigurationsWithGeoIpProcessor( - ClusterState clusterState, - boolean downloadDatabaseOnPipelineCreation - ) { - List pipelineDefinitions = IngestService.getPipelines(clusterState); - return pipelineDefinitions.stream().filter(pipelineConfig -> { - List> processors = (List>) pipelineConfig.getConfigAsMap().get(Pipeline.PROCESSORS_KEY); - return hasAtLeastOneGeoipProcessor(processors, downloadDatabaseOnPipelineCreation); - }).toList(); + private static Set pipelinesWithGeoIpProcessor(ClusterState clusterState, boolean downloadDatabaseOnPipelineCreation) { + List configurations = IngestService.getPipelines(clusterState); + Set ids = new HashSet<>(); + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + for (PipelineConfiguration configuration : configurations) { + List> processors = (List>) configuration.getConfigAsMap().get(Pipeline.PROCESSORS_KEY); + if (hasAtLeastOneGeoipProcessor(processors, downloadDatabaseOnPipelineCreation)) { + ids.add(configuration.getId()); + } + } + return Collections.unmodifiableSet(ids); } /** @@ -283,7 +283,15 @@ private static List pipelineConfigurationsWithGeoIpProces * @return true if a geoip processor is found in the processor list. */ private static boolean hasAtLeastOneGeoipProcessor(List> processors, boolean downloadDatabaseOnPipelineCreation) { - return processors != null && processors.stream().anyMatch(p -> hasAtLeastOneGeoipProcessor(p, downloadDatabaseOnPipelineCreation)); + if (processors != null) { + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + for (Map processor : processors) { + if (hasAtLeastOneGeoipProcessor(processor, downloadDatabaseOnPipelineCreation)) { + return true; + } + } + } + return false; } /** @@ -317,7 +325,7 @@ private static boolean hasAtLeastOneGeoipProcessor(Map processor } /** - * Check if a processor config is has an on_failure clause containing at least a geoip processor. + * Check if a processor config has an on_failure clause containing at least a geoip processor. * @param processor Processor config. * @param downloadDatabaseOnPipelineCreation Should the download_database_on_pipeline_creation of the geoip processor be true or false. * @return true if a geoip processor is found in the processor list. 
@@ -327,16 +335,17 @@ private static boolean isProcessorWithOnFailureGeoIpProcessor( Map processor, boolean downloadDatabaseOnPipelineCreation ) { - return processor != null - && processor.values() - .stream() - .anyMatch( - value -> value instanceof Map - && hasAtLeastOneGeoipProcessor( - ((Map>>) value).get("on_failure"), - downloadDatabaseOnPipelineCreation - ) - ); + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + for (Object value : processor.values()) { + if (value instanceof Map + && hasAtLeastOneGeoipProcessor( + ((Map>>) value).get("on_failure"), + downloadDatabaseOnPipelineCreation + )) { + return true; + } + } + return false; } /** diff --git a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index 5059272aa2e23..97a68d9807688 100644 --- a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -12,6 +12,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -239,7 +240,7 @@ private static Boolean readBoolean(String processorType, String processorTag, St processorType, processorTag, propertyName, - "property isn't a boolean, but of type [" + value.getClass().getName() + "]" + Strings.format("property isn't a boolean, but of type [%s]", value.getClass().getName()) ); } From 47c4909e6d214c44ff2d0999819d19daf4377a45 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Tue, 22 Oct 2024 16:05:25 -0400 Subject: [PATCH 035/202] Optimize IngestService#resolvePipelinesFromIndexTemplates (#115348) --- .../MetadataIndexTemplateService.java | 68 ++++++++++++++----- 1 file changed, 52 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index ccdfaa5518aee..d6ed28454df96 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1200,6 +1200,42 @@ static ClusterState innerPutTemplate( return ClusterState.builder(currentState).metadata(builder).build(); } + /** + * A private, local alternative to elements.stream().anyMatch(predicate) for micro-optimization reasons. + */ + private static boolean anyMatch(final List elements, final Predicate predicate) { + for (T e : elements) { + if (predicate.test(e)) { + return true; + } + } + return false; + } + + /** + * A private, local alternative to elements.stream().noneMatch(predicate) for micro-optimization reasons. + */ + private static boolean noneMatch(final List elements, final Predicate predicate) { + for (T e : elements) { + if (predicate.test(e)) { + return false; + } + } + return true; + } + + /** + * A private, local alternative to elements.stream().filter(predicate).findFirst() for micro-optimization reasons. 
+ */ + private static Optional findFirst(final List elements, final Predicate predicate) { + for (T e : elements) { + if (predicate.test(e)) { + return Optional.of(e); + } + } + return Optional.empty(); + } + /** * Finds index templates whose index pattern matched with the given index name. In the case of * hidden indices, a template with a match all pattern or global template will not be returned. @@ -1219,15 +1255,14 @@ public static List findV1Templates(Metadata metadata, Str final List matchedTemplates = new ArrayList<>(); for (IndexTemplateMetadata template : metadata.templates().values()) { if (isHidden == null || isHidden == Boolean.FALSE) { - final boolean matched = template.patterns().stream().anyMatch(patternMatchPredicate); - if (matched) { + if (anyMatch(template.patterns(), patternMatchPredicate)) { matchedTemplates.add(template); } } else { assert isHidden == Boolean.TRUE; - final boolean isNotMatchAllTemplate = template.patterns().stream().noneMatch(Regex::isMatchAllPattern); + final boolean isNotMatchAllTemplate = noneMatch(template.patterns(), Regex::isMatchAllPattern); if (isNotMatchAllTemplate) { - if (template.patterns().stream().anyMatch(patternMatchPredicate)) { + if (anyMatch(template.patterns(), patternMatchPredicate)) { matchedTemplates.add(template); } } @@ -1238,19 +1273,21 @@ public static List findV1Templates(Metadata metadata, Str // this is complex but if the index is not hidden in the create request but is hidden as the result of template application, // then we need to exclude global templates if (isHidden == null) { - final Optional templateWithHiddenSetting = matchedTemplates.stream() - .filter(template -> IndexMetadata.INDEX_HIDDEN_SETTING.exists(template.settings())) - .findFirst(); + final Optional templateWithHiddenSetting = findFirst( + matchedTemplates, + template -> IndexMetadata.INDEX_HIDDEN_SETTING.exists(template.settings()) + ); if (templateWithHiddenSetting.isPresent()) { final boolean templatedIsHidden = IndexMetadata.INDEX_HIDDEN_SETTING.get(templateWithHiddenSetting.get().settings()); if (templatedIsHidden) { // remove the global templates - matchedTemplates.removeIf(current -> current.patterns().stream().anyMatch(Regex::isMatchAllPattern)); + matchedTemplates.removeIf(current -> anyMatch(current.patterns(), Regex::isMatchAllPattern)); } // validate that hidden didn't change - final Optional templateWithHiddenSettingPostRemoval = matchedTemplates.stream() - .filter(template -> IndexMetadata.INDEX_HIDDEN_SETTING.exists(template.settings())) - .findFirst(); + final Optional templateWithHiddenSettingPostRemoval = findFirst( + matchedTemplates, + template -> IndexMetadata.INDEX_HIDDEN_SETTING.exists(template.settings()) + ); if (templateWithHiddenSettingPostRemoval.isEmpty() || templateWithHiddenSetting.get() != templateWithHiddenSettingPostRemoval.get()) { throw new IllegalStateException( @@ -1313,14 +1350,13 @@ static List> findV2CandidateTemplates(Met * built with a template that none of its indices match. 
*/ if (isHidden == false || template.getDataStreamTemplate() != null) { - final boolean matched = template.indexPatterns().stream().anyMatch(patternMatchPredicate); - if (matched) { + if (anyMatch(template.indexPatterns(), patternMatchPredicate)) { candidates.add(Tuple.tuple(name, template)); } } else { - final boolean isNotMatchAllTemplate = template.indexPatterns().stream().noneMatch(Regex::isMatchAllPattern); + final boolean isNotMatchAllTemplate = noneMatch(template.indexPatterns(), Regex::isMatchAllPattern); if (isNotMatchAllTemplate) { - if (template.indexPatterns().stream().anyMatch(patternMatchPredicate)) { + if (anyMatch(template.indexPatterns(), patternMatchPredicate)) { candidates.add(Tuple.tuple(name, template)); } } @@ -1334,7 +1370,7 @@ static List> findV2CandidateTemplates(Met // Checks if a global template specifies the `index.hidden` setting. This check is important because a global // template shouldn't specify the `index.hidden` setting, we leave it up to the caller to handle this situation. private static boolean isGlobalAndHasIndexHiddenSetting(Metadata metadata, ComposableIndexTemplate template, String templateName) { - return template.indexPatterns().stream().anyMatch(Regex::isMatchAllPattern) + return anyMatch(template.indexPatterns(), Regex::isMatchAllPattern) && IndexMetadata.INDEX_HIDDEN_SETTING.exists(resolveSettings(metadata, templateName)); } From ee51f5fb7673ca10db8aed3a4049e4f7683e857e Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Tue, 22 Oct 2024 16:05:52 -0400 Subject: [PATCH 036/202] Optimize IndexTemplateRegistry#clusterChanged (#115347) --- .../cluster/metadata/ComposableIndexTemplate.java | 13 ++++++++++--- .../xpack/core/ilm/IndexLifecycleMetadata.java | 12 ++++++++---- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 6d1a874e1c72b..ae7cff6312155 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -28,6 +28,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -189,9 +191,14 @@ public List getRequiredComponentTemplates() { if (ignoreMissingComponentTemplates == null) { return componentTemplates; } - return componentTemplates.stream() - .filter(componentTemplate -> ignoreMissingComponentTemplates.contains(componentTemplate) == false) - .toList(); + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + List required = new ArrayList<>(componentTemplates.size()); + for (String template : componentTemplates) { + if (ignoreMissingComponentTemplates.contains(template) == false) { + required.add(template); + } + } + return Collections.unmodifiableList(required); } @Nullable diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java index 3674103eda215..f8cb371687d72 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java @@ 
-25,6 +25,7 @@ import java.io.IOException; import java.util.Collections; import java.util.EnumSet; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -92,10 +93,13 @@ public OperationMode getOperationMode() { } public Map getPolicies() { - return policyMetadatas.values() - .stream() - .map(LifecyclePolicyMetadata::getPolicy) - .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())); + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + Map policies = new HashMap<>(policyMetadatas.size()); + for (LifecyclePolicyMetadata policyMetadata : policyMetadatas.values()) { + LifecyclePolicy policy = policyMetadata.getPolicy(); + policies.put(policy.getName(), policy); + } + return Collections.unmodifiableMap(policies); } @Override From c34f766a3059ad6e6b055a6a9a34e19d81a83832 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 23 Oct 2024 07:36:51 +1100 Subject: [PATCH 037/202] Mute org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval #115368 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a3fd1fa395f5d..0dc82b311de7f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -321,6 +321,9 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultE5 issue: https://github.com/elastic/elasticsearch/issues/115361 +- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests + method: testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval + issue: https://github.com/elastic/elasticsearch/issues/115368 # Examples: # From ce20a4d8e7059fd6277181e5989f0db77f81e952 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 22 Oct 2024 15:37:13 -0500 Subject: [PATCH 038/202] Adding support for simulate ingest mapping addition for indices with mappings that do not come from templates (#115359) --- docs/changelog/115359.yaml | 6 + .../test/ingest/80_ingest_simulate.yml | 144 +++++++++++++++++- .../action/bulk/BulkFeatures.java | 4 +- .../bulk/TransportSimulateBulkAction.java | 42 ++++- 4 files changed, 186 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/115359.yaml diff --git a/docs/changelog/115359.yaml b/docs/changelog/115359.yaml new file mode 100644 index 0000000000000..65b3086dfc8d0 --- /dev/null +++ b/docs/changelog/115359.yaml @@ -0,0 +1,6 @@ +pr: 115359 +summary: Adding support for simulate ingest mapping addition for indices with mappings + that do not come from templates +area: Ingest Node +type: enhancement +issues: [] diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index d4aa2f1ad4467..4d1a62c6f179e 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -1229,7 +1229,7 @@ setup: - requires: cluster_features: ["simulate.mapping.addition"] - reason: "ingest simulate
mapping addition added in 8.16" + reason: "ingest simulate mapping addition added in 8.17" - do: @@ -1465,7 +1465,7 @@ setup: - requires: cluster_features: ["simulate.mapping.addition"] - reason: "ingest simulate mapping addition added in 8.16" + reason: "ingest simulate mapping addition added in 8.17" - do: indices.put_template: @@ -1571,3 +1571,143 @@ setup: - match: { docs.0.doc._source.foo: 3 } - match: { docs.0.doc._source.bar: "not a boolean" } - not_exists: docs.0.doc.error + +--- +"Test mapping addition works with indices without templates": + # In this test, we make sure that when we have an index that has a mapping but was not built with a template, the mapping_addition + # is merged in with that mapping. + + - skip: + features: + - headers + - allowed_warnings + + - requires: + cluster_features: ["simulate.support.non.template.mapping"] + reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17" + + # First, make sure that validation fails before we create the index (since we are only defining the bar field but trying to index a value + # for foo). + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "some text value" + } + } + ], + "mapping_addition": { + "dynamic": "strict", + "properties": { + "bar": { + "type": "keyword" + } + } + } + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "some text value" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + + - do: + indices.create: + index: foo-1 + body: + mappings: + dynamic: strict + properties: + foo: + type: integer + - match: { acknowledged: true } + + # Now make sure that the mapping for the newly-created index is getting picked up. Validation fails because it only defined a mapping + # for foo, not for bar.
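+  # (The simulate call below sends no mapping_addition, so validation should run against just the index's own mapping,
+  # roughly { "dynamic": "strict", "properties": { "foo": { "type": "integer" } } }; the unmapped bar field is what is
+  # expected to trip the strict_dynamic_mapping_exception.)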
+ - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "some text value" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "some text value" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + + # Now we make sure that the index's mapping gets merged with the mapping_addition: + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "some text value" + } + } + ], + "mapping_addition": { + "dynamic": "strict", + "properties": { + "bar": { + "type": "keyword" + } + } + } + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "some text value" } + - not_exists: docs.0.doc.error + + # This last call to simulate is just making sure that if there are no templates, no index mappings, no substitutions, and no mapping + # addition, then validation does not fail + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: nonexistent + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "some text value" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "nonexistent" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "some text value" } + - not_exists: docs.0.doc.error diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java index 22cf8a2260d87..62a9b88cb6a57 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java @@ -19,6 +19,7 @@ import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_ADDITION; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION_TEMPLATES; +import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING; public class BulkFeatures implements FeatureSpecification { public Set getFeatures() { @@ -27,7 +28,8 @@ public Set getFeatures() { SIMULATE_MAPPING_VALIDATION_TEMPLATES, SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS, SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS, - SIMULATE_MAPPING_ADDITION + SIMULATE_MAPPING_ADDITION, + SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING ); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index 0888b70f5399c..1353fa78595ef 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -84,6 +84,7 @@ public class TransportSimulateBulkAction extends TransportAbstractBulkAction { ); public static final NodeFeature SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS = new NodeFeature("simulate.index.template.substitutions"); public static final NodeFeature SIMULATE_MAPPING_ADDITION = new NodeFeature("simulate.mapping.addition"); + public static final NodeFeature 
SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING = new NodeFeature("simulate.support.non.template.mapping"); private final IndicesService indicesService; private final NamedXContentRegistry xContentRegistry; private final Set indexSettingProviders; @@ -258,6 +259,10 @@ private Exception validateMappings( String matchingTemplate = findV2Template(simulatedState.metadata(), request.index(), false); if (matchingTemplate != null) { + /* + * The index matches a v2 template (including possibly one or more of the substitutions passed in). So we use this + * template, and then possibly apply the mapping addition if it is not null, and validate. + */ final Template template = TransportSimulateIndexTemplateAction.resolveTemplate( matchingTemplate, request.index(), @@ -273,13 +278,36 @@ private Exception validateMappings( validateUpdatedMappings(mappings, mergedMappings, request, sourceToParse); } else { List matchingTemplates = findV1Templates(simulatedState.metadata(), request.index(), false); - final Map mappingsMap = MetadataCreateIndexService.parseV1Mappings( - "{}", - matchingTemplates.stream().map(IndexTemplateMetadata::getMappings).collect(toList()), - xContentRegistry - ); - final CompressedXContent combinedMappings = mergeMappings(new CompressedXContent(mappingsMap), mappingAddition); - validateUpdatedMappings(null, combinedMappings, request, sourceToParse); + if (matchingTemplates.isEmpty() == false) { + /* + * The index matches v1 mappings. These are not compatible with component_template_substitutions or + * index_template_substitutions, but we can apply a mapping_addition. + */ + final Map mappingsMap = MetadataCreateIndexService.parseV1Mappings( + "{}", + matchingTemplates.stream().map(IndexTemplateMetadata::getMappings).collect(toList()), + xContentRegistry + ); + final CompressedXContent combinedMappings = mergeMappings(new CompressedXContent(mappingsMap), mappingAddition); + validateUpdatedMappings(null, combinedMappings, request, sourceToParse); + } else if (indexAbstraction != null && mappingAddition.isEmpty() == false) { + /* + * The index matched no templates of any kind, including the substitutions. But it might have a mapping. So we + * merge in the mapping addition if it exists, and validate. + */ + MappingMetadata mappingFromIndex = clusterService.state().metadata().index(indexAbstraction.getName()).mapping(); + CompressedXContent currentIndexCompressedXContent = mappingFromIndex == null ? null : mappingFromIndex.source(); + CompressedXContent combinedMappings = mergeMappings(currentIndexCompressedXContent, mappingAddition); + validateUpdatedMappings(null, combinedMappings, request, sourceToParse); + } else { + /* + * The index matched no templates and had no mapping of its own. If there were component template substitutions + * or index template substitutions, they didn't match anything. So just apply the mapping addition if it exists, + * and validate. + */ + final CompressedXContent combinedMappings = mergeMappings(null, mappingAddition); + validateUpdatedMappings(null, combinedMappings, request, sourceToParse); + } } } } catch (Exception e) { From 19a35a4592cc250c0b53a76d474ec962187e9309 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 22 Oct 2024 17:29:57 -0500 Subject: [PATCH 039/202] Unmuting 80_ingest_simulate method (#115370) It looks like this test was accidentally re-muted. Unmuting again. 
Closes #112575 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 0dc82b311de7f..a7f36cdd06d66 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -99,9 +99,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112424 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 -- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT - method: test {yaml=ingest/80_ingest_simulate/Test ingest simulate with reroute and mapping validation from templates} - issue: https://github.com/elastic/elasticsearch/issues/112575 - class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests method: testClientServiceMutualAuthentication issue: https://github.com/elastic/elasticsearch/issues/112529 From 5c2e28e7521af2b82e2d73a18ea33a1e682c040f Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 22 Oct 2024 15:42:19 -0700 Subject: [PATCH 040/202] Add tests for migration between source modes in logsdb data stream (#115283) --- .../logsdb/LogsDbSourceModeMigrationIT.java | 290 ++++++++++++++++++ .../logsdb/LogsIndexModeCustomSettingsIT.java | 11 - .../xpack/logsdb/LogsIndexModeRestTestIT.java | 11 + 3 files changed, 301 insertions(+), 11 deletions(-) create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsDbSourceModeMigrationIT.java diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsDbSourceModeMigrationIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsDbSourceModeMigrationIT.java new file mode 100644 index 0000000000000..adb23567e3933 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsDbSourceModeMigrationIT.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +public class LogsDbSourceModeMigrationIT extends LogsIndexModeRestTestIT { + public static final String INDEX_TEMPLATE = """ + { + "index_patterns": ["my-logs-*-*"], + "priority": 100, + "data_stream": {}, + "composed_of": [ + "my-logs-mapping", + "my-logs-original-source", + "my-logs-migrated-source" + ], + "ignore_missing_component_templates": ["my-logs-original-source", "my-logs-migrated-source"] + } + """; + + public static final String MAPPING_COMPONENT_TEMPLATE = """ + { + "template": { + "settings": { + "index": { + "mode": "logsdb" + } + }, + "mappings": { + "properties": { + "@timestamp": { + "type": "date", + "format": "epoch_millis" + }, + "message": { + "type": "text" + }, + "method": { + "type": "keyword" + }, + "hits": { + "type": "long" + } + } + } + } + }"""; + + public static final String STORED_SOURCE_COMPONENT_TEMPLATE = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "stored" + } + } + } + }"""; + + public static final String SYNTHETIC_SOURCE_COMPONENT_TEMPLATE = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "synthetic" + } + } + } + }"""; + + @ClassRule() + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .module("constant-keyword") + .module("data-streams") + .module("mapper-extras") + .module("x-pack-aggregate-metric") + .module("x-pack-stack") + .setting("xpack.security.enabled", "false") + .setting("xpack.otel_data.registry.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("cluster.logsdb.enabled", "true") + .setting("stack.templates.enabled", "false") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Before + public void setup() { + client = client(); + } + + private RestClient client; + + public void testSwitchFromStoredToSyntheticSource() throws IOException { + assertOK(putComponentTemplate(client, "my-logs-mapping", MAPPING_COMPONENT_TEMPLATE)); + assertOK(putComponentTemplate(client, "my-logs-original-source", STORED_SOURCE_COMPONENT_TEMPLATE)); + + assertOK(putTemplate(client, "my-logs", INDEX_TEMPLATE)); + assertOK(createDataStream(client, "my-logs-ds-test")); + + var initialSourceMode = (String) getSetting( + client, + getDataStreamBackingIndex(client, "my-logs-ds-test", 0), + "index.mapping.source.mode" + ); + 
assertThat(initialSourceMode, equalTo("stored")); + var initialIndexMode = (String) getSetting(client, getDataStreamBackingIndex(client, "my-logs-ds-test", 0), "index.mode"); + assertThat(initialIndexMode, equalTo("logsdb")); + + var indexedWithStoredSource = new ArrayList(); + var indexedWithSyntheticSource = new ArrayList(); + for (int i = 0; i < 10; i++) { + indexedWithStoredSource.add(generateDoc()); + indexedWithSyntheticSource.add(generateDoc()); + } + + Response storedSourceBulkResponse = bulkIndex(client, "my-logs-ds-test", indexedWithStoredSource, 0); + assertOK(storedSourceBulkResponse); + assertThat(entityAsMap(storedSourceBulkResponse).get("errors"), Matchers.equalTo(false)); + + assertOK(putComponentTemplate(client, "my-logs-migrated-source", SYNTHETIC_SOURCE_COMPONENT_TEMPLATE)); + var rolloverResponse = rolloverDataStream(client, "my-logs-ds-test"); + assertOK(rolloverResponse); + assertThat(entityAsMap(rolloverResponse).get("rolled_over"), is(true)); + + var finalSourceMode = (String) getSetting( + client, + getDataStreamBackingIndex(client, "my-logs-ds-test", 1), + "index.mapping.source.mode" + ); + assertThat(finalSourceMode, equalTo("synthetic")); + + Response syntheticSourceBulkResponse = bulkIndex(client, "my-logs-ds-test", indexedWithSyntheticSource, 10); + assertOK(syntheticSourceBulkResponse); + assertThat(entityAsMap(syntheticSourceBulkResponse).get("errors"), Matchers.equalTo(false)); + + var allDocs = Stream.concat(indexedWithStoredSource.stream(), indexedWithSyntheticSource.stream()).toList(); + + var sourceList = search(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(allDocs.size()), "my-logs-ds-test"); + assertThat(sourceList.size(), equalTo(allDocs.size())); + + for (int i = 0; i < sourceList.size(); i++) { + var expected = XContentHelper.convertToMap(BytesReference.bytes(allDocs.get(i)), false, XContentType.JSON).v2(); + assertThat(sourceList.get(i), equalTo(expected)); + } + } + + public void testSwitchFromSyntheticToStoredSource() throws IOException { + assertOK(putComponentTemplate(client, "my-logs-mapping", MAPPING_COMPONENT_TEMPLATE)); + assertOK(putComponentTemplate(client, "my-logs-original-source", SYNTHETIC_SOURCE_COMPONENT_TEMPLATE)); + + assertOK(putTemplate(client, "my-logs", INDEX_TEMPLATE)); + assertOK(createDataStream(client, "my-logs-ds-test")); + + var initialSourceMode = (String) getSetting( + client, + getDataStreamBackingIndex(client, "my-logs-ds-test", 0), + "index.mapping.source.mode" + ); + assertThat(initialSourceMode, equalTo("synthetic")); + var initialIndexMode = (String) getSetting(client, getDataStreamBackingIndex(client, "my-logs-ds-test", 0), "index.mode"); + assertThat(initialIndexMode, equalTo("logsdb")); + + var indexedWithSyntheticSource = new ArrayList(); + var indexedWithStoredSource = new ArrayList(); + for (int i = 0; i < 10; i++) { + indexedWithSyntheticSource.add(generateDoc()); + indexedWithStoredSource.add(generateDoc()); + } + + Response syntheticSourceBulkResponse = bulkIndex(client, "my-logs-ds-test", indexedWithSyntheticSource, 0); + assertOK(syntheticSourceBulkResponse); + assertThat(entityAsMap(syntheticSourceBulkResponse).get("errors"), Matchers.equalTo(false)); + + assertOK(putComponentTemplate(client, "my-logs-migrated-source", STORED_SOURCE_COMPONENT_TEMPLATE)); + var rolloverResponse = rolloverDataStream(client, "my-logs-ds-test"); + assertOK(rolloverResponse); + assertThat(entityAsMap(rolloverResponse).get("rolled_over"), is(true)); + + var finalSourceMode = (String) getSetting( + 
client, + getDataStreamBackingIndex(client, "my-logs-ds-test", 1), + "index.mapping.source.mode" + ); + assertThat(finalSourceMode, equalTo("stored")); + + Response storedSourceBulkResponse = bulkIndex(client, "my-logs-ds-test", indexedWithStoredSource, 10); + assertOK(storedSourceBulkResponse); + assertThat(entityAsMap(storedSourceBulkResponse).get("errors"), Matchers.equalTo(false)); + + var allDocs = Stream.concat(indexedWithSyntheticSource.stream(), indexedWithStoredSource.stream()).toList(); + + var sourceList = search(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(allDocs.size()), "my-logs-ds-test"); + assertThat(sourceList.size(), equalTo(allDocs.size())); + + for (int i = 0; i < sourceList.size(); i++) { + var expected = XContentHelper.convertToMap(BytesReference.bytes(allDocs.get(i)), false, XContentType.JSON).v2(); + assertThat(sourceList.get(i), equalTo(expected)); + } + } + + private static Response bulkIndex(RestClient client, String dataStreamName, List documents, int startId) + throws IOException { + var sb = new StringBuilder(); + int id = startId; + for (var document : documents) { + sb.append(Strings.format("{ \"create\": { \"_id\" : \"%d\" } }", id)).append("\n"); + sb.append(Strings.toString(document)).append("\n"); + id++; + } + + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + bulkRequest.setJsonEntity(sb.toString()); + bulkRequest.addParameter("refresh", "true"); + return client.performRequest(bulkRequest); + } + + @SuppressWarnings("unchecked") + private List> search(SearchSourceBuilder search, String dataStreamName) throws IOException { + var request = new Request("GET", "/" + dataStreamName + "/_search"); + request.setJsonEntity(Strings.toString(search)); + var searchResponse = client.performRequest(request); + assertOK(searchResponse); + + Map searchResponseMap = XContentHelper.convertToMap( + XContentType.JSON.xContent(), + searchResponse.getEntity().getContent(), + false + ); + var hitsMap = (Map) searchResponseMap.get("hits"); + + var hitsList = (List>) hitsMap.get("hits"); + assertThat(hitsList.size(), greaterThan(0)); + + return hitsList.stream() + .sorted(Comparator.comparingInt((Map hit) -> Integer.parseInt((String) hit.get("_id")))) + .map(hit -> (Map) hit.get("_source")) + .toList(); + } + + private static XContentBuilder generateDoc() throws IOException { + var doc = XContentFactory.jsonBuilder(); + doc.startObject(); + { + doc.field("@timestamp", Long.toString(randomMillisUpToYear9999())); + doc.field("message", randomAlphaOfLengthBetween(20, 50)); + doc.field("method", randomAlphaOfLength(3)); + doc.field("hits", randomLong()); + } + doc.endObject(); + + return doc; + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java index c5ccee1d36b72..f529b9fa1db96 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.logsdb; -import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.Settings; @@ -496,16 +495,6 @@ public void testIgnoreAboveSetting() throws IOException { } } - private static 
Map getMapping(final RestClient client, final String indexName) throws IOException { - final Request request = new Request("GET", "/" + indexName + "/_mapping"); - - Map mappings = ((Map>) entityAsMap(client.performRequest(request)).get(indexName)).get( - "mappings" - ); - - return mappings; - } - private Function> subObject(String key) { return (mapAsObject) -> (Map) ((Map) mapAsObject).get(key); } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java index dbee5d1d2de8c..cc7f5bdb33871 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java @@ -98,4 +98,15 @@ protected static Response putClusterSetting(final RestClient client, final Strin request.setJsonEntity("{ \"transient\": { \"" + settingName + "\": " + settingValue + " } }"); return client.performRequest(request); } + + @SuppressWarnings("unchecked") + protected static Map getMapping(final RestClient client, final String indexName) throws IOException { + final Request request = new Request("GET", "/" + indexName + "/_mapping"); + + Map mappings = ((Map>) entityAsMap(client.performRequest(request)).get(indexName)).get( + "mappings" + ); + + return mappings; + } } From 64281ddf27e5a2df2e57ee00ee32fc8fd91484f0 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 22 Oct 2024 17:36:40 -0700 Subject: [PATCH 041/202] Don't run mixed cluster tests against the current version (#115377) By definition a "mixed cluster" has nodes of two different versions of Elasticsearch. It doesn't make sense to run these tests against the current version of Elasticsearch. --- x-pack/plugin/downsample/qa/mixed-cluster/build.gradle | 4 ++-- x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle | 3 ++- x-pack/plugin/inference/qa/mixed-cluster/build.gradle | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle b/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle index 61aa2927e46de..6b1c7e42c0fde 100644 --- a/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle +++ b/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle @@ -5,7 +5,7 @@ * 2.0. 
*/ -import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -26,7 +26,7 @@ restResources { } def supportedVersion = bwcVersion -> { - return bwcVersion.onOrAfter("8.10.0"); + return bwcVersion.onOrAfter("8.10.0") && bwcVersion != VersionProperties.elasticsearchVersion } BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle index e4223f03c3a03..fb47255e8d52e 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle @@ -1,5 +1,6 @@ import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -27,7 +28,7 @@ GradleUtils.extendSourceSet(project, "javaRestTest", "yamlRestTest") // ESQL is available in 8.11 or later def supportedVersion = bwcVersion -> { - return bwcVersion.onOrAfter(Version.fromString("8.11.0")); + return bwcVersion.onOrAfter(Version.fromString("8.11.0")) && bwcVersion != VersionProperties.elasticsearchVersion } BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> diff --git a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle index 1d5369468b054..0bc4813f25137 100644 --- a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle +++ b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle @@ -20,7 +20,7 @@ dependencies { // inference is available in 8.11 or later def supportedVersion = bwcVersion -> { - return bwcVersion.onOrAfter(Version.fromString("8.11.0")); + return bwcVersion.onOrAfter(Version.fromString("8.11.0")) && bwcVersion != VersionProperties.elasticsearchVersion } BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> From 43b93dcff75fd7f44586546283d6e37b0779d222 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:21:23 +1100 Subject: [PATCH 042/202] Mute org.elasticsearch.reservedstate.service.FileSettingsServiceTests testProcessFileChanges #115280 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index a7f36cdd06d66..6bbccf8bb05bb 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -321,6 +321,9 @@ tests: - class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests method: testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval issue: https://github.com/elastic/elasticsearch/issues/115368 +- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests + method: testProcessFileChanges + issue: https://github.com/elastic/elasticsearch/issues/115280 # Examples: # From ba7d0954efff88295697b7d6c9809b9f8f0ba636 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Wed, 23 Oct 2024 07:57:32 +0200 Subject: [PATCH 043/202] Fix synonyms CI tests timeout (#114641) --- muted-tests.yml | 9 --------- .../test/synonyms/10_synonyms_put.yml | 8 ++++++++ .../test/synonyms/110_synonyms_invalid.yml | 5 +++++ 
.../test/synonyms/20_synonyms_get.yml | 5 ++++- .../test/synonyms/30_synonyms_delete.yml | 4 ++++ .../test/synonyms/40_synonyms_sets_get.yml | 19 ++++++------------- .../test/synonyms/50_synonym_rule_put.yml | 5 ++++- .../test/synonyms/60_synonym_rule_get.yml | 7 ++++--- .../test/synonyms/70_synonym_rule_delete.yml | 5 +++++ .../test/synonyms/80_synonyms_from_index.yml | 6 +++++- .../90_synonyms_reloading_for_synset.yml | 6 +++++- 11 files changed, 50 insertions(+), 29 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 6bbccf8bb05bb..ccb387986551c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -259,15 +259,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultElserIT method: testInferCreatesDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114503 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Synonym set not found} - issue: https://github.com/elastic/elasticsearch/issues/114432 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Get a synonym rule} - issue: https://github.com/elastic/elasticsearch/issues/114443 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Synonym rule not found} - issue: https://github.com/elastic/elasticsearch/issues/114444 - class: org.elasticsearch.xpack.inference.integration.ModelRegistryIT method: testGetModel issue: https://github.com/elastic/elasticsearch/issues/114657 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml index bcd58f3f7bd64..675b98133ce11 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml @@ -15,6 +15,10 @@ setup: - match: { result: "created" } + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.get_synonym: id: test-update-synonyms @@ -58,6 +62,10 @@ setup: - match: { result: "created" } + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.get_synonym: id: test-empty-synonyms diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml index d3d0a3bb4df70..4e77e10495109 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml @@ -11,6 +11,11 @@ setup: synonyms_set: synonyms: "foo => bar, baz" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. 
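+      # (Roughly equivalent to "GET _cluster/health?wait_for_no_initializing_shards=true", which blocks until no shard
+      # is left in the INITIALIZING state.)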
+ - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: indices.create: index: test_index diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml index 3494f33466ce4..5e6d4ec2341ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml @@ -14,6 +14,10 @@ setup: - synonyms: "test => check" id: "test-id-3" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Get synonyms set": @@ -31,7 +35,6 @@ setup: id: "test-id-2" - synonyms: "test => check" id: "test-id-3" - --- "Get synonyms set - not found": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml index 351ff4e186d8a..23c907f6a1137 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml @@ -12,6 +12,10 @@ setup: - synonyms: "bye => goodbye" id: "test-id-2" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Delete synonyms set": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml index 723c41e163eb8..7c145dafd81cd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml @@ -9,6 +9,12 @@ setup: synonyms_set: - synonyms: "hello, hi" - synonyms: "goodbye, bye" + + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.put_synonym: id: test-synonyms-1 @@ -23,21 +29,8 @@ setup: body: synonyms_set: - synonyms: "pc, computer" - # set logging to debug for issue: https://github.com/elastic/elasticsearch/issues/102261 - - do: - cluster.put_settings: - body: - persistent: - logger.org.elasticsearch.synonyms: DEBUG --- -teardown: - - do: - cluster.put_settings: - body: - persistent: - logger.org.elasticsearch.synonyms: null ---- "List synonyms set": - do: synonyms.get_synonyms_sets: { } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml index f3711bb0774ca..d8611000fe465 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml @@ -14,7 +14,10 @@ setup: - synonyms: "test => check" id: "test-id-3" - + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. 
+ - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Update a synonyms rule": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml index 2a7c8aff89d8e..0c962b51e08cb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml @@ -13,11 +13,12 @@ setup: id: "test-id-2" - synonyms: "test => check" id: "test-id-3" + + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - index: .synonyms - timeout: 1m - wait_for_status: green + wait_for_no_initializing_shards: true + --- "Get a synonym rule": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml index a4853b0b6d414..41ab293158a35 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml @@ -14,6 +14,11 @@ setup: - synonyms: "test => check" id: "test-id-3" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + --- "Delete synonym rule": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml index 89ad933370e1c..3aba0f0b4b78b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml @@ -2,7 +2,6 @@ setup: - requires: cluster_features: ["gte_v8.10.0"] reason: Loading synonyms from index is introduced in 8.10.0 - # Create a new synonyms set - do: synonyms.put_synonym: @@ -14,6 +13,11 @@ setup: - synonyms: "bye => goodbye" id: "synonym-rule-2" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + # Create an index with synonym_filter that uses that synonyms set - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml index dc94b36222402..1ceb5b43b8129 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml @@ -3,7 +3,6 @@ - requires: cluster_features: ["gte_v8.10.0"] reason: Reloading analyzers for specific synonym set is introduced in 8.10.0 - # Create synonyms_set1 - do: synonyms.put_synonym: @@ -26,6 +25,11 @@ - synonyms: "bye => goodbye" id: "synonym-rule-2" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. 
+ - do: + cluster.health: + wait_for_no_initializing_shards: true + # Create my_index1 with synonym_filter that uses synonyms_set1 - do: indices.create: From 32dee6aaaeb18a8d6d4f0fee8bbf338e8991650d Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 23 Oct 2024 09:30:02 +0200 Subject: [PATCH 044/202] [test] Dynamically pick up the upper bound snapshot index version (#114703) Pick an index version between the minimum compatible and latest known version for snapshot testing. --- .../snapshots/AbstractSnapshotIntegTestCase.java | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 8bc81fef2157d..7a72a7bd0daf0 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.plugins.Plugin; @@ -366,15 +365,9 @@ protected static Settings.Builder indexSettingsNoReplicas(int shards) { /** * Randomly write an empty snapshot of an older version to an empty repository to simulate an older repository metadata format. */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - // This used to pick an index version from 7.0.0 to 8.9.0. The minimum now is 8.0.0 but it's not clear what the upper range should be protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) throws Exception { if (randomBoolean() && randomBoolean()) { - initWithSnapshotVersion( - repoName, - repoPath, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_9_0) - ); + initWithSnapshotVersion(repoName, repoPath, IndexVersionUtils.randomVersion()); } } From 530d15029eb8ada2cd7cd76ea5be15adbfc0e639 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 23 Oct 2024 09:30:16 +0200 Subject: [PATCH 045/202] Remove direct cloning of BytesTransportRequests (#114808) All request handlers should be able to read `BytesTransportRequest` to a class that can be copied by re-serializing. Direct copying was only necessary for the legacy `JOIN_VALIDATE_ACTION_NAME` request handler.
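To make the round-trip concrete, here is a minimal sketch of the clone-by-reserialization pattern this patch standardizes on. The registry lookup and stream handling are taken from the MockTransportService hunk below; the wrapping helper method and its name are illustrative only, not part of the patch:

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.transport.RequestHandlerRegistry;
    import org.elasticsearch.transport.TransportRequest;

    // Hypothetical helper showing the pattern; getRequestHandler(action) is the
    // registry lookup that MockTransportService already exposes.
    private TransportRequest cloneByReserializing(String action, TransportRequest request) throws IOException {
        // "Poor man's request cloning": serialize into an in-memory buffer...
        BytesStreamOutput bStream = new BytesStreamOutput();
        request.writeTo(bStream);
        // ...then deserialize through the handler registry for the action, which can
        // read back any request type -- including BytesTransportRequest -- so the
        // dedicated raw-bytes copy path removed below is no longer needed.
        RequestHandlerRegistry<?> reg = getRequestHandler(action);
        return reg.newRequest(bStream.bytes().streamInput());
    }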
See #89926 --- .../test/transport/MockTransportService.java | 20 ++----------------- 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index fd376fcd07688..18c591166e720 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -39,7 +39,6 @@ import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -50,7 +49,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; @@ -586,13 +584,8 @@ public void sendRequest( // poor mans request cloning... BytesStreamOutput bStream = new BytesStreamOutput(); request.writeTo(bStream); - final TransportRequest clonedRequest; - if (request instanceof BytesTransportRequest) { - clonedRequest = copyRawBytesForBwC(bStream); - } else { - RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); - clonedRequest = reg.newRequest(bStream.bytes().streamInput()); - } + RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); + final TransportRequest clonedRequest = reg.newRequest(bStream.bytes().streamInput()); assert clonedRequest.getClass().equals(MasterNodeRequestHelper.unwrapTermOverride(request).getClass()) : clonedRequest + " vs " + request; @@ -640,15 +633,6 @@ protected void doRun() throws IOException { } } - // Some request handlers read back a BytesTransportRequest - // into a different class that cannot be re-serialized (i.e. JOIN_VALIDATE_ACTION_NAME), - // in those cases we just copy the raw bytes back to a BytesTransportRequest. 
- // This is only needed for the BwC for JOIN_VALIDATE_ACTION_NAME and can be removed in the next major - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - private static TransportRequest copyRawBytesForBwC(BytesStreamOutput bStream) throws IOException { - return new BytesTransportRequest(bStream.bytes().streamInput()); - } - @Override public void clearCallback() { synchronized (this) { From aa70c41abaaecd94676a019fee464cf71b453f51 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 23 Oct 2024 09:30:48 +0200 Subject: [PATCH 046/202] [test] Always assume that the old cluster supports replication of closed indices (#114314) Support for replicating closed indices was added in #39506 (7.1.0), so we can expect that the cluster always supports replication of closed indices in 8.0/9.0. --- .../elasticsearch/upgrades/FullClusterRestartIT.java | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 73f291da15ead..92a704f793dc2 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -1203,15 +1202,8 @@ public void testClosedIndices() throws Exception { closeIndex(index); } - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_INDEXING) // This check can be removed (always assume true) - var originalClusterSupportsReplicationOfClosedIndices = oldClusterHasFeature(RestTestLegacyFeatures.REPLICATION_OF_CLOSED_INDICES); - - if (originalClusterSupportsReplicationOfClosedIndices) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } + ensureGreenLongWait(index); + assertClosedIndex(index, true); if (isRunningAgainstOldCluster() == false) { openIndex(index); From 387062eb808f2c8a6c0724d1317b57176a60539d Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 23 Oct 2024 10:20:42 +0200 Subject: [PATCH 047/202] Sometimes delegate to SourceLoader in ValuesSourceReaderOperator for required stored fields (#115114) If source is required by a block loader then the StoredFieldsSpec that gets populated should be enhanced by SourceLoader#requiredStoredFields(...) in ValuesSourceReaderOperator. Otherwise, in the case of synthetic source, many stored fields aren't loaded, which causes only a subset of _source to be synthesized. For example, unmapped fields or field values that exceed the configured ignore_above will not appear in _source. This happens when field types fall back to a block loader implementation that uses _source. The required field values are then extracted from the source once loaded. This change also reverts the production code changes introduced via #114903. That change only ensured that the _ignored_source field was added to the required list of stored fields. In reality more fields could be required.
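In sketch form, the delegation just described looks like this; the merge call mirrors the ValuesSourceReaderOperator hunk further down in this patch, while `shardContext` and its `newSourceLoader` supplier are a stand-in for the operator's per-shard lookup:

    // If any block loader needs _source, fetch the SourceLoader and widen the
    // StoredFieldsSpec with the stored fields that loader says it requires -- in
    // synthetic-source mode that includes fields such as _ignored_source, without
    // which only a subset of _source would be synthesized.
    SourceLoader sourceLoader = null;
    if (storedFieldsSpec.requiresSource()) {
        sourceLoader = shardContext.newSourceLoader.get();
        storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(true, false, sourceLoader.requiredStoredFields()));
    }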
This change is a better fix, since it also handles other cases and the SourceLoader implementation indicates which stored fields are needed. Closes #115076 --- .../extras/MatchOnlyTextFieldMapper.java | 3 +- .../mapper/extras/ScaledFloatFieldMapper.java | 3 +- muted-tests.yml | 12 --- .../mapper/AbstractGeometryFieldMapper.java | 3 +- .../index/mapper/BlockSourceReader.java | 47 ++++------ .../index/mapper/BooleanFieldMapper.java | 2 +- .../index/mapper/DateFieldMapper.java | 3 +- .../index/mapper/KeywordFieldMapper.java | 3 +- .../index/mapper/NumberFieldMapper.java | 65 ++++--------- .../index/mapper/TextFieldMapper.java | 14 +-- .../index/mapper/BlockSourceReaderTests.java | 2 +- .../index/mapper/TextFieldMapperTests.java | 3 +- .../KeywordFieldSyntheticSourceSupport.java | 5 + .../index/mapper/MapperServiceTestCase.java | 5 + .../index/mapper/MapperTestCase.java | 47 +++++++--- ...xtFieldFamilySyntheticSourceTestSetup.java | 20 +--- .../lucene/ValuesSourceReaderOperator.java | 27 ++++-- .../mapper/SemanticTextFieldMapper.java | 3 +- ..._esql_synthetic_source_disabled_fields.yml | 92 +++++++++++++++++-- .../test/51_esql_synthetic_source.yml | 77 ++++++++++++++++ .../unsignedlong/UnsignedLongFieldMapper.java | 3 +- 21 files changed, 276 insertions(+), 163 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index cd252fcff2376..5904169308fab 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -364,8 +364,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name())); // MatchOnlyText never has norms, so we have to use the field names field BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup, sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup); } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index 1f647cb977cf5..b845545133e19 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -319,8 +319,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ?
BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup, sourceMode); + return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup); } @Override diff --git a/muted-tests.yml b/muted-tests.yml index ccb387986551c..45b1398df7ace 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,18 +282,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 -- class: org.elasticsearch.index.mapper.TextFieldMapperTests - method: testBlockLoaderFromRowStrideReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115066 -- class: org.elasticsearch.index.mapper.TextFieldMapperTests - method: testBlockLoaderFromColumnReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115073 -- class: org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapperTests - method: testBlockLoaderFromColumnReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115074 -- class: org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapperTests - method: testBlockLoaderFromRowStrideReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115076 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)} issue: https://github.com/elastic/elasticsearch/issues/115231 diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 3512989c115ee..c38b5beeb55a0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -189,8 +189,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { protected BlockLoader blockLoaderFromSource(BlockLoaderContext blContext) { ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll(), sourceMode); + return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); } protected abstract Object nullValueAsSource(T nullValue); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java index 105943c732a5e..19a1cce746172 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Set; /** * Loads values from {@code _source}. This whole process is very slow and cast-tastic, @@ -30,14 +29,6 @@ * slow. */ public abstract class BlockSourceReader implements BlockLoader.RowStrideReader { - - // _ignored_source is needed when source mode is synthetic. 
- static final StoredFieldsSpec NEEDS_SOURCE_AND_IGNORED_SOURCE = new StoredFieldsSpec( - true, - false, - Set.of(IgnoredSourceFieldMapper.NAME) - ); - private final ValueFetcher fetcher; private final List ignoredValues = new ArrayList<>(); private final DocIdSetIterator iter; @@ -100,12 +91,10 @@ public interface LeafIteratorLookup { private abstract static class SourceBlockLoader implements BlockLoader { protected final ValueFetcher fetcher; private final LeafIteratorLookup lookup; - private final SourceFieldMapper.Mode sourceMode; - private SourceBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { + private SourceBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { this.fetcher = fetcher; this.lookup = lookup; - this.sourceMode = sourceMode; } @Override @@ -115,7 +104,7 @@ public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) @Override public final StoredFieldsSpec rowStrideStoredFieldSpec() { - return sourceMode == SourceFieldMapper.Mode.SYNTHETIC ? NEEDS_SOURCE_AND_IGNORED_SOURCE : StoredFieldsSpec.NEEDS_SOURCE; + return StoredFieldsSpec.NEEDS_SOURCE; } @Override @@ -151,8 +140,8 @@ public final String toString() { * Load {@code boolean}s from {@code _source}. */ public static class BooleansBlockLoader extends SourceBlockLoader { - public BooleansBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public BooleansBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -191,8 +180,8 @@ public String toString() { * Load {@link BytesRef}s from {@code _source}. */ public static class BytesRefsBlockLoader extends SourceBlockLoader { - public BytesRefsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public BytesRefsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -202,7 +191,7 @@ public final Builder builder(BlockFactory factory, int expectedCount) { @Override protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) throws IOException { - return new BytesRefs(fetcher, iter, null); + return new BytesRefs(fetcher, iter); } @Override @@ -212,8 +201,8 @@ protected String name() { } public static class GeometriesBlockLoader extends SourceBlockLoader { - public GeometriesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public GeometriesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -223,7 +212,7 @@ public final Builder builder(BlockFactory factory, int expectedCount) { @Override protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) { - return new Geometries(fetcher, iter, null); + return new Geometries(fetcher, iter); } @Override @@ -235,7 +224,7 @@ protected String name() { private static class BytesRefs extends BlockSourceReader { private final BytesRef scratch = new BytesRef(); - BytesRefs(ValueFetcher fetcher, DocIdSetIterator iter, SourceFieldMapper.Mode sourceMode) { + BytesRefs(ValueFetcher fetcher, DocIdSetIterator iter) { super(fetcher, iter); } @@ -252,7 +241,7 @@ public String toString() { private static class Geometries extends BlockSourceReader { - Geometries(ValueFetcher fetcher, DocIdSetIterator iter, 
SourceFieldMapper.Mode sourceMode) { + Geometries(ValueFetcher fetcher, DocIdSetIterator iter) { super(fetcher, iter); } @@ -275,8 +264,8 @@ public String toString() { * Load {@code double}s from {@code _source}. */ public static class DoublesBlockLoader extends SourceBlockLoader { - public DoublesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public DoublesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -315,8 +304,8 @@ public String toString() { * Load {@code int}s from {@code _source}. */ public static class IntsBlockLoader extends SourceBlockLoader { - public IntsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public IntsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -355,8 +344,8 @@ public String toString() { * Load {@code long}s from {@code _source}. */ public static class LongsBlockLoader extends SourceBlockLoader { - public LongsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public LongsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index c2bf9e18bfeec..5aaaf7dce83c9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -314,7 +314,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isIndexed() || isStored() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - return new BlockSourceReader.BooleansBlockLoader(fetcher, lookup, blContext.indexSettings().getIndexMappingSourceMode()); + return new BlockSourceReader.BooleansBlockLoader(fetcher, lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index d05f0e477db09..87e4ce5f90479 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -793,8 +793,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? 
BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name())), lookup, sourceMode); + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name())), lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 802680e7f373e..ecc708bc94614 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -634,8 +634,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { return new BlockStoredFieldsReader.BytesFromBytesRefsBlockLoader(name()); } SourceValueFetcher fetcher = sourceValueFetcher(blContext.sourcePaths(name())); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, sourceBlockLoaderLookup(blContext), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, sourceBlockLoaderLookup(blContext)); } private BlockSourceReader.LeafIteratorLookup sourceBlockLoaderLookup(BlockLoaderContext blContext) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 3608e8ab261c1..55ed1e10428aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -462,12 +462,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, FLOAT("float", NumericType.FLOAT) { @@ -650,12 +646,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, DOUBLE("double", NumericType.DOUBLE) { @@ -804,12 +796,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, BYTE("byte", 
NumericType.BYTE) { @@ -921,12 +909,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1038,12 +1022,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1229,12 +1209,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } }, LONG("long", NumericType.LONG) { @@ -1380,12 +1356,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1663,11 +1635,7 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { abstract BlockLoader blockLoaderFromDocValues(String fieldName); - abstract BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ); + abstract BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup); } public static class NumberFieldType extends SimpleMappedFieldType { @@ -1806,8 +1774,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? 
BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return type.blockLoaderFromSource(sourceValueFetcher(blContext.sourcePaths(name())), lookup, sourceMode); + return type.blockLoaderFromSource(sourceValueFetcher(blContext.sourcePaths(name())), lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 3f77edc819602..253f70f4fda47 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -1007,20 +1007,8 @@ protected String delegatingTo() { if (isStored()) { return new BlockStoredFieldsReader.BytesFromStringsBlockLoader(name()); } - if (isSyntheticSource && syntheticSourceDelegate == null) { - /* - * When we're in synthetic source mode we don't currently - * support text fields that are not stored and are not children - * of perfect keyword fields. We'd have to load from the parent - * field and then convert the result to a string. In this case, - * even if we would synthesize the source, the current field - * would be missing. - */ - return null; - } SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name())); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, blockReaderDisiLookup(blContext), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, blockReaderDisiLookup(blContext)); } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java index 286be8d12570d..357ada3ad656d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java @@ -51,7 +51,7 @@ public void testEmptyArray() throws IOException { private void loadBlock(LeafReaderContext ctx, Consumer test) throws IOException { ValueFetcher valueFetcher = SourceValueFetcher.toString(Set.of("field")); BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromNorms("field"); - BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(valueFetcher, lookup, null); + BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(valueFetcher, lookup); assertThat(loader.columnAtATimeReader(ctx), nullValue()); BlockLoader.RowStrideReader reader = loader.rowStrideReader(ctx); assertThat(loader.rowStrideStoredFieldSpec(), equalTo(StoredFieldsSpec.NEEDS_SOURCE)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 86914cfe9ced7..c2375e948fda0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -1353,6 +1353,7 @@ private void testBlockLoaderFromParent(boolean columnReader, boolean syntheticSo }; MapperService mapper = createMapperService(syntheticSource ? 
syntheticSourceMapping(buildFields) : mapping(buildFields)); BlockReaderSupport blockReaderSupport = getSupportedReaders(mapper, "field.sub"); - testBlockLoader(columnReader, example, blockReaderSupport); + var sourceLoader = mapper.mappingLookup().newSourceLoader(SourceFieldMetrics.NOOP); + testBlockLoader(columnReader, example, blockReaderSupport, sourceLoader); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java index 0d05c3d0cd77b..502ffdde62e5a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java @@ -37,6 +37,11 @@ public class KeywordFieldSyntheticSourceSupport implements MapperTestCase.Synthe this.docValues = useFallbackSyntheticSource == false || ESTestCase.randomBoolean(); } + @Override + public boolean ignoreAbove() { + return ignoreAbove != null; + } + @Override public boolean preservesExactSource() { // We opt in into fallback synthetic source implementation diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 8bc2666bcfe3b..da04f30ff8023 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -179,6 +179,11 @@ public final MapperService createMapperService(XContentBuilder mappings) throws return createMapperService(getVersion(), mappings); } + public final MapperService createSytheticSourceMapperService(XContentBuilder mappings) throws IOException { + var settings = Settings.builder().put("index.mapping.source.mode", "synthetic").build(); + return createMapperService(getVersion(), settings, () -> true, mappings); + } + protected IndexVersion getVersion() { return IndexVersion.current(); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 7669ada750c14..c89c0b2e37dd2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -49,6 +49,7 @@ import org.elasticsearch.script.ScriptFactory; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import org.elasticsearch.search.lookup.LeafStoredFieldsLookup; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.Source; @@ -1103,6 +1104,10 @@ default boolean preservesExactSource() { return false; } + default boolean ignoreAbove() { + return false; + } + /** * Examples that should work when source is generated from doc values. 
*/ @@ -1321,15 +1326,12 @@ private BlockLoader getBlockLoader(boolean columnReader) { return mapper.fieldType(loaderFieldName).blockLoader(new MappedFieldType.BlockLoaderContext() { @Override public String indexName() { - return "test_index"; + return mapper.getIndexSettings().getIndex().getName(); } @Override public IndexSettings indexSettings() { - var imd = IndexMetadata.builder(indexName()) - .settings(MapperTestCase.indexSettings(IndexVersion.current(), 1, 1).put(Settings.EMPTY)) - .build(); - return new IndexSettings(imd, Settings.EMPTY); + return mapper.getIndexSettings(); } @Override @@ -1362,9 +1364,19 @@ public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { private void testBlockLoader(boolean syntheticSource, boolean columnReader) throws IOException { // TODO if we're not using synthetic source use a different sort of example. Or something. - SyntheticSourceExample example = syntheticSourceSupport(false, columnReader).example(5); + var syntheticSourceSupport = syntheticSourceSupport(false, columnReader); + SyntheticSourceExample example = syntheticSourceSupport.example(5); + if (syntheticSource && columnReader == false) { + // The synthetic source testing support can't always handle now the difference between stored and synthetic source mode. + // In case of ignore above, the ignored values are always appended after the valid values + // (both if field has doc values or stored field). While stored source just reads original values (from _source) and there + // is no notion of values that are ignored. + // TODO: fix this by improving block loader support: https://github.com/elastic/elasticsearch/issues/115257 + assumeTrue("inconsistent synthetic source testing support with ignore above", syntheticSourceSupport.ignoreAbove() == false); + } + // TODO: only rely index.mapping.source.mode setting XContentBuilder mapping = syntheticSource ? syntheticSourceFieldMapping(example.mapping) : fieldMapping(example.mapping); - MapperService mapper = createMapperService(mapping); + MapperService mapper = syntheticSource ? 
createSytheticSourceMapperService(mapping) : createMapperService(mapping); BlockReaderSupport blockReaderSupport = getSupportedReaders(mapper, "field"); if (syntheticSource) { // geo_point and point do not yet support synthetic source @@ -1373,11 +1385,16 @@ private void testBlockLoader(boolean syntheticSource, boolean columnReader) thro blockReaderSupport.syntheticSource ); } - testBlockLoader(columnReader, example, blockReaderSupport); + var sourceLoader = mapper.mappingLookup().newSourceLoader(SourceFieldMetrics.NOOP); + testBlockLoader(columnReader, example, blockReaderSupport, sourceLoader); } - protected final void testBlockLoader(boolean columnReader, SyntheticSourceExample example, BlockReaderSupport blockReaderSupport) - throws IOException { + protected final void testBlockLoader( + boolean columnReader, + SyntheticSourceExample example, + BlockReaderSupport blockReaderSupport, + SourceLoader sourceLoader + ) throws IOException { BlockLoader loader = blockReaderSupport.getBlockLoader(columnReader); Function valuesConvert = loadBlockExpected(blockReaderSupport, columnReader); if (valuesConvert == null) { @@ -1404,9 +1421,15 @@ protected final void testBlockLoader(boolean columnReader, SyntheticSourceExampl return; } } else { + StoredFieldsSpec storedFieldsSpec = loader.rowStrideStoredFieldSpec(); + if (storedFieldsSpec.requiresSource()) { + storedFieldsSpec = storedFieldsSpec.merge( + new StoredFieldsSpec(true, storedFieldsSpec.requiresMetadata(), sourceLoader.requiredStoredFields()) + ); + } BlockLoaderStoredFieldsFromLeafLoader storedFieldsLoader = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(loader.rowStrideStoredFieldSpec()).getLoader(ctx, null), - loader.rowStrideStoredFieldSpec().requiresSource() ? SourceLoader.FROM_STORED_SOURCE.leaf(ctx.reader(), null) : null + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + storedFieldsSpec.requiresSource() ? 
sourceLoader.leaf(ctx.reader(), null) : null ); storedFieldsLoader.advanceTo(0); BlockLoader.Builder builder = loader.builder(TestBlock.factory(ctx.reader().numDocs()), 1); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java index b6a031c9ff906..97ded7f9a06f2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java @@ -51,24 +51,9 @@ public static MapperTestCase.BlockReaderSupport getSupportedReaders(MapperServic } public static Function loadBlockExpected(MapperTestCase.BlockReaderSupport blockReaderSupport, boolean columnReader) { - if (nullLoaderExpected(blockReaderSupport.mapper(), blockReaderSupport.loaderFieldName())) { - return null; - } return v -> ((BytesRef) v).utf8ToString(); } - private static boolean nullLoaderExpected(MapperService mapper, String fieldName) { - MappedFieldType type = mapper.fieldType(fieldName); - if (type instanceof TextFieldMapper.TextFieldType t) { - if (t.isSyntheticSource() == false || t.canUseSyntheticSourceDelegateForQuerying() || t.isStored()) { - return false; - } - String parentField = mapper.mappingLookup().parentField(fieldName); - return parentField == null || nullLoaderExpected(mapper, parentField); - } - return false; - } - public static void validateRoundTripReader(String syntheticSource, DirectoryReader reader, DirectoryReader roundTripReader) { // `reader` here is reader of original document and `roundTripReader` reads document // created from synthetic source. @@ -98,6 +83,11 @@ private static class TextFieldFamilySyntheticSourceSupport implements MapperTest ); } + @Override + public boolean ignoreAbove() { + return keywordMultiFieldSyntheticSourceSupport.ignoreAbove(); + } + @Override public MapperTestCase.SyntheticSourceExample example(int maxValues) { if (store) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index ee747d98c26f8..74affb10eaf20 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -241,6 +241,12 @@ private void loadFromSingleLeaf(Block[] blocks, int shard, int segment, BlockLoa } } + SourceLoader sourceLoader = null; + if (storedFieldsSpec.requiresSource()) { + sourceLoader = shardContexts.get(shard).newSourceLoader.get(); + storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(true, false, sourceLoader.requiredStoredFields())); + } + if (rowStrideReaders.isEmpty()) { return; } @@ -259,7 +265,7 @@ private void loadFromSingleLeaf(Block[] blocks, int shard, int segment, BlockLoa } BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader( storedFieldLoader.getLoader(ctx, null), - storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null + sourceLoader != null ? 
sourceLoader.leaf(ctx.reader(), null) : null ); for (int p = 0; p < docs.count(); p++) { int doc = docs.get(p); @@ -381,13 +387,18 @@ private void fieldsMoved(LeafReaderContext ctx, int shard) throws IOException { FieldWork field = fields[f]; rowStride[f] = field.rowStride(ctx); storedFieldsSpec = storedFieldsSpec.merge(field.loader.rowStrideStoredFieldSpec()); - storedFields = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), - storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null - ); - if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { - trackStoredFields(storedFieldsSpec, false); - } + } + SourceLoader sourceLoader = null; + if (storedFieldsSpec.requiresSource()) { + sourceLoader = shardContexts.get(shard).newSourceLoader.get(); + storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(true, false, sourceLoader.requiredStoredFields())); + } + storedFields = new BlockLoaderStoredFieldsFromLeafLoader( + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + sourceLoader != null ? sourceLoader.leaf(ctx.reader(), null) : null + ); + if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { + trackStoredFields(storedFieldsSpec, false); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index fb18cfb4959c7..4c07516051287 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -617,8 +617,7 @@ private String generateInvalidQueryInferenceResultsMessage(StringBuilder baseMes @Override public BlockLoader blockLoader(MappedFieldType.BlockLoaderContext blContext) { SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name().concat(".text"))); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll(), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); } } diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml index 68597afda6c78..bc81d1eb67309 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml @@ -283,7 +283,7 @@ teardown: - match: {values.0.3: "PUT"} - match: {values.0.4: false} - match: {values.0.5: "POINT (-74.006 40.7128)"} - - match: {values.0.6: null} # null is expected, because text fields aren't stored in ignored source + - match: {values.0.6: "Do. Or do not. 
There is no try."} - match: {values.0.7: 102} - do: @@ -296,10 +296,86 @@ teardown: - match: {columns.0.name: "message"} - match: {columns.0.type: "text"} - # null is expected, because text fields aren't stored in ignored source - - match: {values.0.0: null} - - match: {values.1.0: null} - - match: {values.2.0: null} - - match: {values.3.0: null} - - match: {values.4.0: null} - - match: {values.5.0: null} + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. It's a space station."} + +--- +"message field with keyword multi-field with ignore_above": + - do: + indices.create: + index: my-index2 + body: + settings: + index: + mode: logsdb + mappings: + properties: + "@timestamp": + type: date + host.name: + type: keyword + store: false + message: + type: text + store: false + fields: + raw: + type: keyword + ignore_above: 3 + + - do: + bulk: + index: my-index2 + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2024-02-12T10:30:00Z", "host.name": "foo", "message": "No, I am your father." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:31:00Z", "host.name": "bar", "message": "Do. Or do not. There is no try." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:32:00Z", "host.name": "foo", "message": "May the force be with you." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:33:00Z", "host.name": "baz", "message": "I find your lack of faith disturbing." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:34:00Z", "host.name": "baz", "message": "Wars not make one great." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:35:00Z", "host.name": "foo", "message": "That's no moon. It's a space station." } + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | LIMIT 1' + + - match: {columns.0.name: "@timestamp"} + - match: {columns.0.type: "date"} + - match: {columns.1.name: "host.name"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "message"} + - match: {columns.2.type: "text"} + - match: {columns.3.name: "message.raw"} + - match: {columns.3.type: "keyword"} + + - match: {values.0.0: "2024-02-12T10:31:00.000Z"} + - match: {values.0.1: "bar"} + - match: {values.0.2: "Do. Or do not. There is no try."} + # Note that isn't related to synthetic source. For both stored and synthetic source null is returned: +# - match: {values.0.3: "Do. Or do not. There is no try."} + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | KEEP message | LIMIT 10' + + - match: {columns.0.name: "message"} + - match: {columns.0.type: "text"} + + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. 
It's a space station."} diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml index 7e305bda4ef4e..6c840a0cf9d3a 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml @@ -175,3 +175,80 @@ teardown: - match: {values.3.0: "No, I am your father."} - match: {values.4.0: "May the force be with you."} - match: {values.5.0: "That's no moon. It's a space station."} + +--- +"message field with stored keyword multi-field with ignore_above": + - do: + indices.create: + index: my-index2 + body: + settings: + index: + mode: logsdb + mappings: + properties: + "@timestamp": + type: date + host.name: + type: keyword + store: false + message: + type: text + store: false + fields: + raw: + type: keyword + store: true + + - do: + bulk: + index: my-index2 + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2024-02-12T10:30:00Z", "host.name": "foo", "message": "No, I am your father." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:31:00Z", "host.name": "bar", "message": "Do. Or do not. There is no try." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:32:00Z", "host.name": "foo", "message": "May the force be with you." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:33:00Z", "host.name": "baz", "message": "I find your lack of faith disturbing." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:34:00Z", "host.name": "baz", "message": "Wars not make one great." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:35:00Z", "host.name": "foo", "message": "That's no moon. It's a space station." } + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | LIMIT 1' + + - match: {columns.0.name: "@timestamp"} + - match: {columns.0.type: "date"} + - match: {columns.1.name: "host.name"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "message"} + - match: {columns.2.type: "text"} + - match: {columns.3.name: "message.raw"} + - match: {columns.3.type: "keyword"} + + - match: {values.0.0: "2024-02-12T10:31:00.000Z"} + - match: {values.0.1: "bar"} + - match: {values.0.2: "Do. Or do not. There is no try."} + - match: {values.0.3: "Do. Or do not. There is no try."} + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | KEEP message | LIMIT 10' + + - match: {columns.0.name: "message"} + - match: {columns.0.type: "text"} + + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. 
It's a space station."} + diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index e8fd0da496bbe..b43d87c17e644 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -339,8 +339,7 @@ protected Object parseSourceValue(Object value) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.LongsBlockLoader(valueFetcher, lookup, sourceMode); + return new BlockSourceReader.LongsBlockLoader(valueFetcher, lookup); } @Override From 291ced7b482ab952f420993f43cacac49a2f9a9e Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 23 Oct 2024 10:23:32 +0100 Subject: [PATCH 048/202] Change from Version to BuildVersion in PersistedClusterStateService (#115301) --- .../elasticsearch/env/NodeEnvironmentIT.java | 4 ++-- .../org/elasticsearch/env/BuildVersion.java | 1 - .../env/DefaultBuildVersion.java | 2 +- .../org/elasticsearch/env/NodeMetadata.java | 5 ---- .../env/OverrideNodeVersionCommand.java | 6 ++--- .../gateway/PersistedClusterStateService.java | 24 +++++++------------ .../elasticsearch/env/NodeMetadataTests.java | 8 ------- .../env/OverrideNodeVersionCommandTests.java | 18 +++++++------- .../PersistedClusterStateServiceTests.java | 10 +++++--- 9 files changed, 32 insertions(+), 46 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java index f813932ebe924..ecd5c5af8649f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -123,7 +123,7 @@ public Settings onNodeStopped(String nodeName) { public void testFailsToStartIfDowngraded() { final IllegalStateException illegalStateException = expectThrowsOnRestart( - dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooNewVersion(), dataPaths) + dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooNewBuildVersion(), dataPaths) ); assertThat( illegalStateException.getMessage(), @@ -133,7 +133,7 @@ public void testFailsToStartIfDowngraded() { public void testFailsToStartIfUpgradedTooFar() { final IllegalStateException illegalStateException = expectThrowsOnRestart( - dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooOldVersion(), dataPaths) + dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooOldBuildVersion(), dataPaths) ); assertThat( illegalStateException.getMessage(), diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 0de346249ccbc..42c45a14977eb 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -59,7 +59,6 @@ public abstract class BuildVersion { public abstract boolean isFutureVersion(); // 
temporary - // TODO[wrb]: remove from PersistedClusterStateService // TODO[wrb]: remove from security bootstrap checks @Deprecated public Version toVersion() { diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index e0531b5a192a0..dcc5ed3aee3f8 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -72,6 +72,6 @@ public int hashCode() { @Override public String toString() { - return Version.fromId(versionId).toString(); + return version.toString(); } } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 6a72a7e7fcda5..5b2ee39c1b622 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -42,7 +42,6 @@ public final class NodeMetadata { private final IndexVersion oldestIndexVersion; - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // version should be non-null in the node metadata from v9 onwards private NodeMetadata( final String nodeId, final BuildVersion buildVersion, @@ -112,11 +111,7 @@ public IndexVersion oldestIndexVersion() { return oldestIndexVersion; } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) public void verifyUpgradeToCurrentVersion() { - // Enable the following assertion for V9: - // assert (nodeVersion.equals(BuildVersion.empty()) == false) : "version is required in the node metadata from v9 onwards"; - if (nodeVersion.onOrAfterMinimumCompatible() == false) { throw new IllegalStateException( "cannot upgrade a node from version [" diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java index 96158965cddfe..1ddc8d5b26bd9 100644 --- a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java +++ b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -74,7 +74,7 @@ protected void processDataPaths(Terminal terminal, Path[] paths, OptionSet optio "found [" + nodeMetadata + "] which is compatible with current version [" - + Version.CURRENT + + BuildVersion.current() + "], so there is no need to override the version checks" ); } catch (IllegalStateException e) { @@ -86,10 +86,10 @@ protected void processDataPaths(Terminal terminal, Path[] paths, OptionSet optio (nodeMetadata.nodeVersion().onOrAfterMinimumCompatible() == false ? 
TOO_OLD_MESSAGE : TOO_NEW_MESSAGE).replace( "V_OLD", nodeMetadata.nodeVersion().toString() - ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", Version.CURRENT.toString()) + ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", BuildVersion.current().toString()) ); - PersistedClusterStateService.overrideVersion(Version.CURRENT, paths); + PersistedClusterStateService.overrideVersion(BuildVersion.current(), paths); terminal.println(SUCCESS_MESSAGE); } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index 0c6cf2c8a0761..92b8686700a05 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -42,7 +42,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -159,8 +158,6 @@ public class PersistedClusterStateService { public static final int IS_LAST_PAGE = 1; public static final int IS_NOT_LAST_PAGE = 0; private static final int COMMIT_DATA_SIZE = 7; - // We added CLUSTER_UUID_KEY and CLUSTER_UUID_COMMITTED_KEY in 8.8 - private static final int COMMIT_DATA_SIZE_BEFORE_8_8 = 5; private static final MergePolicy NO_MERGE_POLICY = noMergePolicy(); private static final MergePolicy DEFAULT_MERGE_POLICY = defaultMergePolicy(); @@ -350,7 +347,7 @@ public record OnDiskStateMetadata( @Nullable public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { String nodeId = null; - Version version = null; + BuildVersion version = null; IndexVersion oldestIndexVersion = IndexVersions.ZERO; for (final Path dataPath : dataPaths) { final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); @@ -367,7 +364,7 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { ); } else if (nodeId == null) { nodeId = thisNodeId; - version = Version.fromId(Integer.parseInt(userData.get(NODE_VERSION_KEY))); + version = BuildVersion.fromVersionId(Integer.parseInt(userData.get(NODE_VERSION_KEY))); if (userData.containsKey(OLDEST_INDEX_VERSION_KEY)) { oldestIndexVersion = IndexVersion.fromId(Integer.parseInt(userData.get(OLDEST_INDEX_VERSION_KEY))); } else { @@ -382,14 +379,13 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { if (nodeId == null) { return null; } - // TODO: remove use of Version here (ES-7343) - return new NodeMetadata(nodeId, BuildVersion.fromVersionId(version.id()), oldestIndexVersion); + return new NodeMetadata(nodeId, version, oldestIndexVersion); } /** * Overrides the version field for the metadata in the given data path */ - public static void overrideVersion(Version newVersion, Path... dataPaths) throws IOException { + public static void overrideVersion(BuildVersion newVersion, Path... dataPaths) throws IOException { for (final Path dataPath : dataPaths) { final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); if (Files.exists(indexPath)) { @@ -399,7 +395,7 @@ public static void overrideVersion(Version newVersion, Path... 
dataPaths) throws try (IndexWriter indexWriter = createIndexWriter(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) { final Map commitData = new HashMap<>(userData); - commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id)); + commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id())); commitData.put(OVERRIDDEN_NODE_VERSION_KEY, Boolean.toString(true)); indexWriter.setLiveCommitData(commitData.entrySet()); indexWriter.commit(); @@ -664,11 +660,9 @@ public OnDiskStateMetadata loadOnDiskStateMetadataFromUserData(Map commitData = Maps.newMapWithExpectedSize(COMMIT_DATA_SIZE); commitData.put(CURRENT_TERM_KEY, Long.toString(currentTerm)); commitData.put(LAST_ACCEPTED_VERSION_KEY, Long.toString(lastAcceptedVersion)); - commitData.put(NODE_VERSION_KEY, Integer.toString(Version.CURRENT.id)); + commitData.put(NODE_VERSION_KEY, Integer.toString(BuildVersion.current().id())); commitData.put(OLDEST_INDEX_VERSION_KEY, Integer.toString(oldestIndexVersion.id())); commitData.put(NODE_ID_KEY, nodeId); commitData.put(CLUSTER_UUID_KEY, clusterUUID); diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index 8bfd4c7c5ac68..22308e15f4845 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -167,10 +167,6 @@ public void testUpgradeMarksPreviousVersion() { assertThat(nodeMetadata.previousNodeVersion(), equalTo(buildVersion)); } - public static Version tooNewVersion() { - return Version.fromId(between(Version.CURRENT.id + 1, 99999999)); - } - public static IndexVersion tooNewIndexVersion() { return IndexVersion.fromId(between(IndexVersion.current().id() + 1, 99999999)); } @@ -179,10 +175,6 @@ public static BuildVersion tooNewBuildVersion() { return BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, 99999999)); } - public static Version tooOldVersion() { - return Version.fromId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); - } - public static BuildVersion tooOldBuildVersion() { return BuildVersion.fromVersionId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); } diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java index bf3fc1697aa44..c7614e2d98eed 100644 --- a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -96,7 +96,9 @@ public void testFailsOnEmptyPath() { } public void testFailsIfUnnecessary() throws IOException { - final Version nodeVersion = Version.fromId(between(Version.CURRENT.minimumCompatibilityVersion().id, Version.CURRENT.id)); + final BuildVersion nodeVersion = BuildVersion.fromVersionId( + between(Version.CURRENT.minimumCompatibilityVersion().id, Version.CURRENT.id) + ); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); final ElasticsearchException elasticsearchException = expectThrows( @@ -107,7 +109,7 @@ public void testFailsIfUnnecessary() throws IOException { elasticsearchException.getMessage(), allOf( containsString("compatible with current version"), - containsString(Version.CURRENT.toString()), + containsString(BuildVersion.current().toString()), containsString(nodeVersion.toString()) ) ); @@ -115,7 
+117,7 @@ public void testFailsIfUnnecessary() throws IOException { } public void testWarnsIfTooOld() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooOldVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooOldBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput("n"); @@ -137,11 +139,11 @@ public void testWarnsIfTooOld() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); } public void testWarnsIfTooNew() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooNewVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooNewBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no")); @@ -162,11 +164,11 @@ public void testWarnsIfTooNew() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); } public void testOverwritesIfTooOld() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooOldVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooOldBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("y", "Y")); @@ -189,7 +191,7 @@ public void testOverwritesIfTooOld() throws Exception { } public void testOverwritesIfTooNew() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooNewVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooNewBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("y", "Y")); diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java index 450d123f551c8..4428a7e078510 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -54,6 +54,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -1414,14 +1415,17 @@ public void testOverrideLuceneVersion() throws IOException { assertThat(clusterState.metadata().version(), equalTo(version)); } + @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) + BuildVersion overrideVersion = BuildVersion.fromVersionId(Version.V_8_0_0.id); + NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); 
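        // nodeMetadata() now surfaces the persisted node version as a BuildVersion directly,
        // so these assertions compare BuildVersion values rather than round-tripping through
        // Version ids as before.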
assertEquals(BuildVersion.current(), prevMetadata.nodeVersion()); - PersistedClusterStateService.overrideVersion(Version.V_8_0_0, persistedClusterStateService.getDataPaths()); + PersistedClusterStateService.overrideVersion(overrideVersion, persistedClusterStateService.getDataPaths()); NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); - assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), metadata.nodeVersion()); + assertEquals(overrideVersion, metadata.nodeVersion()); for (Path p : persistedClusterStateService.getDataPaths()) { NodeMetadata individualMetadata = PersistedClusterStateService.nodeMetadata(p); - assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), individualMetadata.nodeVersion()); + assertEquals(overrideVersion, individualMetadata.nodeVersion()); } } } From 0f7ddd5c9878e03f6f0ce6ac6bce58c609e25ff5 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 23 Oct 2024 13:34:37 +0100 Subject: [PATCH 049/202] [ML] New names for the default inference endpoints (#115395) The new names are .elser-2-elasticsearch and .multilingual-e5-small-elasticsearch --- .../services/elasticsearch/ElasticsearchInternalService.java | 4 ++-- .../elasticsearch/ElasticsearchInternalServiceTests.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 49919fda9f89d..6732e5719b897 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -85,8 +85,8 @@ public class ElasticsearchInternalService extends BaseElasticsearchInternalServi ); public static final int EMBEDDING_MAX_BATCH_SIZE = 10; - public static final String DEFAULT_ELSER_ID = ".elser-2"; - public static final String DEFAULT_E5_ID = ".multi-e5-small"; + public static final String DEFAULT_ELSER_ID = ".elser-2-elasticsearch"; + public static final String DEFAULT_E5_ID = ".multilingual-e5-small-elasticsearch"; private static final Logger logger = LogManager.getLogger(ElasticsearchInternalService.class); private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ElasticsearchInternalService.class); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index b82b8a08f2175..5ec66687752a8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -1561,8 +1561,8 @@ public void testEmbeddingTypeFromTaskTypeAndSettings() { public void testIsDefaultId() { var service = createService(mock(Client.class)); - assertTrue(service.isDefaultId(".elser-2")); - assertTrue(service.isDefaultId(".multi-e5-small")); + assertTrue(service.isDefaultId(".elser-2-elasticsearch")); + 
assertTrue(service.isDefaultId(".multilingual-e5-small-elasticsearch")); assertFalse(service.isDefaultId("foo")); } From 91a5a2e6a1eefa64bfbd39db62cb50a478f022fb Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 23 Oct 2024 15:45:09 +0300 Subject: [PATCH 050/202] Unmute SearchWithMinCompatibleSearchNodeIT tests muted for 7.17 (#115386) --- muted-tests.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 45b1398df7ace..c248729b539fd 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -23,12 +23,6 @@ tests: - class: org.elasticsearch.index.store.FsDirectoryFactoryTests method: testPreload issue: https://github.com/elastic/elasticsearch/issues/110211 -- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT - method: testMinVersionAsNewVersion - issue: https://github.com/elastic/elasticsearch/issues/95384 -- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT - method: testCcsMinimizeRoundtripsIsFalse - issue: https://github.com/elastic/elasticsearch/issues/101974 - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" From 9e95cbd86b3e2009d679c0bdbacb35f1a8cc7e27 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 24 Oct 2024 00:00:13 +1100 Subject: [PATCH 051/202] Mute org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} #115412 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c248729b539fd..93714e098f677 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -297,6 +297,9 @@ tests: - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 +- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT + method: test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} + issue: https://github.com/elastic/elasticsearch/issues/115412 # Examples: # From 728ac0c8a790fd9afc2dfc970d5c2e39c36cedc9 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Wed, 23 Oct 2024 15:07:04 +0200 Subject: [PATCH 052/202] adaptive allocations: reset time interval with zero requests upon starting an allocation (#115400) --- .../AdaptiveAllocationsScaler.java | 9 ++++-- .../AdaptiveAllocationsScalerService.java | 11 ++++++- .../AdaptiveAllocationsScalerTests.java | 29 +++++++++++++++++++ 3 files changed, 46 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java index bbd63e0d3bfe9..0dec99a9b9bb9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java @@ -33,6 +33,7 @@ public class AdaptiveAllocationsScaler { private final String deploymentId; private final 
KalmanFilter1d requestRateEstimator; private final KalmanFilter1d inferenceTimeEstimator; + private final long scaleToZeroAfterNoRequestsSeconds; private double timeWithoutRequestsSeconds; private int numberOfAllocations; @@ -44,10 +45,11 @@ public class AdaptiveAllocationsScaler { private Double lastMeasuredRequestRate; private Double lastMeasuredInferenceTime; private Long lastMeasuredQueueSize; - private long scaleToZeroAfterNoRequestsSeconds; AdaptiveAllocationsScaler(String deploymentId, int numberOfAllocations, long scaleToZeroAfterNoRequestsSeconds) { this.deploymentId = deploymentId; + this.scaleToZeroAfterNoRequestsSeconds = scaleToZeroAfterNoRequestsSeconds; + // A smoothing factor of 100 roughly means the last 100 measurements have an effect // on the estimated values. The sampling time is 10 seconds, so approximately the // last 15 minutes are taken into account. @@ -67,7 +69,6 @@ public class AdaptiveAllocationsScaler { lastMeasuredRequestRate = null; lastMeasuredInferenceTime = null; lastMeasuredQueueSize = null; - this.scaleToZeroAfterNoRequestsSeconds = scaleToZeroAfterNoRequestsSeconds; } void setMinMaxNumberOfAllocations(Integer minNumberOfAllocations, Integer maxNumberOfAllocations) { @@ -117,6 +118,10 @@ void process(AdaptiveAllocationsScalerService.Stats stats, double timeIntervalSe dynamicsChanged = false; } + void resetTimeWithoutRequests() { + timeWithoutRequestsSeconds = 0; + } + double getLoadLower() { double requestRateLower = Math.max(0.0, requestRateEstimator.lower()); double inferenceTimeLower = Math.max(0.0, inferenceTimeEstimator.hasValue() ? inferenceTimeEstimator.lower() : 1.0); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java index 770e890512935..16ec3ee9b468c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java @@ -188,7 +188,10 @@ Collection observeDouble(Function Date: Wed, 23 Oct 2024 09:16:13 -0400 Subject: [PATCH 053/202] [ML] Increase default queue_capacity to 10_000 and decrease max queue_capacity to 100_000 (#115041) * Increase default queue capacity and decrease max queue capacity * Update docs/changelog/115041.yaml * Update tests to match new constants --- docs/changelog/115041.yaml | 6 ++++++ .../ml/action/StartTrainedModelDeploymentAction.java | 4 ++-- .../StartTrainedModelDeploymentRequestTests.java | 10 +++++----- 3 files changed, 13 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/115041.yaml diff --git a/docs/changelog/115041.yaml b/docs/changelog/115041.yaml new file mode 100644 index 0000000000000..f4c047c1569ec --- /dev/null +++ b/docs/changelog/115041.yaml @@ -0,0 +1,6 @@ +pr: 115041 +summary: Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity` + to 100_000 +area: Machine Learning +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index ca789fee7b744..b298d486c9e03 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -71,7 +71,7 @@ public class StartTrainedModelDeploymentAction extends ActionType implements ToXCon /** * If the queue is created then we can OOM when we create the queue. */ - private static final int MAX_QUEUE_CAPACITY = 1_000_000; + private static final int MAX_QUEUE_CAPACITY = 100_000; public static final ParseField MODEL_ID = new ParseField("model_id"); public static final ParseField DEPLOYMENT_ID = new ParseField("deployment_id"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java index 730d994fc5e35..46fc8a36c2c2b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java @@ -67,7 +67,7 @@ public static Request createRandom() { request.setNumberOfAllocations(randomIntBetween(1, 8)); } if (randomBoolean()) { - request.setQueueCapacity(randomIntBetween(1, 1000000)); + request.setQueueCapacity(randomIntBetween(1, 100_000)); } if (randomBoolean()) { request.setPriority(randomFrom(Priority.values()).toString()); @@ -168,7 +168,7 @@ public void testValidate_GivenQueueCapacityIsNegative() { public void testValidate_GivenQueueCapacityIsAtLimit() { Request request = createRandom(); - request.setQueueCapacity(1_000_000); + request.setQueueCapacity(100_000); ActionRequestValidationException e = request.validate(); @@ -177,12 +177,12 @@ public void testValidate_GivenQueueCapacityIsAtLimit() { public void testValidate_GivenQueueCapacityIsOverLimit() { Request request = createRandom(); - request.setQueueCapacity(1_000_001); + request.setQueueCapacity(100_001); ActionRequestValidationException e = request.validate(); assertThat(e, is(not(nullValue()))); - assertThat(e.getMessage(), containsString("[queue_capacity] must be less than 1000000")); + assertThat(e.getMessage(), containsString("[queue_capacity] must be less than 100000")); } public void testValidate_GivenTimeoutIsNegative() { @@ -234,6 +234,6 @@ public void testDefaults() { assertThat(request.getNumberOfAllocations(), nullValue()); assertThat(request.computeNumberOfAllocations(), equalTo(1)); assertThat(request.getThreadsPerAllocation(), equalTo(1)); - assertThat(request.getQueueCapacity(), equalTo(1024)); + assertThat(request.getQueueCapacity(), equalTo(10_000)); } } From 43a6b3592eda425338ddecd357f11956710e2ca6 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 23 Oct 2024 16:29:03 +0300 Subject: [PATCH 054/202] Unmuting RankDocsRetrieverBuilderTests testRewrite (#115403) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 93714e098f677..f59ca0c213279 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -242,9 +242,6 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114376 -- class: org.elasticsearch.search.retriever.RankDocsRetrieverBuilderTests - method: testRewrite - issue: 
https://github.com/elastic/elasticsearch/issues/114467 - class: org.elasticsearch.packaging.test.DockerTests method: test022InstallPluginsFromLocalArchive issue: https://github.com/elastic/elasticsearch/issues/111063 From 98d53352160d6d2c397ea74567ebc690b701973f Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 23 Oct 2024 15:29:53 +0200 Subject: [PATCH 055/202] ESQL: Disable pushdown of WHERE past STATS (#115308) Fix https://github.com/elastic/elasticsearch/issues/115281 Let's disable the faulty optimization for now and re-introduce it later, correctly. --- docs/changelog/115308.yaml | 6 +++ .../src/main/resources/stats.csv-spec | 27 +++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 7 +++- .../logical/PushDownAndCombineFilters.java | 15 ++----- .../optimizer/LogicalPlanOptimizerTests.java | 40 +++++++++++-------- .../PushDownAndCombineFiltersTests.java | 1 + 6 files changed, 67 insertions(+), 29 deletions(-) create mode 100644 docs/changelog/115308.yaml diff --git a/docs/changelog/115308.yaml b/docs/changelog/115308.yaml new file mode 100644 index 0000000000000..163f0232a3e58 --- /dev/null +++ b/docs/changelog/115308.yaml @@ -0,0 +1,6 @@ +pr: 115308 +summary: "ESQL: Disable pushdown of WHERE past STATS" +area: ES|QL +type: bug +issues: + - 115281 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index ac4351413129e..6d4c596e8d7de 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -2291,6 +2291,33 @@ m:integer |a:double |x:integer 74999 |48249.0 |0 ; +statsWithFilterOnGroups +required_capability: fix_filter_pushdown_past_stats +FROM employees +| STATS v = VALUES(emp_no) by job_positions | WHERE job_positions == "Accountant" | MV_EXPAND v | SORT v +; + +v:integer | job_positions:keyword + 10001 | Accountant + 10012 | Accountant + 10016 | Accountant + 10023 | Accountant + 10025 | Accountant + 10028 | Accountant + 10034 | Accountant + 10037 | Accountant + 10044 | Accountant + 10045 | Accountant + 10050 | Accountant + 10051 | Accountant + 10066 | Accountant + 10081 | Accountant + 10085 | Accountant + 10089 | Accountant + 10092 | Accountant + 10094 | Accountant +; + statsWithFiltering required_capability: per_agg_filtering diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 94211e4726a2c..5157a80022c39 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -411,7 +411,12 @@ public enum Cap { /** * Support for semantic_text field mapping */ - SEMANTIC_TEXT_TYPE(EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG); + SEMANTIC_TEXT_TYPE(EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG), + /** + * Fix for an optimization that caused wrong results + * https://github.com/elastic/elasticsearch/issues/115281 + */ + FIX_FILTER_PUSHDOWN_PAST_STATS; private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java index ed09d0bc16754..15e49c22a44db 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java @@ -15,8 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Filter; @@ -38,18 +36,13 @@ protected LogicalPlan rule(Filter filter) { LogicalPlan child = filter.child(); Expression condition = filter.condition(); + // TODO: Push down past STATS if the filter is only on the groups; but take into account how `STATS ... BY field` handles + // multi-values: It seems to be equivalent to `EVAL field = MV_DEDUPE(field) | MV_EXPAND(field) | STATS ... BY field`, where the + // last `STATS ... BY field` can assume that `field` is single-valued (to be checked more thoroughly). + // https://github.com/elastic/elasticsearch/issues/115311 if (child instanceof Filter f) { // combine nodes into a single Filter with updated ANDed condition plan = f.with(Predicates.combineAnd(List.of(f.condition(), condition))); - } else if (child instanceof Aggregate agg) { // TODO: re-evaluate along with multi-value support - // Only push [parts of] a filter past an agg if these/it operates on agg's grouping[s], not output. - plan = maybePushDownPastUnary( - filter, - agg, - e -> e instanceof Attribute && agg.output().contains(e) && agg.groupings().contains(e) == false - || e instanceof AggregateFunction, - NO_OP - ); } else if (child instanceof Eval eval) { // Don't push if Filter (still) contains references to Eval's fields. // Account for simple aliases in the Eval, though - these shouldn't stop us. 
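The hazard described in the TODO above is easy to reproduce outside the optimizer. The sketch below is plain, self-contained Java (illustrative names only, not code from this patch) modelling a single row whose grouping field is multi-valued: `STATS ... BY field` implicitly expands the multi-value, so one row can feed several groups, whereas a row-level `WHERE field == x` pushed below the aggregation compares against the whole multi-value and (in ES|QL) fails to match, dropping the row. The two plans therefore disagree exactly in the case the new statsWithFilterOnGroups spec test exercises.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    public class StatsPushdownSketch {
        record Row(int empNo, List<String> jobPositions) {}

        public static void main(String[] args) {
            List<Row> rows = List.of(new Row(10001, List.of("Accountant", "Janitor")));

            // STATS v = VALUES(emp_no) BY job_positions | WHERE job_positions == "Accountant":
            // grouping expands the multi-value, so the row lands in both groups and the
            // post-aggregation filter keeps the "Accountant" group.
            Map<String, List<Integer>> groups = new TreeMap<>();
            for (Row r : rows) {
                for (String p : r.jobPositions()) {
                    groups.computeIfAbsent(p, k -> new ArrayList<>()).add(r.empNo());
                }
            }
            groups.keySet().removeIf(g -> g.equals("Accountant") == false);
            System.out.println(groups); // {Accountant=[10001]}

            // Pushed-down variant: WHERE job_positions == "Accountant" | STATS ... BY job_positions:
            // row-level equality against the multi-valued field does not match, the row is
            // dropped before grouping, and the "Accountant" group disappears.
            List<Row> filtered = rows.stream().filter(r -> r.jobPositions().equals(List.of("Accountant"))).toList();
            System.out.println(filtered.isEmpty()); // true
        }
    }
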
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 8d7c1997f78e3..ff7675504d6ff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -738,6 +738,7 @@ public void testMultipleCombineLimits() { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivelyPushDownFilterPastRefAgg() { // expected plan: "from test | where emp_no > 1 and emp_no < 3 | stats x = count(1) by emp_no | where x > 7" LogicalPlan plan = optimizedPlan(""" @@ -790,6 +791,7 @@ public void testNoPushDownOrFilterPastAgg() { assertTrue(stats.child() instanceof EsRelation); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivePushDownComplexFilterPastAgg() { // expected plan: from test | emp_no > 0 | stats x = count(1) by emp_no | where emp_no < 3 or x > 9 LogicalPlan plan = optimizedPlan(""" @@ -1393,13 +1395,15 @@ public void testPushDownLimitThroughMultipleSort_AfterMvExpand2() { } /** + * TODO: Push down the filter correctly https://github.com/elastic/elasticsearch/issues/115311 + * * Expected * Limit[5[INTEGER]] - * \_Aggregate[[first_name{f}#232],[MAX(salary{f}#233) AS max_s, first_name{f}#232]] - * \_Filter[ISNOTNULL(first_name{f}#232)] - * \_MvExpand[first_name{f}#232] - * \_TopN[[Order[emp_no{f}#231,ASC,LAST]],50[INTEGER]] - * \_EsRelation[employees][emp_no{f}#231, first_name{f}#232, salary{f}#233] + * \_Filter[ISNOTNULL(first_name{r}#23)] + * \_Aggregate[STANDARD,[first_name{r}#23],[MAX(salary{f}#18,true[BOOLEAN]) AS max_s, first_name{r}#23]] + * \_MvExpand[first_name{f}#14,first_name{r}#23] + * \_TopN[[Order[emp_no{f}#13,ASC,LAST]],50[INTEGER]] + * \_EsRelation[test][_meta_field{f}#19, emp_no{f}#13, first_name{f}#14, ..] 
*/ public void testDontPushDownLimitPastAggregate_AndMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1413,10 +1417,10 @@ public void testDontPushDownLimitPastAggregate_AndMvExpand() { | limit 5"""); var limit = as(plan, Limit.class); + var filter = as(limit.child(), Filter.class); assertThat(limit.limit().fold(), equalTo(5)); - var agg = as(limit.child(), Aggregate.class); - var filter = as(agg.child(), Filter.class); - var mvExp = as(filter.child(), MvExpand.class); + var agg = as(filter.child(), Aggregate.class); + var mvExp = as(agg.child(), MvExpand.class); var topN = as(mvExp.child(), TopN.class); assertThat(topN.limit().fold(), equalTo(50)); assertThat(orderNames(topN), contains("emp_no")); @@ -1424,14 +1428,16 @@ public void testDontPushDownLimitPastAggregate_AndMvExpand() { } /** + * TODO: Push down the filter correctly https://github.com/elastic/elasticsearch/issues/115311 + * * Expected * Limit[5[INTEGER]] - * \_Aggregate[[first_name{f}#262],[MAX(salary{f}#263) AS max_s, first_name{f}#262]] - * \_Filter[ISNOTNULL(first_name{f}#262)] - * \_Limit[50[INTEGER]] - * \_MvExpand[first_name{f}#262] - * \_Limit[50[INTEGER]] - * \_EsRelation[employees][emp_no{f}#261, first_name{f}#262, salary{f}#263] + * \_Filter[ISNOTNULL(first_name{r}#22)] + * \_Aggregate[STANDARD,[first_name{r}#22],[MAX(salary{f}#17,true[BOOLEAN]) AS max_s, first_name{r}#22]] + * \_Limit[50[INTEGER]] + * \_MvExpand[first_name{f}#13,first_name{r}#22] + * \_Limit[50[INTEGER]] + * \_EsRelation[test][_meta_field{f}#18, emp_no{f}#12, first_name{f}#13, ..] */ public void testPushDown_TheRightLimit_PastMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1445,9 +1451,9 @@ public void testPushDown_TheRightLimit_PastMvExpand() { var limit = as(plan, Limit.class); assertThat(limit.limit().fold(), equalTo(5)); - var agg = as(limit.child(), Aggregate.class); - var filter = as(agg.child(), Filter.class); - limit = as(filter.child(), Limit.class); + var filter = as(limit.child(), Filter.class); + var agg = as(filter.child(), Aggregate.class); + limit = as(agg.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(50)); var mvExp = as(limit.child(), MvExpand.class); limit = as(mvExp.child(), Limit.class); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java index 49a738f4f4fa3..e159e5ed0bd7d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java @@ -213,6 +213,7 @@ public void testPushDownLikeRlikeFilter() { // from ... | where a > 1 | stats count(1) by b | where count(1) >= 3 and b < 2 // => ... 
| where a > 1 and b < 2 | stats count(1) by b | where count(1) >= 3 + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivelyPushDownFilterPastFunctionAgg() { EsRelation relation = relation(); GreaterThan conditionA = greaterThanOf(getFieldAttribute("a"), ONE); From 1ca39789e27ce4ca4c1f4e88112aa24a87d96649 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 23 Oct 2024 17:40:18 +0300 Subject: [PATCH 056/202] Updating error handling for compound retrievers (#115277) --- .../retriever/CompoundRetrieverBuilder.java | 19 +++++- .../xpack/rank/rrf/RRFRetrieverBuilderIT.java | 63 ++++++++++++++++--- 2 files changed, 74 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index 7373bc5b75049..b15798db95b6f 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -11,6 +11,8 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.search.MultiSearchRequest; @@ -20,6 +22,7 @@ import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.StoredFieldsContext; @@ -121,10 +124,17 @@ public final RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOExceptio public void onResponse(MultiSearchResponse items) { List topDocs = new ArrayList<>(); List failures = new ArrayList<>(); + // capture the max status code returned by any of the responses + int statusCode = RestStatus.OK.getStatus(); + List retrieversWithFailures = new ArrayList<>(); for (int i = 0; i < items.getResponses().length; i++) { var item = items.getResponses()[i]; if (item.isFailure()) { failures.add(item.getFailure()); + retrieversWithFailures.add(innerRetrievers.get(i).retriever().getName()); + if (ExceptionsHelper.status(item.getFailure()).getStatus() > statusCode) { + statusCode = ExceptionsHelper.status(item.getFailure()).getStatus(); + } } else { assert item.getResponse() != null; var rankDocs = getRankDocs(item.getResponse()); @@ -133,7 +143,14 @@ public void onResponse(MultiSearchResponse items) { } } if (false == failures.isEmpty()) { - IllegalStateException ex = new IllegalStateException("Search failed - some nested retrievers returned errors."); + assert statusCode != RestStatus.OK.getStatus(); + final String errMessage = "[" + + getName() + + "] search failed - retrievers '" + + retrieversWithFailures + + "' returned errors. 
" + + "All failures are attached as suppressed exceptions."; + Exception ex = new ElasticsearchStatusException(errMessage, RestStatus.fromCode(statusCode)); failures.forEach(ex::addSuppressed); listener.onFailure(ex); } else { diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java index c5978219d94d3..37e1807d138aa 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.rank.rrf; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -18,6 +20,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -47,7 +50,6 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -589,11 +591,11 @@ public void testRRFExplainWithAnotherNestedRRF() { }); } - public void testRRFInnerRetrieverSearchError() { + public void testRRFInnerRetrieverAll4xxSearchErrors() { final int rankWindowSize = 100; final int rankConstant = 10; SearchSourceBuilder source = new SearchSourceBuilder(); - // this will throw an error during evaluation + // this will throw a 4xx error during evaluation StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder( QueryBuilders.constantScoreQuery(QueryBuilders.rangeQuery(VECTOR_FIELD).gte(10)) ); @@ -615,10 +617,57 @@ public void testRRFInnerRetrieverSearchError() { ) ); SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); - Exception ex = expectThrows(IllegalStateException.class, req::get); - assertThat(ex, instanceOf(IllegalStateException.class)); - assertThat(ex.getMessage(), containsString("Search failed - some nested retrievers returned errors")); - assertThat(ex.getSuppressed().length, greaterThan(0)); + Exception ex = expectThrows(ElasticsearchStatusException.class, req::get); + assertThat(ex, instanceOf(ElasticsearchStatusException.class)); + assertThat( + ex.getMessage(), + containsString( + "[rrf] search failed - retrievers '[standard]' returned errors. All failures are attached as suppressed exceptions." 
+ ) + ); + assertThat(ExceptionsHelper.status(ex), equalTo(RestStatus.BAD_REQUEST)); + assertThat(ex.getSuppressed().length, equalTo(1)); + assertThat(ex.getSuppressed()[0].getCause().getCause(), instanceOf(IllegalArgumentException.class)); + } + + public void testRRFInnerRetrieverMultipleErrorsOne5xx() { + final int rankWindowSize = 100; + final int rankConstant = 10; + SearchSourceBuilder source = new SearchSourceBuilder(); + // this will throw a 4xx error during evaluation + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder( + QueryBuilders.constantScoreQuery(QueryBuilders.rangeQuery(VECTOR_FIELD).gte(10)) + ); + // this will throw a 5xx error + TestRetrieverBuilder testRetrieverBuilder = new TestRetrieverBuilder("val") { + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + searchSourceBuilder.aggregation(AggregationBuilders.avg("some_invalid_param")); + } + }; + source.retriever( + new RRFRetrieverBuilder( + Arrays.asList( + new CompoundRetrieverBuilder.RetrieverSource(standard0, null), + new CompoundRetrieverBuilder.RetrieverSource(testRetrieverBuilder, null) + ), + rankWindowSize, + rankConstant + ) + ); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + Exception ex = expectThrows(ElasticsearchStatusException.class, req::get); + assertThat(ex, instanceOf(ElasticsearchStatusException.class)); + assertThat( + ex.getMessage(), + containsString( + "[rrf] search failed - retrievers '[standard, test]' returned errors. All failures are attached as suppressed exceptions." + ) + ); + assertThat(ExceptionsHelper.status(ex), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + assertThat(ex.getSuppressed().length, equalTo(2)); + assertThat(ex.getSuppressed()[0].getCause().getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(ex.getSuppressed()[1].getCause().getCause(), instanceOf(IllegalStateException.class)); } public void testRRFInnerRetrieverErrorWhenExtractingToSource() { From c6bd53f21b0a0fde6f51c97545543dbd2a2f7c65 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 23 Oct 2024 17:17:38 +0200 Subject: [PATCH 057/202] Fix `FileSettingsRoleMappingUpgradeIT` assertions (#115422) Fixes some faulty assertions in an upgrade test. Test failures only manifest on the 8.16 branch since 9.x does not qualify for these upgrade tests, and the change is not backported to 8.17 yet (unrelated CI failures). I validated this works by running it locally from the 8.16 branch. 
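The substance of the fix is a matcher swap in the diff below: Hamcrest's `contains` asserts that the collection holds exactly the given items and nothing else, while `hasItem` asserts membership only, which tolerates the extra mappings that may linger until the clean-up migration lands. A minimal, self-contained illustration (plain Hamcrest, illustrative names, not part of this patch):

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.contains;
    import static org.hamcrest.Matchers.hasItem;

    import java.util.List;

    public class MatcherSemanticsSketch {
        public static void main(String[] args) {
            // After the upgrade the role-mapping API may still report a leftover entry
            // alongside the file-settings one ("leftover-mapping" is hypothetical).
            List<String> mappings = List.of("everyone_kibana-read-only-operator-mapping", "leftover-mapping");

            assertThat(mappings, hasItem("everyone_kibana-read-only-operator-mapping")); // passes
            try {
                assertThat(mappings, contains("everyone_kibana-read-only-operator-mapping"));
            } catch (AssertionError expected) {
                // contains fails: the list holds one item more than the matcher allows
                System.out.println(expected.getMessage());
            }
        }
    }
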
Resolves: https://github.com/elastic/elasticsearch/issues/115410 Resolves: https://github.com/elastic/elasticsearch/issues/115411 --- .../FileSettingsRoleMappingUpgradeIT.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java index b3d4dfc68d399..834d97f755dfb 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java @@ -25,12 +25,10 @@ import java.io.IOException; import java.util.List; -import java.util.Map; import java.util.function.Supplier; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -104,15 +102,17 @@ public void testRoleMappingsAppliedOnUpgrade() throws IOException { // the nodes have all been upgraded. Check they re-processed the role mappings in the settings file on // upgrade Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); - List roleMappings = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))).get( - "metadata.role_mappings.role_mappings" + List clusterStateRoleMappings = new XContentTestUtils.JsonMapView( + entityAsMap(client().performRequest(clusterStateRequest)) + ).get("metadata.role_mappings.role_mappings"); + assertThat(clusterStateRoleMappings, is(not(nullValue()))); + assertThat(clusterStateRoleMappings.size(), equalTo(1)); + + assertThat( + entityAsMap(client().performRequest(new Request("GET", "/_security/role_mapping"))).keySet(), + // TODO change this to `contains` once the clean-up migration work is merged + hasItem("everyone_kibana-read-only-operator-mapping") ); - assertThat(roleMappings, is(not(nullValue()))); - assertThat(roleMappings.size(), equalTo(1)); - assertThat(roleMappings, is(instanceOf(Map.class))); - @SuppressWarnings("unchecked") - Map roleMapping = (Map) roleMappings; - assertThat(roleMapping.keySet(), contains("everyone_kibana-read-only-operator-mapping")); } } } From 1c38f87db49e7ea1bc0f3e80d2fe5561b3038caf Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 23 Oct 2024 11:24:55 -0400 Subject: [PATCH 058/202] Refactor PipelineConfiguration#getVersion (#115423) --- .../ingest/PipelineConfiguration.java | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index de172d86b810d..7406ee8837264 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -99,18 +99,13 @@ BytesReference getConfig() { } public Integer getVersion() { - var configMap = getConfigAsMap(); - if (configMap.containsKey("version")) { - Object o = configMap.get("version"); - if (o == null) { - return null; - } else if (o instanceof Number number) { - return number.intValue(); - } else { - throw new IllegalStateException("unexpected version type 
[" + o.getClass().getName() + "]"); - } - } else { + Object o = getConfigAsMap().get("version"); + if (o == null) { return null; + } else if (o instanceof Number number) { + return number.intValue(); + } else { + throw new IllegalStateException("unexpected version type [" + o.getClass().getName() + "]"); } } From 4bbedb8aedb27b338f11a692e421e0a212f39f45 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Wed, 23 Oct 2024 18:03:58 +0200 Subject: [PATCH 059/202] Fix file settings service test on windows (#115234) Fix unit test on windows: it looks like the replace-existing flag is necessary to avoid AccessDeniedExceptions like this [example failure](https://gradle-enterprise.elastic.co/s/4tjgx5vzblv36/tests/task/:server:test/details/org.elasticsearch.reservedstate.service.FileSettingsServiceTests/testProcessFileChanges?top-execution=1). Resolves: https://github.com/elastic/elasticsearch/issues/115280 --- .../service/FileSettingsServiceTests.java | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 8ee2754427dda..c0657b5888ad2 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -250,7 +250,7 @@ public void testProcessFileChanges() throws Exception { fileSettingsService.start(); fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); // second file change; contents still don't matter - writeTestFile(fileSettingsService.watchedFile(), "{}"); + overwriteTestFile(fileSettingsService.watchedFile(), "{}"); // wait for listener to be called (once for initial processing, once for subsequent update) assertTrue(latch.await(20, TimeUnit.SECONDS)); @@ -355,6 +355,12 @@ public void testHandleSnapshotRestoreResetsMetadata() throws Exception { private void writeTestFile(Path path, String contents) throws IOException { Path tempFilePath = createTempFile(); Files.writeString(tempFilePath, contents); - Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE); + Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); + } + + private void overwriteTestFile(Path path, String contents) throws IOException { + Path tempFilePath = createTempFile(); + Files.writeString(tempFilePath, contents); + Files.move(tempFilePath, path, StandardCopyOption.REPLACE_EXISTING); } } From e581ae3482624983eeb4e201299735ed4ebc1066 Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Wed, 23 Oct 2024 12:57:59 -0400 Subject: [PATCH 060/202] Reorder docs sidebar (#115360) --- docs/reference/index.asciidoc | 62 ++++++++++++------- .../release-notes/highlights.asciidoc | 11 ++-- 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 7e207146e38e3..18052cfb64e8f 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -6,7 +6,7 @@ include::links.asciidoc[] include::landing-page.asciidoc[] -include::release-notes/highlights.asciidoc[] +// overview / install include::intro.asciidoc[] @@ -14,33 +14,37 @@ include::quickstart/index.asciidoc[] include::setup.asciidoc[] -include::upgrade.asciidoc[] +// search solution 
-include::index-modules.asciidoc[] +include::search/search-your-data/search-your-data.asciidoc[] -include::mapping.asciidoc[] +include::reranking/index.asciidoc[] -include::analysis.asciidoc[] +// data management + +include::index-modules.asciidoc[] include::indices/index-templates.asciidoc[] -include::data-streams/data-streams.asciidoc[] +include::alias.asciidoc[] -include::ingest.asciidoc[] +include::mapping.asciidoc[] -include::alias.asciidoc[] +include::analysis.asciidoc[] -include::search/search-your-data/search-your-data.asciidoc[] +include::ingest.asciidoc[] -include::reranking/index.asciidoc[] +include::connector/docs/index.asciidoc[] -include::query-dsl.asciidoc[] +include::data-streams/data-streams.asciidoc[] -include::aggregations.asciidoc[] +include::data-management.asciidoc[] -include::geospatial-analysis.asciidoc[] +include::data-rollup-transform.asciidoc[] -include::connector/docs/index.asciidoc[] +// analysis tools + +include::query-dsl.asciidoc[] include::eql/eql.asciidoc[] @@ -50,34 +54,48 @@ include::sql/index.asciidoc[] include::scripting.asciidoc[] -include::data-management.asciidoc[] +include::aggregations.asciidoc[] -include::autoscaling/index.asciidoc[] +include::geospatial-analysis.asciidoc[] + +include::watcher/index.asciidoc[] + +// cluster management include::monitoring/index.asciidoc[] -include::data-rollup-transform.asciidoc[] +include::security/index.asciidoc[] + +// production tasks include::high-availability.asciidoc[] +include::how-to.asciidoc[] + +include::autoscaling/index.asciidoc[] + include::snapshot-restore/index.asciidoc[] -include::security/index.asciidoc[] +// reference -include::watcher/index.asciidoc[] +include::rest-api/index.asciidoc[] include::commands/index.asciidoc[] -include::how-to.asciidoc[] - include::troubleshooting.asciidoc[] -include::rest-api/index.asciidoc[] +// upgrades + +include::upgrade.asciidoc[] include::migration/index.asciidoc[] +include::release-notes/highlights.asciidoc[] + include::release-notes.asciidoc[] include::dependencies-versions.asciidoc[] +// etc + include::redirects.asciidoc[] \ No newline at end of file diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 81d46b5773877..c3f6fb43f2ffd 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,5 +1,6 @@ +[chapter] [[release-highlights]] -== What's new in {minor-version} += What's new in {minor-version} coming::[{minor-version}] @@ -37,7 +38,7 @@ endif::[] [discrete] [[esql_inlinestats]] -=== ESQL: INLINESTATS +== ESQL: INLINESTATS This adds the `INLINESTATS` command to ESQL which performs a STATS and then enriches the results into the output stream. So, this query: @@ -62,7 +63,7 @@ Produces output like: [discrete] [[always_allow_rebalancing_by_default]] -=== Always allow rebalancing by default +== Always allow rebalancing by default In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to `indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was appropriate for the legacy allocator which might do too many rebalancing moves otherwise. 
Today's allocator has @@ -74,7 +75,7 @@ version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy al [discrete] [[add_global_retention_in_data_stream_lifecycle]] -=== Add global retention in data stream lifecycle +== Add global retention in data stream lifecycle Data stream lifecycle now supports configuring retention on a cluster level, namely global retention. Global retention \nallows us to configure two different retentions: @@ -88,7 +89,7 @@ data stream lifecycle and it allows any data stream \ndata to be deleted after t [discrete] [[enable_zstandard_compression_for_indices_with_index_codec_set_to_best_compression]] -=== Enable ZStandard compression for indices with index.codec set to best_compression +== Enable ZStandard compression for indices with index.codec set to best_compression Before DEFLATE compression was used to compress stored fields in indices with index.codec index setting set to best_compression, with this change ZStandard is used as compression algorithm to stored fields for indices with index.codec index setting set to best_compression. The usage ZStandard results in less storage usage with a From 06a3e1902102a43e4ef9685c22d71c10d4bb280c Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 23 Oct 2024 10:28:17 -0700 Subject: [PATCH 061/202] Remove LongGCDisruption scheme (#115046) Long GC disruption relies on Thread.resume, which is removed in JDK 23. Tests that use it predate more modern disruption tests. This commit removes gc disruption and the master disruption tests. Note that tests relying on this scheme have already not been running since JDK 20 first deprecated Thread.resume. --- .../discovery/MasterDisruptionIT.java | 44 --- .../discovery/StableMasterDisruptionIT.java | 272 -------------- .../IntermittentLongGCDisruption.java | 109 ------ .../test/disruption/LongGCDisruption.java | 350 ------------------ .../disruption/LongGCDisruptionTests.java | 257 ------------- 5 files changed, 1032 deletions(-) delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java delete mode 100644 test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 214fc47222f3a..bf81200509691 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -21,21 +21,15 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster; -import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; -import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.xcontent.XContentType; -import java.util.ArrayList; -import java.util.HashSet; import java.util.List; -import java.util.Set; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static 
org.hamcrest.Matchers.not; /** * Tests relating to the loss of the master. @@ -43,44 +37,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { - /** - * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one - */ - public void testMasterNodeGCs() throws Exception { - List nodes = startCluster(3); - // NOTE: this assume must happen after starting the cluster, so that cleanup will have something to cleanup. - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - - String oldMasterNode = internalCluster().getMasterName(); - // a very long GC, but it's OK as we remove the disruption when it has had an effect - SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(random(), oldMasterNode, 100, 200, 30000, 60000); - internalCluster().setDisruptionScheme(masterNodeDisruption); - masterNodeDisruption.startDisrupting(); - - Set oldNonMasterNodesSet = new HashSet<>(nodes); - oldNonMasterNodesSet.remove(oldMasterNode); - - List oldNonMasterNodes = new ArrayList<>(oldNonMasterNodesSet); - - logger.info("waiting for nodes to de-elect master [{}]", oldMasterNode); - for (String node : oldNonMasterNodesSet) { - assertDifferentMaster(node, oldMasterNode); - } - - logger.info("waiting for nodes to elect a new master"); - ensureStableCluster(2, oldNonMasterNodes.get(0)); - - // restore GC - masterNodeDisruption.stopDisrupting(); - final TimeValue waitTime = new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()); - ensureStableCluster(3, waitTime, false, oldNonMasterNodes.get(0)); - - // make sure all nodes agree on master - String newMaster = internalCluster().getMasterName(); - assertThat(newMaster, not(equalTo(oldMasterNode))); - assertMaster(newMaster, nodes); - } - /** * This test isolates the master from rest of the cluster, waits for a new master to be elected, restores the partition * and verifies that all node agree on the new cluster state diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 32c602791cca4..48db23635220c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -14,33 +14,26 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.coordination.LeaderChecker; import org.elasticsearch.cluster.coordination.MasterHistoryService; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; import org.elasticsearch.health.GetHealthAction; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; -import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; @@ -50,17 +43,12 @@ import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Objects; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static java.util.Collections.singleton; @@ -227,266 +215,6 @@ private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType ensureStableCluster(3); } - /** - * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are - * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale master. - */ - public void testStaleMasterNotHijackingMajority() throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - final List nodes = internalCluster().startNodes( - 3, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .build() - ); - ensureStableCluster(3); - - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (Objects.equals(previousMaster, currentMaster) == false) { - logger.info( - "--> node {} received new cluster state: {} \n and had previous cluster state: {}", - node, - event.state(), - event.previousState() - ); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? 
currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for majority side to elect a new master - assertBusy(() -> { - for (final Map.Entry>> entry : masters.entrySet()) { - final List> transitions = entry.getValue(); - assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null)); - } - }); - - // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be queued and - // once the old master node un-freezes it gets executed. The old master node will send this update + the cluster state where it is - // flagged as master to the other nodes that follow the new master. These nodes should ignore this update. - internalCluster().getInstance(ClusterService.class, oldMasterNode) - .submitUnbatchedStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(Exception e) { - logger.warn("failure [sneaky-update]", e); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("--> new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("--> unfreezing node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - logger.info("--> [{}] stepped down as master", oldMasterNode); - ensureStableCluster(3); - - assertThat(masters.size(), equalTo(2)); - for (Map.Entry>> entry : masters.entrySet()) { - String nodeName = entry.getKey(); - List> transitions = entry.getValue(); - assertTrue( - "[" + nodeName + "] should not apply state from old master [" + oldMasterNode + "] but it did: " + transitions, - transitions.stream().noneMatch(t -> oldMasterNode.equals(t.v2())) - ); - } - assertGreenMasterStability(internalCluster().client()); - } - - /** - * This helper method creates a 3-node cluster where all nodes are master-eligible, and then simulates a long GC on the master node 5 - * times (forcing another node to be elected master 5 times). It then asserts that the master stability health indicator status is - * YELLOW, and that expectedMasterStabilitySymptomSubstring is contained in the symptom. 
- * @param expectedMasterStabilitySymptomSubstring A string to expect in the master stability health indicator symptom - * @throws Exception - */ - public void testRepeatedMasterChanges(String expectedMasterStabilitySymptomSubstring) throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - final List nodes = internalCluster().startNodes( - 3, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.IDENTITY_CHANGES_THRESHOLD_SETTING.getKey(), 1) - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 100) - .build() - ); - ensureStableCluster(3); - String firstMaster = internalCluster().getMasterName(); - // Force the master to change 2 times: - for (int i = 0; i < 2; i++) { - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (Objects.equals(previousMaster, currentMaster) == false) { - logger.info( - "--> node {} received new cluster state: {} \n and had previous cluster state: {}", - node, - event.state(), - event.previousState() - ); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? 
currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - internalCluster().clearDisruptionScheme(); - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for majority side to elect a new master - assertBusy(() -> { - for (final Map.Entry>> entry : masters.entrySet()) { - final List> transitions = entry.getValue(); - assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null)); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("--> new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("--> unfreezing node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - logger.info("--> [{}] stepped down as master", oldMasterNode); - ensureStableCluster(3); - - assertThat(masters.size(), equalTo(2)); - } - List nodeNamesExceptFirstMaster = Arrays.stream(internalCluster().getNodeNames()) - .filter(name -> name.equals(firstMaster) == false) - .toList(); - /* - * It is possible that the first node that became master got re-elected repeatedly. And since it was in a simulated GC when the - * other node(s) were master, it only saw itself as master. So we want to check with another node. - */ - Client client = internalCluster().client(randomFrom(nodeNamesExceptFirstMaster)); - assertMasterStability(client, HealthStatus.YELLOW, containsString(expectedMasterStabilitySymptomSubstring)); - } - - public void testRepeatedNullMasterRecognizedAsGreenIfMasterDoesNotKnowItIsUnstable() throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - /* - * In this test we have a single master-eligible node. We pause it repeatedly (simulating a long GC pause for example) so that - * other nodes decide it is no longer the master. However since there is no other master-eligible node, another node is never - * elected master. And the master node never recognizes that it had a problem. So when we run the master stability check on one - * of the data nodes, it will see that there is a problem (the master has gone null repeatedly), but when it checks with the - * master, the master says everything is fine. So we expect a GREEN status. 
- */ - final List masterNodes = internalCluster().startMasterOnlyNodes( - 1, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) - .build() - ); - int nullTransitionsThreshold = 1; - final List dataNodes = internalCluster().startDataOnlyNodes( - 2, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), nullTransitionsThreshold) - .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(60, TimeUnit.SECONDS)) - .build() - ); - ensureStableCluster(3); - for (int i = 0; i < nullTransitionsThreshold + 1; i++) { - final String masterNode = masterNodes.get(0); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), masterNode); - - final CountDownLatch dataNodeMasterSteppedDown = new CountDownLatch(2); - internalCluster().getInstance(ClusterService.class, dataNodes.get(0)).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - dataNodeMasterSteppedDown.countDown(); - } - }); - internalCluster().getInstance(ClusterService.class, dataNodes.get(1)).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - dataNodeMasterSteppedDown.countDown(); - } - }); - internalCluster().clearDisruptionScheme(); - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", masterNode); - masterNodeDisruption.startDisrupting(); - dataNodeMasterSteppedDown.await(30, TimeUnit.SECONDS); - // Stop disruption - logger.info("--> unfreezing node [{}]", masterNode); - masterNodeDisruption.stopDisrupting(); - ensureStableCluster(3, TimeValue.timeValueSeconds(30), false, randomFrom(dataNodes)); - } - assertGreenMasterStability(internalCluster().client(randomFrom(dataNodes))); - } - public void testNoMasterEligibleNodes() throws Exception { /* * In this test we have a single master-eligible node. We then stop the master. We set the master lookup threshold very low on the diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java deleted file mode 100644 index 9e2f8c931c84a..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package org.elasticsearch.test.disruption; - -import org.elasticsearch.core.TimeValue; - -import java.util.HashSet; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Simulates irregular long gc intervals. 
- */ -public class IntermittentLongGCDisruption extends LongGCDisruption { - - volatile boolean disrupting; - volatile Thread worker; - - final long intervalBetweenDelaysMin; - final long intervalBetweenDelaysMax; - final long delayDurationMin; - final long delayDurationMax; - - public IntermittentLongGCDisruption( - Random random, - String disruptedNode, - long intervalBetweenDelaysMin, - long intervalBetweenDelaysMax, - long delayDurationMin, - long delayDurationMax - ) { - super(random, disruptedNode); - this.intervalBetweenDelaysMin = intervalBetweenDelaysMin; - this.intervalBetweenDelaysMax = intervalBetweenDelaysMax; - this.delayDurationMin = delayDurationMin; - this.delayDurationMax = delayDurationMax; - } - - static final AtomicInteger thread_ids = new AtomicInteger(); - - @Override - public void startDisrupting() { - disrupting = true; - worker = new Thread(new BackgroundWorker(), "long_gc_simulation_" + thread_ids.incrementAndGet()); - worker.setDaemon(true); - worker.start(); - } - - @Override - public void stopDisrupting() { - if (worker == null) { - return; - } - logger.info("stopping long GCs on [{}]", disruptedNode); - disrupting = false; - worker.interrupt(); - try { - worker.join(2 * (intervalBetweenDelaysMax + delayDurationMax)); - } catch (InterruptedException e) { - logger.info("background thread failed to stop"); - } - worker = null; - } - - private void simulateLongGC(final TimeValue duration) throws InterruptedException { - logger.info("node [{}] goes into GC for for [{}]", disruptedNode, duration); - final Set nodeThreads = new HashSet<>(); - try { - while (suspendThreads(nodeThreads)) - ; - if (nodeThreads.isEmpty() == false) { - Thread.sleep(duration.millis()); - } - } finally { - logger.info("node [{}] resumes from GC", disruptedNode); - resumeThreads(nodeThreads); - } - } - - class BackgroundWorker implements Runnable { - - @Override - public void run() { - while (disrupting) { - try { - TimeValue duration = new TimeValue(delayDurationMin + random.nextInt((int) (delayDurationMax - delayDurationMin))); - simulateLongGC(duration); - - duration = new TimeValue( - intervalBetweenDelaysMin + random.nextInt((int) (intervalBetweenDelaysMax - intervalBetweenDelaysMin)) - ); - if (disrupting) { - Thread.sleep(duration.millis()); - } - } catch (InterruptedException e) {} catch (Exception e) { - logger.error("error in background worker", e); - } - } - } - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java deleted file mode 100644 index dce9e2600d0a6..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
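Worth spelling out why this whole scheme had to go rather than be ported: the disruption works by calling Thread.suspend()/Thread.resume() on every thread of the target node, and per the commit message above those calls stopped working in JDK 20 and Thread.resume is removed entirely in JDK 23. The deleted tests already skipped themselves on newer runtimes with a version guard; a self-contained sketch of that guard follows (the holder class is hypothetical, while the assumeFalse line is taken verbatim from the deleted code):

    import org.junit.Assume;

    // Hypothetical holder for the runtime guard the deleted tests used.
    final class ThreadSuspendGuard {

        // Skips a JUnit 4 test on JDK 20+, where Thread.suspend/resume no longer work.
        static void assumeThreadSuspendAvailable() {
            Assume.assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20);
        }
    }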
- */ - -package org.elasticsearch.test.disruption; - -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.test.InternalTestCluster; - -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.util.Arrays; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -/** - * Suspends all threads on the specified node in order to simulate a long gc. - */ -public class LongGCDisruption extends SingleNodeDisruption { - - private static final Pattern[] unsafeClasses = new Pattern[] { - // logging has shared JVM locks; we may suspend a thread and block other nodes from doing their thing - Pattern.compile("logging\\.log4j"), - // security manager is shared across all nodes and it uses synchronized maps internally - Pattern.compile("java\\.lang\\.SecurityManager"), - // SecureRandom instance from SecureRandomHolder class is shared by all nodes - Pattern.compile("java\\.security\\.SecureRandom"), - // Lucene's WindowsFS is shared across nodes and contains some coarse synchronization - Pattern.compile("org\\.apache\\.lucene\\.tests\\.mockfile\\.WindowsFS") }; - - private static final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); - - protected final String disruptedNode; - private Set suspendedThreads; - private Thread blockDetectionThread; - - private final AtomicBoolean sawSlowSuspendBug = new AtomicBoolean(false); - - public LongGCDisruption(Random random, String disruptedNode) { - super(random); - this.disruptedNode = disruptedNode; - } - - /** - * Checks if during disruption we ran into a known JVM issue that makes {@link Thread#suspend()} calls block for multiple seconds - * was observed. - * @see JDK-8218446 - * @return true if during thread suspending a call to {@link Thread#suspend()} took more than 3s - */ - public boolean sawSlowSuspendBug() { - return sawSlowSuspendBug.get(); - } - - @Override - public synchronized void startDisrupting() { - if (suspendedThreads == null) { - boolean success = false; - try { - suspendedThreads = ConcurrentHashMap.newKeySet(); - - final String currentThreadName = Thread.currentThread().getName(); - assert isDisruptedNodeThread(currentThreadName) == false - : "current thread match pattern. thread name: " + currentThreadName + ", node: " + disruptedNode; - // we spawn a background thread to protect against deadlock which can happen - // if there are shared resources between caller thread and suspended threads - // see unsafeClasses to how to avoid that - final AtomicReference suspendingError = new AtomicReference<>(); - final Thread suspendingThread = new Thread(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - suspendingError.set(e); - } - - @Override - protected void doRun() throws Exception { - // keep trying to suspend threads, until no new threads are discovered. 
- while (suspendThreads(suspendedThreads)) { - if (Thread.interrupted()) { - return; - } - } - } - }); - suspendingThread.setName(currentThreadName + "[LongGCDisruption][threadSuspender]"); - suspendingThread.start(); - try { - suspendingThread.join(getSuspendingTimeoutInMillis()); - } catch (InterruptedException e) { - suspendingThread.interrupt(); // best effort to signal suspending - throw new RuntimeException(e); - } - if (suspendingError.get() != null) { - throw new RuntimeException("unknown error while suspending threads", suspendingError.get()); - } - if (suspendingThread.isAlive()) { - logger.warn( - """ - failed to suspend node [{}]'s threads within [{}] millis. Suspending thread stack trace: - {} - Threads that weren't suspended: - {}""", - disruptedNode, - getSuspendingTimeoutInMillis(), - stackTrace(suspendingThread.getStackTrace()), - suspendedThreads.stream() - .map(t -> t.getName() + "\n----\n" + stackTrace(t.getStackTrace())) - .collect(Collectors.joining("\n")) - ); - suspendingThread.interrupt(); // best effort; - try { - /* - * We need to join on the suspending thread in case it has suspended a thread that is in a critical section and - * needs to be resumed. - */ - suspendingThread.join(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - throw new RuntimeException("suspending node threads took too long"); - } - // block detection checks if other threads are blocked waiting on an object that is held by one - // of the threads that was suspended - if (isBlockDetectionSupported()) { - blockDetectionThread = new Thread(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (e instanceof InterruptedException == false) { - throw new AssertionError("unexpected exception in blockDetectionThread", e); - } - } - - @Override - protected void doRun() throws Exception { - while (Thread.currentThread().isInterrupted() == false) { - ThreadInfo[] threadInfos = threadBean.dumpAllThreads(true, true); - for (ThreadInfo threadInfo : threadInfos) { - if (isDisruptedNodeThread(threadInfo.getThreadName()) == false - && threadInfo.getLockOwnerName() != null - && isDisruptedNodeThread(threadInfo.getLockOwnerName())) { - - // find ThreadInfo object of the blocking thread (if available) - ThreadInfo blockingThreadInfo = null; - for (ThreadInfo otherThreadInfo : threadInfos) { - if (otherThreadInfo.getThreadId() == threadInfo.getLockOwnerId()) { - blockingThreadInfo = otherThreadInfo; - break; - } - } - onBlockDetected(threadInfo, blockingThreadInfo); - } - } - Thread.sleep(getBlockDetectionIntervalInMillis()); - } - } - }); - blockDetectionThread.setName(currentThreadName + "[LongGCDisruption][blockDetection]"); - blockDetectionThread.start(); - } - success = true; - } finally { - if (success == false) { - stopBlockDetection(); - // resume threads if failed - resumeThreads(suspendedThreads); - suspendedThreads = null; - } - } - } else { - throw new IllegalStateException("can't disrupt twice, call stopDisrupting() first"); - } - } - - public boolean isDisruptedNodeThread(String threadName) { - return threadName.contains("[" + disruptedNode + "]"); - } - - private static String stackTrace(StackTraceElement[] stackTraceElements) { - return Arrays.stream(stackTraceElements).map(Object::toString).collect(Collectors.joining("\n")); - } - - @Override - public synchronized void stopDisrupting() { - stopBlockDetection(); - if (suspendedThreads != null) { - resumeThreads(suspendedThreads); - suspendedThreads = null; - } - } - - private void 
stopBlockDetection() { - if (blockDetectionThread != null) { - try { - blockDetectionThread.interrupt(); // best effort - blockDetectionThread.join(getSuspendingTimeoutInMillis()); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - blockDetectionThread = null; - } - } - - @Override - public void removeAndEnsureHealthy(InternalTestCluster cluster) { - removeFromCluster(cluster); - ensureNodeCount(cluster); - } - - @Override - public TimeValue expectedTimeToHeal() { - return TimeValue.timeValueMillis(0); - } - - /** - * resolves all threads belonging to given node and suspends them if their current stack trace - * is "safe". Threads are added to nodeThreads if suspended. - * - * returns true if some live threads were found. The caller is expected to call this method - * until no more "live" are found. - */ - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally - @SuppressForbidden(reason = "suspends/resumes threads intentionally") - protected boolean suspendThreads(Set nodeThreads) { - Thread[] allThreads = null; - while (allThreads == null) { - allThreads = new Thread[Thread.activeCount()]; - if (Thread.enumerate(allThreads) > allThreads.length) { - // we didn't make enough space, retry - allThreads = null; - } - } - boolean liveThreadsFound = false; - for (Thread thread : allThreads) { - if (thread == null) { - continue; - } - String threadName = thread.getName(); - if (isDisruptedNodeThread(threadName)) { - if (thread.isAlive() && nodeThreads.add(thread)) { - liveThreadsFound = true; - logger.trace("suspending thread [{}]", threadName); - // we assume it is not safe to suspend the thread - boolean safe = false; - try { - /* - * At the bottom of this try-block we will know whether or not it is safe to suspend the thread; we start by - * assuming that it is safe. - */ - boolean definitelySafe = true; - final long startTime = System.nanoTime(); - thread.suspend(); - if (System.nanoTime() - startTime > TimeUnit.SECONDS.toNanos(3L)) { - sawSlowSuspendBug.set(true); - } - // double check the thread is not in a shared resource like logging; if so, let it go and come back - safe: for (StackTraceElement stackElement : thread.getStackTrace()) { - String className = stackElement.getClassName(); - for (Pattern unsafePattern : getUnsafeClasses()) { - if (unsafePattern.matcher(className).find()) { - // it is definitely not safe to suspend the thread - definitelySafe = false; - break safe; - } - } - } - safe = definitelySafe; - } finally { - if (safe == false) { - /* - * Do not log before resuming as we might be interrupted while logging in which case we will throw an - * interrupted exception and never resume the suspended thread that is in a critical section. Also, logging - * before resuming makes for confusing log messages if we never hit the resume. 
- */ - thread.resume(); - logger.trace("resumed thread [{}] as it is in a critical section", threadName); - nodeThreads.remove(thread); - } - } - } - } - } - return liveThreadsFound; - } - - // for testing - protected Pattern[] getUnsafeClasses() { - return unsafeClasses; - } - - // for testing - protected long getSuspendingTimeoutInMillis() { - return TimeValue.timeValueSeconds(30).getMillis(); - } - - public boolean isBlockDetectionSupported() { - return threadBean.isObjectMonitorUsageSupported() && threadBean.isSynchronizerUsageSupported(); - } - - // for testing - protected long getBlockDetectionIntervalInMillis() { - return 3000L; - } - - // for testing - protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo blockingThread) { - String blockedThreadStackTrace = stackTrace(blockedThread.getStackTrace()); - String blockingThreadStackTrace = blockingThread != null ? stackTrace(blockingThread.getStackTrace()) : "not available"; - throw new AssertionError( - "Thread [" - + blockedThread.getThreadName() - + "] is blocked waiting on the resource [" - + blockedThread.getLockInfo() - + "] held by the suspended thread [" - + blockedThread.getLockOwnerName() - + "] of the disrupted node [" - + disruptedNode - + "].\n" - + "Please add this occurrence to the unsafeClasses list in [" - + LongGCDisruption.class.getName() - + "].\n" - + "Stack trace of blocked thread: " - + blockedThreadStackTrace - + "\n" - + "Stack trace of blocking thread: " - + blockingThreadStackTrace - ); - } - - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally - @SuppressForbidden(reason = "suspends/resumes threads intentionally") - protected void resumeThreads(Set threads) { - for (Thread thread : threads) { - thread.resume(); - } - } -} diff --git a/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java deleted file mode 100644 index 72ecba8d502f1..0000000000000 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ -package org.elasticsearch.test.disruption; - -import org.elasticsearch.core.Nullable; -import org.elasticsearch.test.ESTestCase; -import org.junit.BeforeClass; - -import java.lang.management.ThreadInfo; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReentrantLock; -import java.util.regex.Pattern; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; - -public class LongGCDisruptionTests extends ESTestCase { - - static class LockedExecutor { - ReentrantLock lock = new ReentrantLock(); - - public void executeLocked(Runnable r) { - lock.lock(); - try { - r.run(); - } finally { - lock.unlock(); - } - } - } - - @BeforeClass - public static void ignoreJdk20Plus() { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - } - - public void testBlockingTimeout() throws Exception { - final String nodeName = "test_node"; - LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[] { Pattern.compile(LockedExecutor.class.getSimpleName()) }; - } - - @Override - protected long getSuspendingTimeoutInMillis() { - return 100; - } - }; - final AtomicBoolean stop = new AtomicBoolean(); - final CountDownLatch underLock = new CountDownLatch(1); - final CountDownLatch pauseUnderLock = new CountDownLatch(1); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final Thread[] threads = new Thread[10]; - try { - for (int i = 0; i < 10; i++) { - // at least one locked and one none lock thread - final boolean lockedExec = (i < 9 && randomBoolean()) || i == 0; - threads[i] = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { - try { - underLock.countDown(); - ops.incrementAndGet(); - pauseUnderLock.await(); - } catch (InterruptedException e) { - - } - }); - } else { - ops.incrementAndGet(); - } - } - }); - threads[i].setName("[" + nodeName + "][" + i + "]"); - threads[i].start(); - } - // make sure some threads are under lock - underLock.await(); - RuntimeException e = expectThrows(RuntimeException.class, disruption::startDisrupting); - assertThat(e.getMessage(), containsString("suspending node threads took too long")); - } finally { - stop.set(true); - pauseUnderLock.countDown(); - for (final Thread thread : threads) { - thread.join(); - } - } - } - - /** - * Checks that a GC disruption never blocks threads while they are doing something "unsafe" - * but does keep retrying until all threads can be safely paused - */ - public void testNotBlockingUnsafeStackTraces() throws Exception { - final String nodeName = "test_node"; - LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[] { Pattern.compile(LockedExecutor.class.getSimpleName()) }; - } - }; - final AtomicBoolean stop = new AtomicBoolean(); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final Thread[] threads = new Thread[5]; - final Runnable yieldAndIncrement = () -> { - Thread.yield(); // give some 
chance to catch this stack trace - ops.incrementAndGet(); - }; - try { - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - for (int iter = 0; stop.get() == false; iter++) { - if (iter % 2 == 0) { - lockedExecutor.executeLocked(yieldAndIncrement); - } else { - yieldAndIncrement.run(); - } - } - }); - threads[i].setName("[" + nodeName + "][" + i + "]"); - threads[i].start(); - } - // make sure some threads are under lock - try { - disruption.startDisrupting(); - } catch (RuntimeException e) { - if (e.getMessage().contains("suspending node threads took too long") && disruption.sawSlowSuspendBug()) { - return; - } - throw new AssertionError(e); - } - long first = ops.get(); - assertThat(lockedExecutor.lock.isLocked(), equalTo(false)); // no threads should own the lock - Thread.sleep(100); - assertThat(ops.get(), equalTo(first)); - disruption.stopDisrupting(); - assertBusy(() -> assertThat(ops.get(), greaterThan(first))); - } finally { - disruption.stopDisrupting(); - stop.set(true); - for (final Thread thread : threads) { - thread.join(); - } - } - } - - public void testBlockDetection() throws Exception { - final String disruptedNodeName = "disrupted_node"; - final String blockedNodeName = "blocked_node"; - CountDownLatch waitForBlockDetectionResult = new CountDownLatch(1); - AtomicReference blockDetectionResult = new AtomicReference<>(); - LongGCDisruption disruption = new LongGCDisruption(random(), disruptedNodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[0]; - } - - @Override - protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo blockingThread) { - blockDetectionResult.set(blockedThread); - waitForBlockDetectionResult.countDown(); - } - - @Override - protected long getBlockDetectionIntervalInMillis() { - return 10L; - } - }; - if (disruption.isBlockDetectionSupported() == false) { - return; - } - final AtomicBoolean stop = new AtomicBoolean(); - final CountDownLatch underLock = new CountDownLatch(1); - final CountDownLatch pauseUnderLock = new CountDownLatch(1); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final List threads = new ArrayList<>(); - try { - for (int i = 0; i < 5; i++) { - // at least one locked and one none lock thread - final boolean lockedExec = (i < 4 && randomBoolean()) || i == 0; - Thread thread = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { - try { - underLock.countDown(); - ops.incrementAndGet(); - pauseUnderLock.await(); - } catch (InterruptedException e) { - - } - }); - } else { - ops.incrementAndGet(); - } - } - }); - - thread.setName("[" + disruptedNodeName + "][" + i + "]"); - threads.add(thread); - thread.start(); - } - - for (int i = 0; i < 5; i++) { - // at least one locked and one none lock thread - final boolean lockedExec = (i < 4 && randomBoolean()) || i == 0; - Thread thread = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { ops.incrementAndGet(); }); - } else { - ops.incrementAndGet(); - } - } - }); - thread.setName("[" + blockedNodeName + "][" + i + "]"); - threads.add(thread); - thread.start(); - } - // make sure some threads of test_node are under lock - underLock.await(); - disruption.startDisrupting(); - assertTrue(waitForBlockDetectionResult.await(30, TimeUnit.SECONDS)); - disruption.stopDisrupting(); - - ThreadInfo threadInfo = blockDetectionResult.get(); - 
assertNotNull(threadInfo); - assertThat(threadInfo.getThreadName(), containsString("[" + blockedNodeName + "]")); - assertThat(threadInfo.getLockOwnerName(), containsString("[" + disruptedNodeName + "]")); - assertThat(threadInfo.getLockInfo().getClassName(), containsString(ReentrantLock.class.getName())); - } finally { - stop.set(true); - pauseUnderLock.countDown(); - for (final Thread thread : threads) { - thread.join(); - } - } - } -} From 7544c88c128e1a5b27e2291c7bcf1a461ecac6bd Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 23 Oct 2024 10:29:05 -0700 Subject: [PATCH 062/202] Consolidate @Before of rolling upgrade tests (#114677) Multiple @Before methods in junit are run in random order. This commit cosolidates the @Before methods of ParameterizedRollingUpgradeTestCase since the code has interdependencies. closes #114330 --- .../upgrades/ParameterizedRollingUpgradeTestCase.java | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java index e7cff5cca5a92..a20981a119d8f 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java @@ -55,14 +55,13 @@ public static Iterable parameters() { protected abstract ElasticsearchCluster getUpgradeCluster(); @Before - public void extractOldClusterFeatures() { + public void upgradeNode() throws Exception { + // extract old cluster features if (isOldCluster() && oldClusterTestFeatureService == null) { oldClusterTestFeatureService = testFeatureService; } - } - @Before - public void extractOldIndexVersion() throws Exception { + // extract old index version if (oldIndexVersion == null && upgradedNodes.isEmpty()) { IndexVersion indexVersion = null; // these should all be the same version @@ -93,13 +92,11 @@ public void extractOldIndexVersion() throws Exception { assertThat("Index version could not be read", indexVersion, notNullValue()); oldIndexVersion = indexVersion; } - } - @Before - public void upgradeNode() throws Exception { // Skip remaining tests if upgrade failed assumeFalse("Cluster upgrade failed", upgradeFailed); + // finally, upgrade node if (upgradedNodes.size() < requestedUpgradedNodes) { closeClients(); // we might be running a specific upgrade test by itself - check previous nodes too From b31e5c9609f65dd09800d8caaceb4c00881a8a8e Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 23 Oct 2024 13:35:50 -0400 Subject: [PATCH 063/202] Refactor the download_database_on_pipeline_creation checks (#115421) --- .../geoip/GeoIpDownloaderTaskExecutor.java | 4 ++-- .../ingest/geoip/GeoIpProcessor.java | 16 +++++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index e4150005ed1ae..61ca050d91c13 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -309,14 +309,14 @@ private static boolean hasAtLeastOneGeoipProcessor(Map processor { final Map 
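On the @Before consolidation in #114677 above: JUnit 4 gives no ordering guarantee between multiple @Before methods declared in the same class, so setup steps that depend on one another have to live in a single method where the order is explicit. A minimal sketch of the resulting shape (class, field, and method names are hypothetical):

    import org.junit.Before;

    // Hypothetical test class: a single @Before keeps interdependent setup ordered.
    public class ConsolidatedSetupTest {

        private Object oldClusterFeatures;
        private Object oldIndexVersion;

        @Before
        public void upgradeNode() throws Exception {
            oldClusterFeatures = captureOldClusterFeatures(); // must happen first
            oldIndexVersion = captureOldIndexVersion();       // before any node is upgraded
            maybeUpgradeNextNode();                           // must happen last
        }

        private Object captureOldClusterFeatures() { return new Object(); }
        private Object captureOldIndexVersion() { return new Object(); }
        private void maybeUpgradeNextNode() {}
    }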
processorConfig = (Map) processor.get(GEOIP_TYPE); if (processorConfig != null) { - return downloadDatabaseOnPipelineCreation(GEOIP_TYPE, processorConfig, null) == downloadDatabaseOnPipelineCreation; + return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation; } } { final Map processorConfig = (Map) processor.get(IP_LOCATION_TYPE); if (processorConfig != null) { - return downloadDatabaseOnPipelineCreation(IP_LOCATION_TYPE, processorConfig, null) == downloadDatabaseOnPipelineCreation; + return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation; } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 9508bf0346058..f99f8dbe2fdd0 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -238,9 +238,8 @@ public Processor create( boolean ignoreMissing = readBooleanProperty(type, processorTag, config, "ignore_missing", false); boolean firstOnly = readBooleanProperty(type, processorTag, config, "first_only", true); - // Validating the download_database_on_pipeline_creation even if the result - // is not used directly by the factory. - downloadDatabaseOnPipelineCreation(type, config, processorTag); + // validate (and consume) the download_database_on_pipeline_creation property even though the result is not used by the factory + readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true); // noop, should be removed in 9.0 Object value = config.remove("fallback_to_default_databases"); @@ -319,8 +318,15 @@ public Processor create( ); } - public static boolean downloadDatabaseOnPipelineCreation(String type, Map config, String processorTag) { - return readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true); + /** + * Get the value of the "download_database_on_pipeline_creation" property from a processor's config map. + *
+ * As with the actual property definition, the default value of the property is 'true'. Unlike the actual + * property definition, this method doesn't consume (that is, config.remove) the property from + * the config map. + */ + public static boolean downloadDatabaseOnPipelineCreation(Map config) { + return (boolean) config.getOrDefault("download_database_on_pipeline_creation", true); } } From 254cedc988f3bb3b6c731cb1c2cb33c4f8bc018a Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 23 Oct 2024 20:36:44 +0300 Subject: [PATCH 064/202] Separate tests for snapshot and release versions (#115402) --- .../xpack/esql/action/EsqlCapabilities.java | 12 ++- .../rest-api-spec/test/esql/60_usage.yml | 82 ++++++++++++++++++- 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 5157a80022c39..5e336e6759b1e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -416,7 +416,17 @@ public enum Cap { * Fix for an optimization that caused wrong results * https://github.com/elastic/elasticsearch/issues/115281 */ - FIX_FILTER_PUSHDOWN_PAST_STATS; + FIX_FILTER_PUSHDOWN_PAST_STATS, + + /** + * This enables 60_usage.yml "Basic ESQL usage....snapshot" version test. See also the next capability. + */ + SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot()), + + /** + * This enables 60_usage.yml "Basic ESQL usage....non-snapshot" version test. See also the previous capability. + */ + NON_SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot() == false); private final boolean enabled; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml index 7d1a4e123299b..b51bbdc4d2f87 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -23,7 +23,86 @@ setup: type: integer --- -"Basic ESQL usage output (telemetry)": +"Basic ESQL usage output (telemetry) snapshot version": + - requires: + test_runner_features: [capabilities] + capabilities: + - method: POST + path: /_query + parameters: [] + capabilities: [ snapshot_test_for_telemetry ] + reason: "Test that should only be executed on snapshot versions" + + - do: {xpack.usage: {}} + - match: { esql.available: true } + - match: { esql.enabled: true } + - length: { esql.features: 15 } + - set: {esql.features.dissect: dissect_counter} + - set: {esql.features.drop: drop_counter} + - set: {esql.features.eval: eval_counter} + - set: {esql.features.enrich: enrich_counter} + - set: {esql.features.from: from_counter} + - set: {esql.features.grok: grok_counter} + - set: {esql.features.keep: keep_counter} + - set: {esql.features.limit: limit_counter} + - set: {esql.features.mv_expand: mv_expand_counter} + - set: {esql.features.rename: rename_counter} + - set: {esql.features.row: row_counter} + - set: {esql.features.show: show_counter} + - set: {esql.features.sort: sort_counter} + - set: {esql.features.stats: stats_counter} + - set: {esql.features.where: where_counter} + - length: { esql.queries: 3 } + - set: {esql.queries.rest.total: rest_total_counter} + - set: {esql.queries.rest.failed: 
rest_failed_counter} + - set: {esql.queries.kibana.total: kibana_total_counter} + - set: {esql.queries.kibana.failed: kibana_failed_counter} + - set: {esql.queries._all.total: all_total_counter} + - set: {esql.queries._all.failed: all_failed_counter} + - set: {esql.functions.max: functions_max} + - set: {esql.functions.min: functions_min} + - set: {esql.functions.cos: functions_cos} + - set: {esql.functions.to_long: functions_to_long} + - set: {esql.functions.coalesce: functions_coalesce} + + - do: + esql.query: + body: + query: 'from test | where data > 2 and to_long(data) > 2 | sort count desc | limit 5 | stats m = max(data)' + + - do: {xpack.usage: {}} + - match: { esql.available: true } + - match: { esql.enabled: true } + - match: {esql.features.dissect: $dissect_counter} + - match: {esql.features.eval: $eval_counter} + - match: {esql.features.grok: $grok_counter} + - gt: {esql.features.limit: $limit_counter} + - gt: {esql.features.sort: $sort_counter} + - gt: {esql.features.stats: $stats_counter} + - gt: {esql.features.where: $where_counter} + - gt: {esql.queries.rest.total: $rest_total_counter} + - match: {esql.queries.rest.failed: $rest_failed_counter} + - match: {esql.queries.kibana.total: $kibana_total_counter} + - match: {esql.queries.kibana.failed: $kibana_failed_counter} + - gt: {esql.queries._all.total: $all_total_counter} + - match: {esql.queries._all.failed: $all_failed_counter} + - gt: {esql.functions.max: $functions_max} + - match: {esql.functions.min: $functions_min} + - match: {esql.functions.cos: $functions_cos} + - gt: {esql.functions.to_long: $functions_to_long} + - match: {esql.functions.coalesce: $functions_coalesce} + - length: {esql.functions: 117} # check the "sister" test below for a likely update to the same esql.functions length check + +--- +"Basic ESQL usage output (telemetry) non-snapshot version": + - requires: + test_runner_features: [capabilities] + capabilities: + - method: POST + path: /_query + parameters: [] + capabilities: [ non_snapshot_test_for_telemetry ] + reason: "Test that should only be executed on release versions" - do: {xpack.usage: {}} - match: { esql.available: true } @@ -83,3 +162,4 @@ setup: - match: {esql.functions.cos: $functions_cos} - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} + - length: {esql.functions: 115} # check the "sister" test above for a likely update to the same esql.functions length check From c851c25568f22455f75f8cbf99eb9b8a7b3f7302 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Wed, 23 Oct 2024 13:41:34 -0400 Subject: [PATCH 065/202] Refactor InferenceProcessorInfoExtractor to avoid ConfigurationUtils (#115425) --- .../InferenceProcessorInfoExtractor.java | 45 +++++++++---------- 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java index e61342d281c90..83f7832645270 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java @@ -10,9 +10,7 @@ import org.apache.lucene.util.Counter; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.ingest.ConfigurationUtils; import 
org.elasticsearch.ingest.IngestMetadata; -import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.transport.Transports; import java.util.HashMap; @@ -24,6 +22,7 @@ import java.util.function.Consumer; import static org.elasticsearch.inference.InferenceResults.MODEL_ID_RESULTS_FIELD; +import static org.elasticsearch.ingest.Pipeline.ON_FAILURE_KEY; import static org.elasticsearch.ingest.Pipeline.PROCESSORS_KEY; /** @@ -53,16 +52,10 @@ public static int countInferenceProcessors(ClusterState state) { Counter counter = Counter.newCounter(); ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map configMap = configuration.getConfigAsMap(); - List> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List> processorConfigs = (List>) configMap.get(PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { - addModelsAndPipelines( - entry.getKey(), - pipelineId, - (Map) entry.getValue(), - pam -> counter.addAndGet(1), - 0 - ); + addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> counter.addAndGet(1), 0); } } }); @@ -73,7 +66,6 @@ public static int countInferenceProcessors(ClusterState state) { * @param ingestMetadata The ingestMetadata of current ClusterState * @return The set of model IDs referenced by inference processors */ - @SuppressWarnings("unchecked") public static Set getModelIdsFromInferenceProcessors(IngestMetadata ingestMetadata) { if (ingestMetadata == null) { return Set.of(); @@ -82,7 +74,7 @@ public static Set getModelIdsFromInferenceProcessors(IngestMetadata inge Set modelIds = new LinkedHashSet<>(); ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map configMap = configuration.getConfigAsMap(); - List> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> modelIds.add(pam.modelIdOrAlias()), 0); @@ -96,7 +88,6 @@ public static Set getModelIdsFromInferenceProcessors(IngestMetadata inge * @param state Current cluster state * @return a map from Model or Deployment IDs or Aliases to each pipeline referencing them. */ - @SuppressWarnings("unchecked") public static Map> pipelineIdsByResource(ClusterState state, Set ids) { assert Transports.assertNotTransportThread("non-trivial nested loops over cluster state structures"); Map> pipelineIdsByModelIds = new HashMap<>(); @@ -110,7 +101,7 @@ public static Map> pipelineIdsByResource(ClusterState state, } ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map configMap = configuration.getConfigAsMap(); - List> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> { @@ -128,7 +119,6 @@ public static Map> pipelineIdsByResource(ClusterState state, * @param state Current {@link ClusterState} * @return a map from Model or Deployment IDs or Aliases to each pipeline referencing them. 
*/ - @SuppressWarnings("unchecked") public static Set pipelineIdsForResource(ClusterState state, Set ids) { assert Transports.assertNotTransportThread("non-trivial nested loops over cluster state structures"); Set pipelineIds = new HashSet<>(); @@ -142,7 +132,7 @@ public static Set pipelineIdsForResource(ClusterState state, Set } ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map configMap = configuration.getConfigAsMap(); - List> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> { @@ -188,7 +178,7 @@ private static void addModelsAndPipelines( addModelsAndPipelines( innerProcessorWithName.getKey(), pipelineId, - (Map) innerProcessorWithName.getValue(), + innerProcessorWithName.getValue(), handler, level + 1 ); @@ -196,13 +186,8 @@ private static void addModelsAndPipelines( } return; } - if (processorDefinition instanceof Map definitionMap && definitionMap.containsKey(Pipeline.ON_FAILURE_KEY)) { - List> onFailureConfigs = ConfigurationUtils.readList( - null, - null, - (Map) definitionMap, - Pipeline.ON_FAILURE_KEY - ); + if (processorDefinition instanceof Map definitionMap && definitionMap.containsKey(ON_FAILURE_KEY)) { + List> onFailureConfigs = readList(definitionMap, ON_FAILURE_KEY); onFailureConfigs.stream() .flatMap(map -> map.entrySet().stream()) .forEach(entry -> addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), handler, level + 1)); @@ -211,4 +196,16 @@ private static void addModelsAndPipelines( private record PipelineAndModel(String pipelineId, String modelIdOrAlias) {} + /** + * A local alternative to ConfigurationUtils.readList(...) that reads list properties out of the processor configuration map, + * but doesn't rely on mutating the configuration map. 
+ */ + @SuppressWarnings("unchecked") + private static List<Map<String, Object>> readList(Map<String, Object> processorConfig, String key) { + Object val = processorConfig.get(key); + if (val == null) { + throw new IllegalArgumentException("Missing required property [" + key + "]"); + } + return (List<Map<String, Object>>) val; + } } From b6d078908ca7dce354d24f3bcf8db5d604488c14 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 23 Oct 2024 18:42:02 +0100 Subject: [PATCH 066/202] [ML] Add patch transport version for change to Get Inference Request (#115250) --- .../src/main/java/org/elasticsearch/TransportVersions.java | 1 + .../core/inference/action/GetInferenceModelAction.java | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 7e06004e47cfb..6d9bf2ac52f2d 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -174,6 +174,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_PER_AGGREGATE_FILTER = def(8_770_00_0); public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); + public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java index 6e06133509644..ea0462d0f103e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java @@ -62,7 +62,8 @@ public Request(StreamInput in) throws IOException { super(in); this.inferenceEntityId = in.readString(); this.taskType = TaskType.fromStream(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ) + || in.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) { this.persistDefaultConfig = in.readBoolean(); } else { this.persistDefaultConfig = PERSIST_DEFAULT_CONFIGS; @@ -87,7 +88,8 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(inferenceEntityId); taskType.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ) + || out.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) { out.writeBoolean(this.persistDefaultConfig); } } From 57532e7b7fb1348c8abf50b225932cddea5937a9 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 23 Oct 2024 19:44:04 +0200 Subject: [PATCH 067/202] [test] Unmute FsDirectoryFactoryTests#testStoreDirectory (#115440) Resolve
#110210 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index f59ca0c213279..19e8416c396c3 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -17,9 +17,6 @@ tests: - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT" issue: "https://github.com/elastic/elasticsearch/issues/108628" method: "testDeprecatedSettingsReturnWarnings" -- class: org.elasticsearch.index.store.FsDirectoryFactoryTests - method: testStoreDirectory - issue: https://github.com/elastic/elasticsearch/issues/110210 - class: org.elasticsearch.index.store.FsDirectoryFactoryTests method: testPreload issue: https://github.com/elastic/elasticsearch/issues/110211 From e0acb56086a27c247867fa4b464973ebefef5230 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Wed, 23 Oct 2024 14:02:47 -0400 Subject: [PATCH 068/202] [ML] Mitigate IOSession timeouts (#115414) We are seeing exceptions ~0.03% of the time in our integration tests: ``` org.apache.http.ConnectionClosedException: Connection closed unexpectedly ``` The `contentDecoder` does not always fully consume the body within `SimpleInputBuffer.consumeContent`. When we return back to Apache, the rest of the body is never delivered, and the IOSession eventually times out and gets cleaned up. During that cleanup process, Apache calls our Consumer with the above exception. If we read 0 bytes and return back immediately, Apache has a better chance to load the rest of the body/footer, and it will call `consumeContent` again. This reduces the exception rate down to ~0.001%. Fix #114105 Fix #114232 Fix #114327 Fix #114385 --- docs/changelog/115414.yaml | 9 +++++++ muted-tests.yml | 12 --------- .../http/StreamingHttpResultPublisher.java | 26 +++++++++---------- .../services/InferenceEventsAssertion.java | 10 ++++--- .../anthropic/AnthropicServiceTests.java | 2 -- .../AzureAiStudioServiceTests.java | 2 -- .../azureopenai/AzureOpenAiServiceTests.java | 2 -- .../services/cohere/CohereServiceTests.java | 2 -- .../services/openai/OpenAiServiceTests.java | 2 -- 9 files changed, 27 insertions(+), 40 deletions(-) create mode 100644 docs/changelog/115414.yaml diff --git a/docs/changelog/115414.yaml b/docs/changelog/115414.yaml new file mode 100644 index 0000000000000..7475b765bb30e --- /dev/null +++ b/docs/changelog/115414.yaml @@ -0,0 +1,9 @@ +pr: 115414 +summary: Mitigate IOSession timeouts +area: Machine Learning +type: bug +issues: + - 114385 + - 114327 + - 114105 + - 114232 diff --git a/muted-tests.yml b/muted-tests.yml index 19e8416c396c3..cce16a07e647a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -215,24 +215,12 @@ tests: - class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT method: testPutE5Small_withPlatformSpecificVariant issue: https://github.com/elastic/elasticsearch/issues/113950 -- class: org.elasticsearch.xpack.inference.services.openai.OpenAiServiceTests - method: testInfer_StreamRequest_ErrorResponse - issue: https://github.com/elastic/elasticsearch/issues/114105 - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testGet issue: https://github.com/elastic/elasticsearch/issues/114135 - class: org.elasticsearch.xpack.ilm.ExplainLifecycleIT method: testStepInfoPreservedOnAutoRetry issue: https://github.com/elastic/elasticsearch/issues/114220 -- class: org.elasticsearch.xpack.inference.services.openai.OpenAiServiceTests - method: testInfer_StreamRequest - issue: https://github.com/elastic/elasticsearch/issues/114232 -- class: 
org.elasticsearch.xpack.inference.services.cohere.CohereServiceTests - method: testInfer_StreamRequest_ErrorResponse - issue: https://github.com/elastic/elasticsearch/issues/114327 -- class: org.elasticsearch.xpack.inference.services.cohere.CohereServiceTests - method: testInfer_StreamRequest - issue: https://github.com/elastic/elasticsearch/issues/114385 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114412 diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java index bf74ca86a969a..0b2268a448c8a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java @@ -96,11 +96,10 @@ public void consumeContent(ContentDecoder contentDecoder, IOControl ioControl) t try { var consumed = inputBuffer.consumeContent(contentDecoder); - var allBytes = new byte[consumed]; - inputBuffer.read(allBytes); - - // we can have empty bytes, don't bother sending them - if (allBytes.length > 0) { + // we could have read 0 bytes if the body was delayed getting in, we need to return out so apache can load the body/footer + if (consumed > 0) { + var allBytes = new byte[consumed]; + inputBuffer.read(allBytes); queue.offer(() -> { subscriber.onNext(new HttpResult(response, allBytes)); var currentBytesInQueue = bytesInQueue.updateAndGet(current -> Long.max(0, current - allBytes.length)); @@ -111,18 +110,17 @@ public void consumeContent(ContentDecoder contentDecoder, IOControl ioControl) t } } }); - } - // always check if totalByteSize > the configured setting in case the settings change - if (bytesInQueue.accumulateAndGet(allBytes.length, Long::sum) >= settings.getMaxResponseSize().getBytes()) { - pauseProducer(ioControl); - } + // always check if totalByteSize > the configured setting in case the settings change + if (bytesInQueue.accumulateAndGet(allBytes.length, Long::sum) >= settings.getMaxResponseSize().getBytes()) { + pauseProducer(ioControl); + } - // always run in case we're waking up from a pause and need to start a new thread - taskRunner.requestNextRun(); + taskRunner.requestNextRun(); - if (listenerCalled.compareAndSet(false, true)) { - listener.onResponse(this); + if (listenerCalled.compareAndSet(false, true)) { + listener.onResponse(this); + } } } finally { inputBuffer.reset(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java index f23ea2aa414b2..7cfd231be39f3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.test.ESTestCase; import 
org.elasticsearch.xcontent.XContentFactory; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; @@ -26,6 +25,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; +import static org.elasticsearch.test.ESTestCase.fail; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.CoreMatchers.is; @@ -47,7 +47,9 @@ public InferenceEventsAssertion hasFinishedStream() { } public InferenceEventsAssertion hasNoErrors() { - MatcherAssert.assertThat("Expected no errors from stream.", error, Matchers.nullValue()); + if (error != null) { + fail(error, "Expected no errors from stream."); + } return this; } @@ -66,7 +68,7 @@ public InferenceEventsAssertion hasErrorWithStatusCode(int statusCode) { } t = t.getCause(); } - ESTestCase.fail(error, "Expected an underlying ElasticsearchStatusException."); + fail(error, "Expected an underlying ElasticsearchStatusException."); return this; } @@ -79,7 +81,7 @@ public InferenceEventsAssertion hasErrorContaining(String message) { } t = t.getCause(); } - ESTestCase.fail(error, "Expected exception to contain string: " + message); + fail(error, "Expected exception to contain string: " + message); return this; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java index 8adf75b4c0a81..48277112d9306 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java @@ -532,7 +532,6 @@ public void testInfer_SendsCompletionRequest() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {"type": "message_start", "message": {"model": "claude, probably"}} @@ -578,7 +577,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ data: {"type": "error", "error": {"type": "request_too_large", "message": "blah"}} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index 44b0d17d9b448..e85edf573ba96 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -1308,7 +1308,6 @@ public void testInfer_UnauthorisedResponse() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1364,7 +1363,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException, URISy } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { 
String responseJson = """ { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index 900b666c0b8fb..3408fc358cac0 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -1425,7 +1425,6 @@ private void testChunkedInfer(AzureOpenAiEmbeddingsModel model) throws IOExcepti } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1484,7 +1483,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException, URISy } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index cf114db45619f..758c38166778b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -1635,7 +1635,6 @@ public void testDefaultSimilarity() { assertEquals(SimilarityMeasure.DOT_PRODUCT, CohereService.defaultSimilarity()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ {"event_type":"text-generation", "text":"hello"} @@ -1669,7 +1668,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { "event_type":"stream-end", "finish_reason":"ERROR", "response":{ "text": "how dare you" } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index beba9b1a92477..cf1438b334478 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -1007,7 +1007,6 @@ public void testInfer_SendsRequest() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1057,7 +1056,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { From 9eea83c45f1c800626d83067a9ecc9c1c9ef7e27 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine 
<58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 24 Oct 2024 05:08:01 +1100 Subject: [PATCH 069/202] Mute org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT testFileSettingsReprocessedOnRestartWithoutVersionChange #115450 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index cce16a07e647a..bd0145611237b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT method: test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} issue: https://github.com/elastic/elasticsearch/issues/115412 +- class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT + method: testFileSettingsReprocessedOnRestartWithoutVersionChange + issue: https://github.com/elastic/elasticsearch/issues/115450 # Examples: # From 176015d59b71e633822cec21042c6f07c4a7b415 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 23 Oct 2024 12:07:10 -0700 Subject: [PATCH 070/202] Temporarily disable buildkite upload on Windows agents (#115449) --- .../gradle/internal/ElasticsearchBuildCompletePlugin.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index 25ad5bcf89581..7d9537feaea56 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -15,6 +15,7 @@ import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream; import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -61,7 +62,7 @@ public void apply(Project target) { ? System.getenv("BUILD_NUMBER") : System.getenv("BUILDKITE_BUILD_NUMBER"); String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST"); - if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) { + if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false && OS.current() != OS.WINDOWS) { File targetFile = calculateTargetFile(target, buildNumber); File projectDir = target.getProjectDir(); File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); From 60678a1a9caf69177c588f7fc3c4585013e027f2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 23 Oct 2024 15:10:50 -0400 Subject: [PATCH 071/202] ESQL: Fix filtered grouping on ords (#115312) This fixes filtered aggs when they are grouped on a field with ordinals. This looks like: ``` | STATS max = max(salary) WHERE salary > 0 BY job_positions ``` when the `job_positions` field is a keyword field with doc values. In that case we use a faster group-by-segment-ordinals algorithm that needs to be able to merge the results of aggregators from multiple segments. This previously failed with a `ClassCastException` because of a mistake. Also! the group-by-segment-ordinals algorithm wasn't properly releasing the closure used to add inputs, causing a breaker size leak. 
This wasn't really leaking memory, but leaking *tracking* of memory. Closes #114897 --- docs/changelog/115312.yaml | 6 + .../FilteredGroupingAggregatorFunction.java | 2 +- .../operator/OrdinalsGroupingOperator.java | 4 +- .../FilteredAggregatorFunctionTests.java | 1 - ...lteredGroupingAggregatorFunctionTests.java | 39 +++++- .../GroupingAggregatorFunctionTestCase.java | 7 +- .../src/main/resources/stats.csv-spec | 126 ++++++++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 + 8 files changed, 183 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/115312.yaml diff --git a/docs/changelog/115312.yaml b/docs/changelog/115312.yaml new file mode 100644 index 0000000000000..acf6bbc69c36c --- /dev/null +++ b/docs/changelog/115312.yaml @@ -0,0 +1,6 @@ +pr: 115312 +summary: "ESQL: Fix filtered grouping on ords" +area: ES|QL +type: bug +issues: + - 114897 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java index 3e38b6d6fe9fa..8d3dbf3164c47 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java @@ -97,7 +97,7 @@ public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Pa @Override public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { - next.addIntermediateRowInput(groupId, input, position); + next.addIntermediateRowInput(groupId, ((FilteredGroupingAggregatorFunction) input).next(), position); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 5e0e625abb914..7cf47bc7fed1c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -372,8 +372,8 @@ static final class OrdinalSegmentAggregator implements Releasable, SeenGroupIds } void addInput(IntVector docs, Page page) { + GroupingAggregatorFunction.AddInput[] prepared = new GroupingAggregatorFunction.AddInput[aggregators.size()]; try { - GroupingAggregatorFunction.AddInput[] prepared = new GroupingAggregatorFunction.AddInput[aggregators.size()]; for (int i = 0; i < prepared.length; i++) { prepared[i] = aggregators.get(i).prepareProcessPage(this, page); } @@ -392,7 +392,7 @@ void addInput(IntVector docs, Page page) { } catch (IOException e) { throw new UncheckedIOException(e); } finally { - page.releaseBlocks(); + Releasables.close(page::releaseBlocks, Releasables.wrap(prepared)); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java index da2c3502144db..35ecced470e01 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java 
@@ -27,7 +27,6 @@ public class FilteredAggregatorFunctionTests extends AggregatorFunctionTestCase { private final List unclosed = Collections.synchronizedList(new ArrayList<>()); - // TODO some version of this test that applies across all aggs @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { return new FilteredAggregatorFunctionSupplier( diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java index 87cb99bd0709f..26971dc927cd1 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java @@ -11,12 +11,14 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.LongIntBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Tuple; import org.junit.After; @@ -31,7 +33,6 @@ public class FilteredGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { private final List unclosed = Collections.synchronizedList(new ArrayList<>()); - // TODO some version of this test that applies across all aggs @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { return new FilteredAggregatorFunctionSupplier( @@ -104,6 +105,42 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { ); } + /** + * Tests {@link GroupingAggregator#addIntermediateRow} by building results using the traditional + * add mechanism and using {@link GroupingAggregator#addIntermediateRow} then asserting that they + * produce the same output. 
+ */ + public void testAddIntermediateRowInput() { + DriverContext ctx = driverContext(); + AggregatorFunctionSupplier supplier = aggregatorFunction(channels(AggregatorMode.SINGLE)); + Block[] results = new Block[2]; + try ( + GroupingAggregatorFunction main = supplier.groupingAggregator(ctx); + GroupingAggregatorFunction leaf = supplier.groupingAggregator(ctx); + SourceOperator source = simpleInput(ctx.blockFactory(), 10); + ) { + Page p; + while ((p = source.getOutput()) != null) { + try ( + IntVector group = ctx.blockFactory().newConstantIntVector(0, p.getPositionCount()); + GroupingAggregatorFunction.AddInput addInput = leaf.prepareProcessPage(null, p) + ) { + addInput.add(0, group); + } finally { + p.releaseBlocks(); + } + } + main.addIntermediateRowInput(0, leaf, 0); + try (IntVector selected = ctx.blockFactory().newConstantIntVector(0, 1)) { + main.evaluateFinal(results, 0, selected, ctx); + leaf.evaluateFinal(results, 1, selected, ctx); + } + assertThat(results[0], equalTo(results[1])); + } finally { + Releasables.close(results); + } + } + @After public void checkUnclosed() { for (Exception tracker : unclosed) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java index 9414e076a26e6..cb190dfffafb9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java @@ -89,14 +89,17 @@ protected final Operator.OperatorFactory simpleWithMode(AggregatorMode mode) { return simpleWithMode(mode, Function.identity()); } + protected List channels(AggregatorMode mode) { + return mode.isInputPartial() ? range(1, 1 + aggregatorIntermediateBlockCount()).boxed().toList() : List.of(1); + } + private Operator.OperatorFactory simpleWithMode( AggregatorMode mode, Function wrap ) { - List channels = mode.isInputPartial() ? 
range(1, 1 + aggregatorIntermediateBlockCount()).boxed().toList() : List.of(1); int emitChunkSize = between(100, 200); - AggregatorFunctionSupplier supplier = wrap.apply(aggregatorFunction(channels)); + AggregatorFunctionSupplier supplier = wrap.apply(aggregatorFunction(channels(mode))); if (randomBoolean()) { supplier = chunkGroups(emitChunkSize, supplier); } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 6d4c596e8d7de..2dc21a86e6394 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -2529,3 +2529,129 @@ FROM employees | eval x = [1,2,3], y = 5 + 6 | stats m = max(y) by y+1 m:integer | y+1:integer 11 | 12 ; + +filterIsAlwaysTrue +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE salary > 0 +; + +max:integer +74999 +; + +filterIsAlwaysFalse +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE first_name == "" +; + +max:integer +null +; + +filterSometimesMatches +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE first_name IS NULL +; + +max:integer +70011 +; + +groupingFilterIsAlwaysTrue +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE salary > 0 BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +74970 | A +58121 | B +74999 | D +58715 | H +; + +groupingFilterIsAlwaysFalse +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE first_name == "" BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +null | A +null | B +null | D +null | H +; + +groupingFilterSometimesMatches +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE first_name IS NULL BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +62233 | A +39878 | B +67492 | D +null | H +; + +groupingByOrdinalsFilterIsAlwaysTrue +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE salary > 0 BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +74970 | Accountant +69904 | Architect +58121 | Business Analyst +74999 | Data Scientist +; + +groupingByOrdinalsFilterIsAlwaysFalse +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE first_name == "" BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +null | Accountant +null | Architect +null | Business Analyst +null | Data Scientist +; + +groupingByOrdinalsFilterSometimesMatches +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE first_name IS NULL BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +39878 | Accountant +62233 | Architect +39878 | Business Analyst +67492 | Data Scientist +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 5e336e6759b1e..dfca6ab2bf814 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -399,6 +399,11 @@ public enum Cap { */ PER_AGG_FILTERING, + /** + * Fix {@link #PER_AGG_FILTERING} grouped by ordinals. + */ + PER_AGG_FILTERING_ORDS, + /** * Fix for https://github.com/elastic/elasticsearch/issues/114714 */ From c9f995ad92145733900465e8383ab0f38882cda7 Mon Sep 17 00:00:00 2001 From: Ankita Kumar Date: Wed, 23 Oct 2024 16:17:40 -0400 Subject: [PATCH 072/202] Log reindexing failures (#112676) Wait for reindexing tasks to finish during shutdown for an amount of time defined by settings. Also log the number of reindexing tasks still in flight after the wait. --- .../index/reindex/ReindexNodeShutdownIT.java | 139 +++++++++++++ .../common/settings/ClusterSettings.java | 3 + .../java/org/elasticsearch/node/Node.java | 119 +---------- .../elasticsearch/node/NodeConstruction.java | 3 + .../node/ShutdownPrepareService.java | 184 ++++++++++++++++++ 5 files changed, 332 insertions(+), 116 deletions(-) create mode 100644 modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java create mode 100644 server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java new file mode 100644 index 0000000000000..4a001bb2d0969 --- /dev/null +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.node.ShutdownPrepareService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.node.ShutdownPrepareService.MAXIMUM_REINDEXING_TIMEOUT_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +/** + * Test that a wait added during shutdown is necessary for a large reindexing task to complete. + * The test works as follows: + * 1. 
Start a large (reasonably long running) reindexing request on the coordinator-only node. + * 2. Check that the reindexing task appears on the coordinating node + * 3. With a 10s timeout value for MAXIMUM_REINDEXING_TIMEOUT_SETTING, + * wait for the reindexing task to complete before closing the node + * 4. Confirm that the reindexing task succeeds with the wait (it will fail without it) + */ +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST) +public class ReindexNodeShutdownIT extends ESIntegTestCase { + + protected static final String INDEX = "reindex-shutdown-index"; + protected static final String DEST_INDEX = "dest-index"; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(ReindexPlugin.class); + } + + protected ReindexRequestBuilder reindex(String nodeName) { + return new ReindexRequestBuilder(internalCluster().client(nodeName)); + } + + public void testReindexWithShutdown() throws Exception { + final String masterNodeName = internalCluster().startMasterOnlyNode(); + final String dataNodeName = internalCluster().startDataOnlyNode(); + + final Settings COORD_SETTINGS = Settings.builder() + .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(10)) + .build(); + final String coordNodeName = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + + ensureStableCluster(3); + + int numDocs = 20000; + createIndex(numDocs); + createReindexTaskAndShutdown(coordNodeName); + checkDestinationIndex(dataNodeName, numDocs); + } + + private void createIndex(int numDocs) { + // INDEX will be created on the dataNode + createIndex(INDEX); + + logger.debug("setting up [{}] docs", numDocs); + indexRandom( + true, + false, + true, + IntStream.range(0, numDocs) + .mapToObj(i -> prepareIndex(INDEX).setId(String.valueOf(i)).setSource("n", i)) + .collect(Collectors.toList()) + ); + + // Checks that the all documents have been indexed and correctly counted + assertHitCount(prepareSearch(INDEX).setSize(0).setTrackTotalHits(true), numDocs); + } + + private void createReindexTaskAndShutdown(final String coordNodeName) throws Exception { + AbstractBulkByScrollRequestBuilder builder = reindex(coordNodeName).source(INDEX).destination(DEST_INDEX); + AbstractBulkByScrollRequest reindexRequest = builder.request(); + ShutdownPrepareService shutdownPrepareService = internalCluster().getInstance(ShutdownPrepareService.class, coordNodeName); + + TaskManager taskManager = internalCluster().getInstance(TransportService.class, coordNodeName).getTaskManager(); + + // Now execute the reindex action... 
+ ActionListener<BulkByScrollResponse> reindexListener = new ActionListener<>() { + @Override + public void onResponse(BulkByScrollResponse bulkByScrollResponse) { + assertNull(bulkByScrollResponse.getReasonCancelled()); + logger.debug(bulkByScrollResponse.toString()); + } + + @Override + public void onFailure(Exception e) { + logger.debug("Encountered " + e.toString()); + fail(e, "Encountered " + e.toString()); + } + }; + internalCluster().client(coordNodeName).execute(ReindexAction.INSTANCE, reindexRequest, reindexListener); + + // Check for the reindex task to appear in the tasks list and immediately stop the coordinating node + waitForTask(ReindexAction.INSTANCE.name(), coordNodeName); + shutdownPrepareService.prepareForShutdown(taskManager); + internalCluster().stopNode(coordNodeName); + } + + // Make sure all documents from the source index have been reindexed into the destination index + private void checkDestinationIndex(String dataNodeName, int numDocs) throws Exception { + assertTrue(indexExists(DEST_INDEX)); + flushAndRefresh(DEST_INDEX); + assertBusy(() -> { assertHitCount(prepareSearch(DEST_INDEX).setSize(0).setTrackTotalHits(true), numDocs); }); + } + + private static void waitForTask(String actionName, String nodeName) throws Exception { + assertBusy(() -> { + ListTasksResponse tasks = clusterAdmin().prepareListTasks(nodeName).setActions(actionName).setDetailed(true).get(); + tasks.rethrowFailures("Find my task"); + for (TaskInfo taskInfo : tasks.getTasks()) { + // Skip tasks with a parent because those are children of the task we want + if (taskInfo.parentTaskId().isSet() == false) return; + } + fail("Couldn't find task after waiting, tasks=" + tasks.getTasks()); + }, 10, TimeUnit.SECONDS); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 8cbacccb915ac..7bb78eabc8727 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -108,6 +108,7 @@ import org.elasticsearch.monitor.process.ProcessService; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeRoleSettings; +import org.elasticsearch.node.ShutdownPrepareService; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.plugins.PluginsService; @@ -456,6 +457,8 @@ public void apply(Settings value, Settings current, Settings previous) { Environment.PATH_SHARED_DATA_SETTING, NodeEnvironment.NODE_ID_SEED_SETTING, Node.INITIAL_STATE_TIMEOUT_SETTING, + ShutdownPrepareService.MAXIMUM_SHUTDOWN_TIMEOUT_SETTING, + ShutdownPrepareService.MAXIMUM_REINDEXING_TIMEOUT_SETTING, DiscoveryModule.DISCOVERY_TYPE_SETTING, DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING, DiscoveryModule.ELECTION_STRATEGY_SETTING, diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 32a65302922a8..e30f76fdd9414 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -13,10 +13,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.PlainActionFuture; -import
org.elasticsearch.action.support.RefCountingListener; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.client.internal.Client; @@ -82,7 +78,6 @@ import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.TaskCancellationService; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.tasks.TaskResultsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterPortSettings; @@ -106,18 +101,12 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.function.BiConsumer; import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; import javax.net.ssl.SNIHostName; -import static org.elasticsearch.core.Strings.format; - /** * A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used * in order to use a {@link Client} to perform actions/operations against the cluster. @@ -161,12 +150,6 @@ public class Node implements Closeable { Property.NodeScope ); - public static final Setting MAXIMUM_SHUTDOWN_TIMEOUT_SETTING = Setting.positiveTimeSetting( - "node.maximum_shutdown_grace_period", - TimeValue.ZERO, - Setting.Property.NodeScope - ); - private final Lifecycle lifecycle = new Lifecycle(); /** @@ -187,6 +170,7 @@ public class Node implements Closeable { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; private final TerminationHandler terminationHandler; + // for testing final NamedWriteableRegistry namedWriteableRegistry; final NamedXContentRegistry namedXContentRegistry; @@ -606,105 +590,8 @@ public synchronized void close() throws IOException { * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. 
*/ public void prepareForClose() { - final var maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(this.settings()); - - record Stopper(String name, SubscribableListener listener) { - boolean isIncomplete() { - return listener().isDone() == false; - } - } - - final var stoppers = new ArrayList(); - final var allStoppersFuture = new PlainActionFuture(); - try (var listeners = new RefCountingListener(allStoppersFuture)) { - final BiConsumer stopperRunner = (name, action) -> { - final var stopper = new Stopper(name, new SubscribableListener<>()); - stoppers.add(stopper); - stopper.listener().addListener(listeners.acquire()); - new Thread(() -> { - try { - action.run(); - } catch (Exception ex) { - logger.warn("unexpected exception in shutdown task [" + stopper.name() + "]", ex); - } finally { - stopper.listener().onResponse(null); - } - }, stopper.name()).start(); - }; - - stopperRunner.accept("http-server-transport-stop", injector.getInstance(HttpServerTransport.class)::close); - stopperRunner.accept("async-search-stop", () -> awaitSearchTasksComplete(maxTimeout)); - if (terminationHandler != null) { - stopperRunner.accept("termination-handler-stop", terminationHandler::handleTermination); - } - } - - final Supplier incompleteStoppersDescriber = () -> stoppers.stream() - .filter(Stopper::isIncomplete) - .map(Stopper::name) - .collect(Collectors.joining(", ", "[", "]")); - - try { - if (TimeValue.ZERO.equals(maxTimeout)) { - allStoppersFuture.get(); - } else { - allStoppersFuture.get(maxTimeout.millis(), TimeUnit.MILLISECONDS); - } - } catch (ExecutionException e) { - assert false : e; // listeners are never completed exceptionally - logger.warn("failed during graceful shutdown tasks", e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("interrupted while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get(), e); - } catch (TimeoutException e) { - logger.warn("timed out while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get()); - } - } - - private void awaitSearchTasksComplete(TimeValue asyncSearchTimeout) { - TaskManager taskManager = injector.getInstance(TransportService.class).getTaskManager(); - long millisWaited = 0; - while (true) { - long searchTasksRemaining = taskManager.getTasks() - .values() - .stream() - .filter(task -> TransportSearchAction.TYPE.name().equals(task.getAction())) - .count(); - if (searchTasksRemaining == 0) { - logger.debug("all search tasks complete"); - return; - } else { - // Let the system work on those searches for a while. We're on a dedicated thread to manage app shutdown, so we - // literally just want to wait and not take up resources on this thread for now. Poll period chosen to allow short - // response times, but checking the tasks list is relatively expensive, and we don't want to waste CPU time we could - // be spending on finishing those searches. 
- final TimeValue pollPeriod = TimeValue.timeValueMillis(500); - millisWaited += pollPeriod.millis(); - if (TimeValue.ZERO.equals(asyncSearchTimeout) == false && millisWaited >= asyncSearchTimeout.millis()) { - logger.warn( - format( - "timed out after waiting [%s] for [%d] search tasks to finish", - asyncSearchTimeout.toString(), - searchTasksRemaining - ) - ); - return; - } - logger.debug(format("waiting for [%s] search tasks to finish, next poll in [%s]", searchTasksRemaining, pollPeriod)); - try { - Thread.sleep(pollPeriod.millis()); - } catch (InterruptedException ex) { - logger.warn( - format( - "interrupted while waiting [%s] for [%d] search tasks to finish", - asyncSearchTimeout.toString(), - searchTasksRemaining - ) - ); - return; - } - } - } + injector.getInstance(ShutdownPrepareService.class) + .prepareForShutdown(injector.getInstance(TransportService.class).getTaskManager()); } /** diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 8e66486329577..7e3991c1df1f4 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -1099,6 +1099,8 @@ private void construct( telemetryProvider.getTracer() ); + final ShutdownPrepareService shutdownPrepareService = new ShutdownPrepareService(settings, httpServerTransport, terminationHandler); + modules.add( loadPersistentTasksService( settingsModule, @@ -1200,6 +1202,7 @@ private void construct( b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); b.bind(DataStreamAutoShardingService.class).toInstance(dataStreamAutoShardingService); b.bind(FailureStoreMetrics.class).toInstance(failureStoreMetrics); + b.bind(ShutdownPrepareService.class).toInstance(shutdownPrepareService); }); if (ReadinessService.enabled(environment)) { diff --git a/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java b/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java new file mode 100644 index 0000000000000..ab9537053f45d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.node.internal.TerminationHandler; +import org.elasticsearch.tasks.TaskManager; + +import java.util.ArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiConsumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.core.Strings.format; + +/** + * This class was created to extract out the logic from {@link Node#prepareForClose()} to facilitate testing. + *
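+ * It stops the HTTP server transport and waits for running search and reindex tasks to complete, within configured grace periods.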

+ * Invokes hooks to prepare this node to be closed. This should be called when Elasticsearch receives a request to shut down + * gracefully from the underlying operating system, before system resources are closed. + *

+ * Note that this class is part of infrastructure to react to signals from the operating system - most graceful shutdown + * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. + */ +public class ShutdownPrepareService { + + private final Logger logger = LogManager.getLogger(ShutdownPrepareService.class); + private final Settings settings; + private final HttpServerTransport httpServerTransport; + private final TerminationHandler terminationHandler; + private volatile boolean hasBeenShutdown = false; + + public ShutdownPrepareService(Settings settings, HttpServerTransport httpServerTransport, TerminationHandler terminationHandler) { + this.settings = settings; + this.httpServerTransport = httpServerTransport; + this.terminationHandler = terminationHandler; + } + + public static final Setting MAXIMUM_SHUTDOWN_TIMEOUT_SETTING = Setting.positiveTimeSetting( + "node.maximum_shutdown_grace_period", + TimeValue.ZERO, + Setting.Property.NodeScope + ); + + public static final Setting MAXIMUM_REINDEXING_TIMEOUT_SETTING = Setting.positiveTimeSetting( + "node.maximum_reindexing_grace_period", + TimeValue.timeValueSeconds(10), + Setting.Property.NodeScope + ); + + /** + * Invokes hooks to prepare this node to be closed. This should be called when Elasticsearch receives a request to shut down + * gracefully from the underlying operating system, before system resources are closed. This method will block + * until the node is ready to shut down. + *
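+     * The wait is bounded by the {@code node.maximum_shutdown_grace_period} setting; the default of zero means no upper bound.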

+ * Note that this class is part of infrastructure to react to signals from the operating system - most graceful shutdown + * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. + */ + public void prepareForShutdown(TaskManager taskManager) { + assert hasBeenShutdown == false; + hasBeenShutdown = true; + final var maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(settings); + final var reindexTimeout = MAXIMUM_REINDEXING_TIMEOUT_SETTING.get(settings); + + record Stopper(String name, SubscribableListener listener) { + boolean isIncomplete() { + return listener().isDone() == false; + } + } + + final var stoppers = new ArrayList(); + final var allStoppersFuture = new PlainActionFuture(); + try (var listeners = new RefCountingListener(allStoppersFuture)) { + final BiConsumer stopperRunner = (name, action) -> { + final var stopper = new Stopper(name, new SubscribableListener<>()); + stoppers.add(stopper); + stopper.listener().addListener(listeners.acquire()); + new Thread(() -> { + try { + action.run(); + } catch (Exception ex) { + logger.warn("unexpected exception in shutdown task [" + stopper.name() + "]", ex); + } finally { + stopper.listener().onResponse(null); + } + }, stopper.name()).start(); + }; + + stopperRunner.accept("http-server-transport-stop", httpServerTransport::close); + stopperRunner.accept("async-search-stop", () -> awaitSearchTasksComplete(maxTimeout, taskManager)); + stopperRunner.accept("reindex-stop", () -> awaitReindexTasksComplete(reindexTimeout, taskManager)); + if (terminationHandler != null) { + stopperRunner.accept("termination-handler-stop", terminationHandler::handleTermination); + } + } + + final Supplier incompleteStoppersDescriber = () -> stoppers.stream() + .filter(Stopper::isIncomplete) + .map(Stopper::name) + .collect(Collectors.joining(", ", "[", "]")); + + try { + if (TimeValue.ZERO.equals(maxTimeout)) { + allStoppersFuture.get(); + } else { + allStoppersFuture.get(maxTimeout.millis(), TimeUnit.MILLISECONDS); + } + } catch (ExecutionException e) { + assert false : e; // listeners are never completed exceptionally + logger.warn("failed during graceful shutdown tasks", e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("interrupted while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get(), e); + } catch (TimeoutException e) { + logger.warn("timed out while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get()); + } + } + + private void awaitTasksComplete(TimeValue timeout, String taskName, TaskManager taskManager) { + long millisWaited = 0; + while (true) { + long tasksRemaining = taskManager.getTasks().values().stream().filter(task -> taskName.equals(task.getAction())).count(); + if (tasksRemaining == 0) { + logger.debug("all " + taskName + " tasks complete"); + return; + } else { + // Let the system work on those tasks for a while. We're on a dedicated thread to manage app shutdown, so we + // literally just want to wait and not take up resources on this thread for now. Poll period chosen to allow short + // response times, but checking the tasks list is relatively expensive, and we don't want to waste CPU time we could + // be spending on finishing those tasks. 
+ final TimeValue pollPeriod = TimeValue.timeValueMillis(500); + millisWaited += pollPeriod.millis(); + if (TimeValue.ZERO.equals(timeout) == false && millisWaited >= timeout.millis()) { + logger.warn( + format("timed out after waiting [%s] for [%d] " + taskName + " tasks to finish", timeout.toString(), tasksRemaining) + ); + return; + } + logger.debug(format("waiting for [%s] " + taskName + " tasks to finish, next poll in [%s]", tasksRemaining, pollPeriod)); + try { + Thread.sleep(pollPeriod.millis()); + } catch (InterruptedException ex) { + logger.warn( + format( + "interrupted while waiting [%s] for [%d] " + taskName + " tasks to finish", + timeout.toString(), + tasksRemaining + ) + ); + return; + } + } + } + } + + private void awaitSearchTasksComplete(TimeValue asyncSearchTimeout, TaskManager taskManager) { + awaitTasksComplete(asyncSearchTimeout, TransportSearchAction.NAME, taskManager); + } + + private void awaitReindexTasksComplete(TimeValue asyncReindexTimeout, TaskManager taskManager) { + awaitTasksComplete(asyncReindexTimeout, ReindexAction.NAME, taskManager); + } + +} From f04bf5c3561f19f30f21ba28419c8e7ed6ed7b3a Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Wed, 23 Oct 2024 13:22:26 -0700 Subject: [PATCH 073/202] Apply workaround for synthetic source of object arrays inside nested objects (#115275) --- rest-api-spec/build.gradle | 1 + .../21_synthetic_source_stored.yml | 11 ++- .../index/mapper/DocumentParser.java | 6 +- .../index/mapper/DocumentParserContext.java | 39 +++++--- .../mapper/IgnoredSourceFieldMapper.java | 3 + .../index/mapper/MapperFeatures.java | 3 +- .../mapper/IgnoredSourceFieldMapperTests.java | 88 +++++++++++++++++++ 7 files changed, 132 insertions(+), 19 deletions(-) diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 7525ff2dc12d2..4bd293f0a8641 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -59,4 +59,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.replaceValueInMatch("profile.shards.0.dfs.knn.0.query.0.description", "DocAndScoreQuery[0,...][0.009673266,...],0.009673266", "dfs knn vector profiling with vector_operations_count") task.skipTest("indices.sort/10_basic/Index Sort", "warning does not exist for compatibility") task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") + task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index eab51427876aa..6a4e92f694220 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -319,8 +319,8 @@ object param - nested object array next to other fields: --- object param - nested object with stored array: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source + cluster_features: ["mapper.ignored_source.always_store_object_arrays_in_nested", "mapper.bwc_workaround_9_0"] + reason: requires fix to object array handling - do: indices.create: @@ -356,8 +356,11 @@ object param - nested object with stored array: sort: name - 
match: { hits.total.value: 2 } - match: { hits.hits.0._source.name: A } - - match: { hits.hits.0._source.nested_array_regular.0.b.c: [ 10, 100] } - - match: { hits.hits.0._source.nested_array_regular.1.b.c: [ 20, 200] } + # due to a workaround for #115261 + - match: { hits.hits.0._source.nested_array_regular.0.b.0.c: 10 } + - match: { hits.hits.0._source.nested_array_regular.0.b.1.c: 100 } + - match: { hits.hits.0._source.nested_array_regular.1.b.0.c: 20 } + - match: { hits.hits.0._source.nested_array_regular.1.b.1.c: 200 } - match: { hits.hits.1._source.name: B } - match: { hits.hits.1._source.nested_array_stored.0.b.0.c: 10 } - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index bac987a3df96d..1ed0a117ddd89 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -810,8 +810,10 @@ private static void parseNonDynamicArray( boolean objectWithFallbackSyntheticSource = false; if (mapper instanceof ObjectMapper objectMapper) { mode = getSourceKeepMode(context, objectMapper.sourceKeepMode()); - objectWithFallbackSyntheticSource = (mode == Mapper.SourceKeepMode.ALL - || (mode == Mapper.SourceKeepMode.ARRAYS && objectMapper instanceof NestedObjectMapper == false)); + objectWithFallbackSyntheticSource = mode == Mapper.SourceKeepMode.ALL + // Inside nested objects we always store object arrays as a workaround for #115261. + || ((context.inNestedScope() || mode == Mapper.SourceKeepMode.ARRAYS) + && objectMapper instanceof NestedObjectMapper == false); } boolean fieldWithFallbackSyntheticSource = false; boolean fieldWithStoredArraySource = false; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index ef87ce52fbabf..3b1f1a6d2809a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -104,6 +104,16 @@ public int get() { } } + /** + * Defines the scope parser is currently in. + * This is used for synthetic source related logic during parsing. 
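+     * SINGLETON is the default, ARRAY is set while parsing an array of objects, and NESTED while parsing a nested document.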
+ */ + private enum Scope { + SINGLETON, + ARRAY, + NESTED + } + private final MappingLookup mappingLookup; private final MappingParserContext mappingParserContext; private final SourceToParse sourceToParse; @@ -112,7 +122,7 @@ public int get() { private final List ignoredFieldValues; private final List ignoredFieldsMissingValues; private boolean inArrayScopeEnabled; - private boolean inArrayScope; + private Scope currentScope; private final Map> dynamicMappers; private final DynamicMapperSize dynamicMappersSize; @@ -145,7 +155,7 @@ private DocumentParserContext( List ignoredFieldValues, List ignoredFieldsWithNoSource, boolean inArrayScopeEnabled, - boolean inArrayScope, + Scope currentScope, Map> dynamicMappers, Map dynamicObjectMappers, Map> dynamicRuntimeFields, @@ -167,7 +177,7 @@ private DocumentParserContext( this.ignoredFieldValues = ignoredFieldValues; this.ignoredFieldsMissingValues = ignoredFieldsWithNoSource; this.inArrayScopeEnabled = inArrayScopeEnabled; - this.inArrayScope = inArrayScope; + this.currentScope = currentScope; this.dynamicMappers = dynamicMappers; this.dynamicObjectMappers = dynamicObjectMappers; this.dynamicRuntimeFields = dynamicRuntimeFields; @@ -192,7 +202,7 @@ private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, in.ignoredFieldValues, in.ignoredFieldsMissingValues, in.inArrayScopeEnabled, - in.inArrayScope, + in.currentScope, in.dynamicMappers, in.dynamicObjectMappers, in.dynamicRuntimeFields, @@ -224,7 +234,7 @@ protected DocumentParserContext( new ArrayList<>(), new ArrayList<>(), mappingParserContext.getIndexSettings().isSyntheticSourceSecondDocParsingPassEnabled(), - false, + Scope.SINGLETON, new HashMap<>(), new HashMap<>(), new HashMap<>(), @@ -335,7 +345,7 @@ public final void deduplicateIgnoredFieldValues(final Set fullNames) { public final DocumentParserContext addIgnoredFieldFromContext(IgnoredSourceFieldMapper.NameValue ignoredFieldWithNoSource) throws IOException { if (canAddIgnoredField()) { - if (inArrayScope) { + if (currentScope == Scope.ARRAY) { // The field is an array within an array, store all sub-array elements. ignoredFieldsMissingValues.add(ignoredFieldWithNoSource); return cloneWithRecordedSource(); @@ -379,10 +389,10 @@ public final DocumentParserContext maybeCloneForArray(Mapper mapper) throws IOEx if (canAddIgnoredField() && mapper instanceof ObjectMapper && mapper instanceof NestedObjectMapper == false - && inArrayScope == false + && currentScope != Scope.ARRAY && inArrayScopeEnabled) { DocumentParserContext subcontext = switchParser(parser()); - subcontext.inArrayScope = true; + subcontext.currentScope = Scope.ARRAY; return subcontext; } return this; @@ -673,6 +683,10 @@ public boolean isWithinCopyTo() { return false; } + public boolean inNestedScope() { + return currentScope == Scope.NESTED; + } + public final DocumentParserContext createChildContext(ObjectMapper parent) { return new Wrapper(parent, this); } @@ -716,10 +730,11 @@ public LuceneDocument doc() { return document; } }; - // Disable tracking array scopes for ignored source, as it would be added to the parent doc. - // Nested documents are added to preserve object structure within arrays of objects, so the use - // of ignored source for arrays inside them should be mostly redundant. - cloned.inArrayScope = false; + + cloned.currentScope = Scope.NESTED; + // Disable using second parsing pass since it currently can not determine which parts + // of source belong to which nested document. + // See #115261. 
cloned.inArrayScopeEnabled = false; return cloned; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index 296c2c5311d9a..70d73fc2ffb9a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -58,6 +58,9 @@ public class IgnoredSourceFieldMapper extends MetadataFieldMapper { static final NodeFeature TRACK_IGNORED_SOURCE = new NodeFeature("mapper.track_ignored_source"); static final NodeFeature DONT_EXPAND_DOTS_IN_IGNORED_SOURCE = new NodeFeature("mapper.ignored_source.dont_expand_dots"); + static final NodeFeature ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS = new NodeFeature( + "mapper.ignored_source.always_store_object_arrays_in_nested" + ); /* Setting to disable encoding and writing values for this field. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 31c89b2fc8ad4..026c7c98d7aeb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -62,7 +62,8 @@ public Set getTestFeatures() { return Set.of( RangeFieldMapper.DATE_RANGE_INDEXING_FIX, IgnoredSourceFieldMapper.DONT_EXPAND_DOTS_IN_IGNORED_SOURCE, - SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION + SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION, + IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS ); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 934744ef3ef96..7a4ce8bcb03fa 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -962,6 +962,94 @@ public void testArrayWithNestedObjects() throws IOException { {"path":{"to":[{"id":[1,20,3]},{"id":10},{"id":0}]}}""", syntheticSource); } + public void testObjectArrayWithinNestedObjects() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").startObject("properties"); + { + b.startObject("to").field("type", "nested").startObject("properties"); + { + b.startObject("obj").startObject("properties"); + { + b.startObject("id").field("type", "integer").field("synthetic_source_keep", "arrays").endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + })).documentMapper(); + + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("to"); + { + b.startArray("obj"); + { + b.startObject().array("id", 1, 20, 3).endObject(); + b.startObject().field("id", 10).endObject(); + } + b.endArray(); + } + b.endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"to":{"obj":[{"id":[1,20,3]},{"id":10}]}}}""", syntheticSource); + } + + public void testObjectArrayWithinNestedObjectsArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").startObject("properties"); + { + b.startObject("to").field("type", "nested").startObject("properties"); + { 
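+                    // Same mapping as the previous test: "obj" is a plain object whose integer "id" keeps arrays in synthetic source.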
+ b.startObject("obj").startObject("properties"); + { + b.startObject("id").field("type", "integer").field("synthetic_source_keep", "arrays").endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + })).documentMapper(); + + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startArray("to"); + { + b.startObject(); + { + b.startArray("obj"); + { + b.startObject().array("id", 1, 20, 3).endObject(); + b.startObject().field("id", 10).endObject(); + } + b.endArray(); + } + b.endObject(); + b.startObject(); + { + b.startArray("obj"); + { + b.startObject().array("id", 200, 300, 500).endObject(); + b.startObject().field("id", 100).endObject(); + } + b.endArray(); + } + b.endObject(); + } + b.endArray(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"to":[{"obj":[{"id":[1,20,3]},{"id":10}]},{"obj":[{"id":[200,300,500]},{"id":100}]}]}}""", syntheticSource); + } + public void testArrayWithinArray() throws IOException { DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { b.startObject("path"); From d8bcbb6bede44334719d1879bf8603425e29a731 Mon Sep 17 00:00:00 2001 From: Paul Tavares <56442535+paul-tavares@users.noreply.github.com> Date: Wed, 23 Oct 2024 16:29:34 -0400 Subject: [PATCH 074/202] [Security Solution] Add `create_index` to `kibana_system` role for Elastic Defend indices (#115241) Adds create_index privilege to the kibana_system role for Elastic Defend internal indices Indices: ``` .logs-endpoint.heartbeat-* .logs-endpoint.diagnostic.collection-* .logs-endpoint.action.responses-* ``` --- docs/changelog/115241.yaml | 6 ++++ .../KibanaOwnedReservedRoleDescriptors.java | 19 +++++++---- .../authz/store/ReservedRolesStoreTests.java | 34 +++++++++---------- 3 files changed, 35 insertions(+), 24 deletions(-) create mode 100644 docs/changelog/115241.yaml diff --git a/docs/changelog/115241.yaml b/docs/changelog/115241.yaml new file mode 100644 index 0000000000000..b7119d7f6aaeb --- /dev/null +++ b/docs/changelog/115241.yaml @@ -0,0 +1,6 @@ +pr: 115241 +summary: "[Security Solution] Add `create_index` to `kibana_system` role for index/DS\ + \ `.logs-endpoint.action.responses-*`" +area: Authorization +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 0028508e87f32..5fb753ab55aab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -152,8 +152,11 @@ static RoleDescriptor kibanaSystem(String name) { // Data telemetry reads mappings, metadata and stats of indices RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("view_index_metadata", "monitor").build(), // Endpoint diagnostic information. 
Kibana reads from these indices to send - // telemetry - RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.diagnostic.collection-*").privileges("read").build(), + // telemetry and also creates the index when policies are first created + RoleDescriptor.IndicesPrivileges.builder() + .indices(".logs-endpoint.diagnostic.collection-*") + .privileges("read", "create_index") + .build(), // Fleet secrets. Kibana can only write to this index. RoleDescriptor.IndicesPrivileges.builder() .indices(".fleet-secrets*") @@ -277,17 +280,19 @@ static RoleDescriptor kibanaSystem(String name) { ) .build(), // Endpoint specific action responses. Kibana reads and writes (for third party - // agents) to the index - // to display action responses to the user. + // agents) to the index to display action responses to the user. + // `create_index`: is necessary in order to ensure that the DOT datastream index is + // created by Kibana in order to avoid errors on the Elastic Defend side when streaming + // documents to it. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.action.responses-*") - .privileges("auto_configure", "read", "write") + .privileges("auto_configure", "read", "write", "create_index") .build(), // Endpoint specific actions. Kibana reads and writes to this index to track new // actions and display them. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.actions-*") - .privileges("auto_configure", "read", "write") + .privileges("auto_configure", "read", "write", "create_index") .build(), // Legacy Osquery manager specific action responses. Kibana reads from these to // display responses to the user. @@ -475,7 +480,7 @@ static RoleDescriptor kibanaSystem(String name) { RoleDescriptor.IndicesPrivileges.builder().indices(".slo-observability.*").privileges("all").build(), // Endpoint heartbeat. Kibana reads from these to determine metering/billing for // endpoints. - RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read").build(), + RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read", "create_index").build(), // For connectors telemetry. 
Will be removed once we switched to connectors API RoleDescriptor.IndicesPrivileges.builder().indices(".elastic-connectors*").privileges("read").build() }, null, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 26b306d6f1334..a71ac6a9b51fd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -801,7 +801,7 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(mockIndexAbstraction(index)), - is(false) + is(true) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); @@ -949,7 +949,7 @@ public void testKibanaSystemRole() { ); }); - // read-only index for Endpoint and Osquery manager specific action responses + // Elastic Defend internal index for response actions results Arrays.asList(".logs-endpoint.action.responses-" + randomAlphaOfLength(randomIntBetween(0, 13))).forEach((index) -> { final IndexAbstraction indexAbstraction = mockIndexAbstraction(index); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); @@ -959,10 +959,7 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1069,10 +1066,7 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1097,10 +1091,7 @@ public void testKibanaSystemRole() { is(false) ); 
assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1319,12 +1310,21 @@ public void testKibanaSystemRole() { final boolean isAlsoAutoCreateIndex = indexName.startsWith(".logs-endpoint.actions-") || indexName.startsWith(".logs-endpoint.action.responses-"); + + final boolean isAlsoCreateIndex = indexName.startsWith(".logs-endpoint.actions-") + || indexName.startsWith(".logs-endpoint.action.responses-") + || indexName.startsWith(".logs-endpoint.diagnostic.collection-") + || indexName.startsWith(".logs-endpoint.heartbeat-"); + assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) + is(isAlsoCreateIndex) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(indexAbstraction), is(isAlsoCreateIndex)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), + is(isAlsoCreateIndex) ); - assertThat(kibanaRole.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(indexAbstraction), is(isAlsoAutoCreateIndex)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), is(false)); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(isAlsoAutoCreateIndex) From 0fc0922ff0aade7189fadda137bb5aa8a0474997 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 23 Oct 2024 22:10:45 +0100 Subject: [PATCH 075/202] [ML] Fix NPE in Get Deployment Stats (#115404) If a node has been removed from the cluster and the trained model assignment has not been updated the GET stats action can have an inconsistent view where it thinks a model is deployed on the removed node. The bug only affected nodes with failed deployments. 
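
A minimal sketch of the failure mode and the guard, assuming a simplified helper (`resolveRoutedNode` is illustrative, not the actual call site):

```
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;

// A stale assignment can still route a failed deployment to a node that has
// left the cluster. DiscoveryNodes.get(nodeId) returns null for such a node,
// and passing that null node into NodeStats.forNotStartedState(...) threw the NPE.
static DiscoveryNode resolveRoutedNode(DiscoveryNodes nodes, String nodeId) {
    if (nodes.nodeExists(nodeId) == false) { // the guard this change adds
        return null; // the caller now skips routes for departed nodes
    }
    return nodes.get(nodeId); // safe: the node is known to exist
}
```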
--- docs/changelog/115404.yaml | 5 +++ .../TransportGetDeploymentStatsAction.java | 2 +- ...ransportGetDeploymentStatsActionTests.java | 39 +++++++++++++++++++ 3 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115404.yaml diff --git a/docs/changelog/115404.yaml b/docs/changelog/115404.yaml new file mode 100644 index 0000000000000..e443b152955f3 --- /dev/null +++ b/docs/changelog/115404.yaml @@ -0,0 +1,5 @@ +pr: 115404 +summary: Fix NPE in Get Deployment Stats +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 980cdc09252cb..9ebc510af4f4d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -220,7 +220,7 @@ static GetDeploymentStatsAction.Response addFailedRoutes( // add nodes from the failures that were not in the task responses for (var nodeRoutingState : nodeToRoutingStates.entrySet()) { - if (visitedNodes.contains(nodeRoutingState.getKey()) == false) { + if ((visitedNodes.contains(nodeRoutingState.getKey()) == false) && nodes.nodeExists(nodeRoutingState.getKey())) { updatedNodeStats.add( AssignmentStats.NodeStats.forNotStartedState( nodes.get(nodeRoutingState.getKey()), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java index 4a66be4a773f5..2490cd8d5ab21 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsActionTests.java @@ -148,6 +148,45 @@ public void testAddFailedRoutes_TaskResultIsOverwritten() throws UnknownHostExce assertEquals(RoutingState.FAILED, results.get(0).getNodeStats().get(1).getRoutingState().getState()); } + public void testAddFailedRoutes_MissingNode() throws UnknownHostException { + DiscoveryNodes nodes = buildNodes("node1", "node2"); + var missingNode = DiscoveryNodeUtils.create( + "node3", + new TransportAddress(InetAddress.getByAddress(new byte[] { (byte) 192, (byte) 168, (byte) 0, (byte) 1 }), 9203) + ); + + List nodeStatsList = new ArrayList<>(); + nodeStatsList.add(AssignmentStatsTests.randomNodeStats(nodes.get("node1"))); + nodeStatsList.add(AssignmentStatsTests.randomNodeStats(nodes.get("node2"))); + + var model1 = new AssignmentStats( + "model1", + "deployment1", + randomBoolean() ? null : randomIntBetween(1, 8), + randomBoolean() ? null : randomIntBetween(1, 8), + null, + randomBoolean() ? null : randomIntBetween(1, 10000), + randomBoolean() ? 
null : ByteSizeValue.ofBytes(randomLongBetween(1, 1000000)), + Instant.now(), + nodeStatsList, + randomFrom(Priority.values()) + ); + var response = new GetDeploymentStatsAction.Response(Collections.emptyList(), Collections.emptyList(), List.of(model1), 1); + + // failed state for node 3 conflicts + Map> badRoutes = new HashMap<>(); + Map nodeRoutes = new HashMap<>(); + nodeRoutes.put("node3", new RoutingInfo(1, 1, RoutingState.FAILED, "failed on node3")); + badRoutes.put(createAssignment("model1"), nodeRoutes); + + var modified = TransportGetDeploymentStatsAction.addFailedRoutes(response, badRoutes, nodes); + List results = modified.getStats().results(); + assertThat(results, hasSize(1)); + assertThat(results.get(0).getNodeStats(), hasSize(2)); // 3 + assertEquals("node1", results.get(0).getNodeStats().get(0).getNode().getId()); + assertEquals("node2", results.get(0).getNodeStats().get(1).getNode().getId()); + } + private DiscoveryNodes buildNodes(String... nodeIds) throws UnknownHostException { InetAddress inetAddress = InetAddress.getByAddress(new byte[] { (byte) 192, (byte) 168, (byte) 0, (byte) 1 }); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); From f2b146ed1c19f3801224c39c67a49800d980bca9 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 23 Oct 2024 17:38:10 -0400 Subject: [PATCH 076/202] ESQL: Fix test muting (#115448) (#115466) Fix the test muting on the test for grapheme clusters - it should only allow the test if we're on the 20+ jvm. Closes #114536 --- .../src/test/java/org/elasticsearch/xpack/esql/CsvTests.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 63233f0c46a0d..3119fd4b52153 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -259,6 +259,10 @@ public final void test() throws Throwable { testCase.requiredCapabilities, everyItem(in(EsqlCapabilities.capabilities(true))) ); + assumeTrue( + "Capability not supported in this build", + EsqlCapabilities.capabilities(false).containsAll(testCase.requiredCapabilities) + ); } else { for (EsqlCapabilities.Cap c : EsqlCapabilities.Cap.values()) { if (false == c.isEnabled()) { From 2f64d2037a23de4d9d0da3efb7dca182e066ef48 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 00:42:58 +0200 Subject: [PATCH 077/202] [test] Unmute FsDirectoryFactoryTests#testPreload (#115438) Resolve #110211 --- muted-tests.yml | 3 --- .../org/elasticsearch/index/store/FsDirectoryFactoryTests.java | 2 -- 2 files changed, 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index bd0145611237b..8b9c3cc6ce712 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -17,9 +17,6 @@ tests: - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT" issue: "https://github.com/elastic/elasticsearch/issues/108628" method: "testDeprecatedSettingsReturnWarnings" -- class: org.elasticsearch.index.store.FsDirectoryFactoryTests - method: testPreload - issue: https://github.com/elastic/elasticsearch/issues/110211 - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" diff --git a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java 
b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java index 38e6ca0be0647..b0a14515f2fbc 100644 --- a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java @@ -115,8 +115,6 @@ private void doTestPreload(String... preload) throws IOException { var func = fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory); assertNotEquals(fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory), MMapDirectory.ALL_FILES); assertNotEquals(fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory), MMapDirectory.NO_FILES); - assertTrue(func.test("foo.dvd", newIOContext(random()))); - assertTrue(func.test("foo.tmp", newIOContext(random()))); for (String ext : preload) { assertTrue("ext: " + ext, func.test("foo." + ext, newIOContext(random()))); } From 92ecd36a031de094cda642f14000bea545b01740 Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Wed, 23 Oct 2024 22:00:48 -0400 Subject: [PATCH 078/202] [ES|QL] Simplify syntax of named parameter for identifier and pattern (#115061) * simplify syntax of named parameter for identifier and pattern --- docs/changelog/115061.yaml | 5 + .../xpack/esql/qa/rest/RestEsqlTestCase.java | 23 ++-- .../xpack/esql/action/EsqlCapabilities.java | 12 +- .../xpack/esql/action/RequestXContent.java | 105 +++++++++--------- .../esql/action/EsqlQueryRequestTests.java | 76 +++++++------ .../xpack/esql/analysis/AnalyzerTests.java | 8 +- .../esql/parser/StatementParserTests.java | 8 +- .../rest-api-spec/test/esql/10_basic.yml | 4 +- 8 files changed, 122 insertions(+), 119 deletions(-) create mode 100644 docs/changelog/115061.yaml diff --git a/docs/changelog/115061.yaml b/docs/changelog/115061.yaml new file mode 100644 index 0000000000000..7d40d5ae2629e --- /dev/null +++ b/docs/changelog/115061.yaml @@ -0,0 +1,5 @@ +pr: 115061 +summary: "[ES|QL] Simplify syntax of named parameter for identifier and pattern" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 2a50988e9e35e..8c52a24231a41 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -672,7 +672,7 @@ public void testErrorMessageForArrayValuesInParams() throws IOException { public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOException { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); bulkLoadTestData(10); // positive @@ -684,12 +684,9 @@ public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOExcepti ) ) .params( - "[{\"n1\" : {\"value\" : \"integer\" , \"kind\" : \"identifier\"}}," - + "{\"n2\" : {\"value\" : \"short\" , \"kind\" : \"identifier\"}}, " - + "{\"n3\" : {\"value\" : \"double\" , \"kind\" : \"identifier\"}}," - + "{\"n4\" : {\"value\" : \"boolean\" , \"kind\" : \"identifier\"}}, " - + "{\"n5\" : {\"value\" : \"xx*\" , \"kind\" : \"pattern\"}}, " - + "{\"fn1\" : {\"value\" : \"max\" , \"kind\" : \"identifier\"}}]" + "[{\"n1\" : 
{\"identifier\" : \"integer\"}}, {\"n2\" : {\"identifier\" : \"short\"}}, " + + "{\"n3\" : {\"identifier\" : \"double\"}}, {\"n4\" : {\"identifier\" : \"boolean\"}}, " + + "{\"n5\" : {\"pattern\" : \"xx*\"}}, {\"fn1\" : {\"identifier\" : \"max\"}}]" ); Map result = runEsql(query); Map colA = Map.of("name", "boolean", "type", "boolean"); @@ -728,10 +725,7 @@ public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOExcepti ResponseException.class, () -> runEsqlSync( requestObjectBuilder().query(format(null, "from {} | {}", testIndexName(), command.getKey())) - .params( - "[{\"n1\" : {\"value\" : \"integer\" , \"kind\" : \"identifier\"}}," - + "{\"n2\" : {\"value\" : \"short\" , \"kind\" : \"identifier\"}}]" - ) + .params("[{\"n1\" : {\"identifier\" : \"integer\"}}, {\"n2\" : {\"identifier\" : \"short\"}}]") ) ); error = re.getMessage(); @@ -751,9 +745,8 @@ public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOExcepti () -> runEsqlSync( requestObjectBuilder().query(format(null, "from {} | {}", testIndexName(), command.getKey())) .params( - "[{\"n1\" : {\"value\" : \"`n1`\" , \"kind\" : \"identifier\"}}," - + "{\"n2\" : {\"value\" : \"`n2`\" , \"kind\" : \"identifier\"}}, " - + "{\"n3\" : {\"value\" : \"`n3`\" , \"kind\" : \"identifier\"}}]" + "[{\"n1\" : {\"identifier\" : \"`n1`\"}}, {\"n2\" : {\"identifier\" : \"`n2`\"}}, " + + "{\"n3\" : {\"identifier\" : \"`n3`\"}}]" ) ) ); @@ -781,7 +774,7 @@ public void testNamedParamsForIdentifierAndIdentifierPatterns() throws IOExcepti ResponseException.class, () -> runEsqlSync( requestObjectBuilder().query(format(null, "from {} | ?cmd {}", testIndexName(), command.getValue())) - .params("[{\"cmd\" : {\"value\" : \"" + command.getKey() + "\", \"kind\" : \"identifier\"}}]") + .params("[{\"cmd\" : {\"identifier\" : \"" + command.getKey() + "\"}}]") ) ); error = re.getMessage(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index dfca6ab2bf814..f22ad07a4c6f6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -384,11 +384,6 @@ public enum Cap { */ DATE_DIFF_YEAR_CALENDARIAL, - /** - * Support named parameters for field names. - */ - NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES(Build.current().isSnapshot()), - /** * Fix sorting not allowed on _source and counters. */ @@ -431,7 +426,12 @@ public enum Cap { /** * This enables 60_usage.yml "Basic ESQL usage....non-snapshot" version test. See also the previous capability. */ - NON_SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot() == false); + NON_SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot() == false), + + /** + * Support simplified syntax for named parameters for field and function names. 
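+     * For example, {"n1" : {"identifier" : "integer"}} or {"n5" : {"pattern" : "xx*"}} replaces the previous {"value" : ..., "kind" : ...} form.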
+ */ + NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX(Build.current().isSnapshot()); private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java index 71aface993ab9..d8904288523a7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.xcontent.ObjectParser; @@ -90,19 +89,6 @@ String fields() { private static final ObjectParser SYNC_PARSER = objectParserSync(EsqlQueryRequest::syncEsqlQueryRequest); private static final ObjectParser ASYNC_PARSER = objectParserAsync(EsqlQueryRequest::asyncEsqlQueryRequest); - private enum ParamParsingKey { - VALUE, - KIND - } - - private static final Map paramParsingKeys = Maps.newMapWithExpectedSize(ParamParsingKey.values().length); - - static { - for (ParamParsingKey e : ParamParsingKey.values()) { - paramParsingKeys.put(e.name(), e); - } - } - /** Parses a synchronous request. */ static EsqlQueryRequest parseSync(XContentParser parser) { return SYNC_PARSER.apply(parser, null); @@ -180,25 +166,21 @@ private static QueryParams parseParams(XContentParser p) throws IOException { ); } for (Map.Entry entry : param.fields.entrySet()) { - ParserUtils.ParamClassification classification; + ParserUtils.ParamClassification classification = null; + paramValue = null; String paramName = entry.getKey(); checkParamNameValidity(paramName, errors, loc); - if (EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() - && entry.getValue() instanceof Map values) {// parameter specified as key:value pairs - Map paramElements = Maps.newMapWithExpectedSize(2); - for (Object keyName : values.keySet()) { - ParamParsingKey paramType = checkParamValueKeysValidity(keyName.toString(), errors, loc); - if (paramType != null) { - paramElements.put(paramType, values.get(keyName)); + if (EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() + && entry.getValue() instanceof Map value) {// parameter specified as a key:value pair + checkParamValueSize(paramName, value, loc, errors); + for (Object keyName : value.keySet()) { + classification = getParamClassification(keyName.toString(), errors, loc); + if (classification != null) { + paramValue = value.get(keyName); + checkParamValueValidity(classification, paramValue, loc, errors); } } - paramValue = paramElements.get(ParamParsingKey.VALUE); - if (paramValue == null && values.size() > 1) { // require non-null value for identifier and pattern - errors.add(new XContentParseException(loc, "[" + entry + "] does not have a value specified")); - } - - classification = getClassificationForParam(paramElements, loc, errors); } else {// parameter specifies as a value only paramValue = entry.getValue(); classification = VALUE; @@ -280,12 +262,45 @@ private static void checkParamNameValidity(String name, List paramValue, + XContentLocation loc, + List errors + ) { + if (paramValue.size() == 1) { + return; + } + String errorMessage; + if (paramValue.isEmpty()) { + errorMessage = " has no valid param 
attribute"; + } else { + errorMessage = " has multiple param attributes [" + + paramValue.keySet().stream().map(Object::toString).collect(Collectors.joining(", ")) + + "]"; + } + errors.add( + new XContentParseException( + loc, + "[" + + paramName + + "]" + + errorMessage + + ", only one of " + + Arrays.stream(ParserUtils.ParamClassification.values()) + .map(ParserUtils.ParamClassification::name) + .collect(Collectors.joining(", ")) + + " can be defined in a param" + ) + ); + } + + private static ParserUtils.ParamClassification getParamClassification( String paramKeyName, List errors, XContentLocation loc ) { - ParamParsingKey paramType = paramParsingKeys.get(paramKeyName.toUpperCase(Locale.ROOT)); + ParserUtils.ParamClassification paramType = paramClassifications.get(paramKeyName.toUpperCase(Locale.ROOT)); if (paramType == null) { errors.add( new XContentParseException( @@ -293,38 +308,21 @@ private static ParamParsingKey checkParamValueKeysValidity( "[" + paramKeyName + "] is not a valid param attribute, a valid attribute is any of " - + Arrays.stream(ParamParsingKey.values()).map(ParamParsingKey::name).collect(Collectors.joining(", ")) + + Arrays.stream(ParserUtils.ParamClassification.values()) + .map(ParserUtils.ParamClassification::name) + .collect(Collectors.joining(", ")) ) ); } return paramType; } - private static ParserUtils.ParamClassification getClassificationForParam( - Map paramElements, + private static void checkParamValueValidity( + ParserUtils.ParamClassification classification, + Object value, XContentLocation loc, List errors ) { - Object value = paramElements.get(ParamParsingKey.VALUE); - Object kind = paramElements.get(ParamParsingKey.KIND); - ParserUtils.ParamClassification classification = VALUE; - if (kind != null) { - classification = paramClassifications.get(kind.toString().toUpperCase(Locale.ROOT)); - if (classification == null) { - errors.add( - new XContentParseException( - loc, - "[" - + kind - + "] is not a valid param kind, a valid kind is any of " - + Arrays.stream(ParserUtils.ParamClassification.values()) - .map(ParserUtils.ParamClassification::name) - .collect(Collectors.joining(", ")) - ) - ); - } - } - // If a param is an "identifier" or a "pattern", validate it is a string. // If a param is a "pattern", validate it contains *. 
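        // e.g. {"n1" : {"pattern" : "f1"}} is rejected because the value does not contain a *.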
if (classification == IDENTIFIER || classification == PATTERN) { @@ -345,6 +343,5 @@ private static ParserUtils.ParamClassification getClassificationForParam( ); } } - return classification; } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index 7deaff6ebe6bb..dcb83dadfcf96 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -146,7 +146,7 @@ public void testNamedParams() throws IOException { public void testNamedParamsForIdentifiersPatterns() throws IOException { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); String query = randomAlphaOfLengthBetween(1, 100); boolean columnar = randomBoolean(); @@ -154,12 +154,12 @@ public void testNamedParamsForIdentifiersPatterns() throws IOException { QueryBuilder filter = randomQueryBuilder(); String paramsString = """ - ,"params":[ {"n1" : {"value" : "f1", "kind" : "Identifier"}}, - {"n2" : {"value" : "f1*", "Kind" : "identifier"}}, - {"n3" : {"value" : "f.1*", "KIND" : "Pattern"}}, - {"n4" : {"value" : "*", "kind" : "pattern"}}, - {"n5" : {"value" : "esql", "kind" : "Value"}}, - {"n_6" : {"value" : "null", "kind" : "identifier"}}, + ,"params":[ {"n1" : {"identifier" : "f1"}}, + {"n2" : {"Identifier" : "f1*"}}, + {"n3" : {"pattern" : "f.1*"}}, + {"n4" : {"Pattern" : "*"}}, + {"n5" : {"Value" : "esql"}}, + {"n_6" : {"identifier" : "null"}}, {"n7_" : {"value" : "f.1.1"}}] }"""; List params = List.of( @@ -262,7 +262,7 @@ public void testInvalidParams() throws IOException { public void testInvalidParamsForIdentifiersPatterns() throws IOException { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); String query = randomAlphaOfLengthBetween(1, 100); boolean columnar = randomBoolean(); @@ -271,13 +271,12 @@ public void testInvalidParamsForIdentifiersPatterns() throws IOException { // invalid named parameter for identifier and identifier pattern String paramsString1 = """ - "params":[ {"n1" : {"v" : "v1"}}, {"n2" : {"value" : "v2", "type" : "identifier"}}, - {"n3" : {"value" : "v3", "kind" : "id" }}, {"n4" : {"value" : "v4", "kind" : true}}, - {"n5" : {"value" : "v5", "kind" : ["identifier", "pattern"]}}, {"n6" : {"value" : "v6", "kind" : 0}}, - {"n7" : {"value" : 1, "kind" : "Identifier"}}, {"n8" : {"value" : true, "kind" : "Pattern"}}, - {"n9" : {"kind" : "identifier"}}, {"n10" : {"v" : "v10", "kind" : "identifier"}}, - {"n11" : {"value" : "v11", "kind" : "pattern"}}, {"n12" : {"value" : ["x", "y"], "kind" : "identifier"}}, - {"n13" : {"value" : "v13", "kind" : "identifier", "type" : "pattern"}}, {"n14" : {"v" : "v14", "kind" : "value"}}]"""; + "params":[{"n1" : {"v" : "v1"}}, {"n2" : {"identifier" : "v2", "pattern" : "v2"}}, + {"n3" : {"identifier" : "v3", "pattern" : "v3"}}, {"n4" : {"pattern" : "v4.1", "value" : "v4.2"}}, + {"n5" : {"value" : {"a5" : "v5"}}},{"n6" : {"identifier" : {"a6.1" : "v6.1", "a6.2" : 
"v6.2"}}}, {"n7" : {}}, + {"n8" : {"value" : ["x", "y"]}}, {"n9" : {"identifier" : ["x", "y"]}}, {"n10" : {"pattern" : ["x*", "y*"]}}, + {"n11" : {"identifier" : 1}}, {"n12" : {"pattern" : true}}, {"n13" : {"identifier" : null}}, {"n14" : {"pattern" : "v14"}}, + {"n15" : {"pattern" : "v15*"}, "n16" : {"identifier" : "v16"}}]"""; String json1 = String.format(Locale.ROOT, """ { %s @@ -291,28 +290,37 @@ public void testInvalidParamsForIdentifiersPatterns() throws IOException { assertThat( e1.getCause().getMessage(), containsString( - "Failed to parse params: [2:16] [v] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[2:39] [type] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[3:1] [id] is not a valid param kind, a valid kind is any of VALUE, IDENTIFIER, PATTERN; " - + "[3:44] [true] is not a valid param kind, a valid kind is any of VALUE, IDENTIFIER, PATTERN; " - + "[4:1] [[identifier, pattern]] is not a valid param kind, a valid kind is any of VALUE, IDENTIFIER, PATTERN; " - + "[4:64] [0] is not a valid param kind, a valid kind is any of VALUE, IDENTIFIER, PATTERN; " - + "[5:1] [1] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " - + "[5:48] [true] is not a valid value for PATTERN parameter, " + "[2:15] [v] is not a valid param attribute, a valid attribute is any of VALUE, IDENTIFIER, PATTERN; " + + "[2:38] [n2] has multiple param attributes [identifier, pattern], " + + "only one of VALUE, IDENTIFIER, PATTERN can be defined in a param; " + + "[2:38] [v2] is not a valid value for PATTERN parameter, " + "a valid value for PATTERN parameter is a string and contains *; " - + "[6:1] [null] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " - + "[6:35] [v] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[6:35] [n10={v=v10, kind=identifier}] does not have a value specified; " - + "[6:35] [null] is not a valid value for IDENTIFIER parameter, " + + "[3:1] [n3] has multiple param attributes [identifier, pattern], " + + "only one of VALUE, IDENTIFIER, PATTERN can be defined in a param; " + + "[3:1] [v3] is not a valid value for PATTERN parameter, " + + "a valid value for PATTERN parameter is a string and contains *; " + + "[3:51] [n4] has multiple param attributes [pattern, value], " + + "only one of VALUE, IDENTIFIER, PATTERN can be defined in a param; " + + "[3:51] [v4.1] is not a valid value for PATTERN parameter, " + + "a valid value for PATTERN parameter is a string and contains *; " + + "[4:1] n5={value={a5=v5}} is not supported as a parameter; " + + "[4:36] [{a6.1=v6.1, a6.2=v6.2}] is not a valid value for IDENTIFIER parameter, " + "a valid value for IDENTIFIER parameter is a string; " - + "[7:1] [v11] is not a valid value for PATTERN parameter, " + + "[4:36] n6={identifier={a6.1=v6.1, a6.2=v6.2}} is not supported as a parameter; " + + "[4:98] [n7] has no valid param attribute, only one of VALUE, IDENTIFIER, PATTERN can be defined in a param; " + + "[5:1] n8={value=[x, y]} is not supported as a parameter; " + + "[5:34] [[x, y]] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " + + "[5:34] n9={identifier=[x, y]} is not supported as a parameter; " + + "[5:72] [[x*, y*]] is not a valid value for PATTERN parameter, " + + "a valid value for PATTERN parameter is a string and contains *; " + + "[5:72] n10={pattern=[x*, y*]} is not supported as a parameter; 
" + + "[6:1] [1] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " + + "[6:31] [true] is not a valid value for PATTERN parameter, " + + "a valid value for PATTERN parameter is a string and contains *; " + + "[6:61] [null] is not a valid value for IDENTIFIER parameter, a valid value for IDENTIFIER parameter is a string; " + + "[6:94] [v14] is not a valid value for PATTERN parameter, " + "a valid value for PATTERN parameter is a string and contains *; " - + "[7:50] [[x, y]] is not a valid value for IDENTIFIER parameter," - + " a valid value for IDENTIFIER parameter is a string; " - + "[7:50] n12={kind=identifier, value=[x, y]} is not supported as a parameter; " - + "[8:1] [type] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[8:73] [v] is not a valid param attribute, a valid attribute is any of VALUE, KIND; " - + "[8:73] [n14={v=v14, kind=value}] does not have a value specified" + + "[7:1] Cannot parse more than one key:value pair as parameter, found [{n16:{identifier=v16}}, {n15:{pattern=v15*}}]" ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 3048686efbe44..c18f55a651408 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2087,7 +2087,7 @@ public void testCoalesceWithMixedNumericTypes() { public void testNamedParamsForIdentifiers() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); assertProjectionWithMapping( """ @@ -2181,7 +2181,7 @@ public void testNamedParamsForIdentifiers() { public void testInvalidNamedParamsForIdentifiers() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // missing field assertError( @@ -2254,7 +2254,7 @@ public void testInvalidNamedParamsForIdentifiers() { public void testNamedParamsForIdentifierPatterns() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); assertProjectionWithMapping( """ @@ -2288,7 +2288,7 @@ public void testNamedParamsForIdentifierPatterns() { public void testInvalidNamedParamsForIdentifierPatterns() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // missing pattern assertError( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 094d301875d8e..8019dbf77ffbf 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -1564,7 +1564,7 @@ public void testIntervalParam() { public void testParamForIdentifier() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // field names can appear in eval/where/stats/sort/keep/drop/rename/dissect/grok/enrich/mvexpand // eval, where @@ -1825,7 +1825,7 @@ public void testParamForIdentifier() { public void testParamForIdentifierPattern() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // name patterns can appear in keep and drop // all patterns @@ -1918,7 +1918,7 @@ public void testParamForIdentifierPattern() { public void testParamInInvalidPosition() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // param for pattern is not supported in eval/where/stats/sort/rename/dissect/grok/enrich/mvexpand // where/stats/sort/dissect/grok are covered in RestEsqlTestCase @@ -1973,7 +1973,7 @@ public void testParamInInvalidPosition() { public void testMissingParam() { assumeTrue( "named parameters for identifiers and patterns require snapshot build", - EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES.isEnabled() + EsqlCapabilities.Cap.NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES_SIMPLIFIED_SYNTAX.isEnabled() ); // cover all processing commands eval/where/stats/sort/rename/dissect/grok/enrich/mvexpand/keep/drop String error = "Unknown query parameter [f1], did you mean [f4]?"; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml index 4e8f82d507a5f..96145e84ad2cd 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/10_basic.yml @@ -430,14 +430,14 @@ setup: - method: POST path: /_query parameters: [ ] - capabilities: [ named_parameter_for_field_and_function_names ] + capabilities: [ named_parameter_for_field_and_function_names_simplified_syntax ] reason: "named or positional parameters for field names" - do: esql.query: body: query: 'from test | stats x = count(?f1), y = sum(?f2) by ?f3 | sort ?f3 | keep ?f3, x, y | limit 3' - params: [{"f1" : {"value" : "time", "kind" : "identifier" }}, {"f2" : { "value" : "count", "kind" : "identifier" }}, {"f3" : { "value" : "color", "kind" : "identifier" }}] + params: [{"f1" : {"identifier" : "time"}}, {"f2" : { "identifier" : "count" }}, {"f3" : { "identifier" : "color"}}] - length: {columns: 3} - match: {columns.0.name: "color"} From 541bcf30e5d03944cace8deec24559fc63c8bcb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 24 Oct 2024 08:53:12 +0200 Subject: [PATCH 079/202] [DOCS] Documents that ELSER is the default service for 
`semantic_text` (#114615) Co-authored-by: Mike Pellegrini --- .../mapping/types/semantic-text.asciidoc | 26 ++++++++- .../semantic-search-semantic-text.asciidoc | 57 +++---------------- 2 files changed, 33 insertions(+), 50 deletions(-) diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index ac23c153e01a3..893e2c6cff8ed 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -13,25 +13,47 @@ Long passages are <> to smaller secti The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings. You can create the inference endpoint by using the <>. This field type and the <> type make it simpler to perform semantic search on your data. +If you don't specify an inference endpoint, the <> is used by default. Using `semantic_text`, you won't need to specify how to generate embeddings for your data, or how to index it. The {infer} endpoint automatically determines the embedding generation, indexing, and query to use. +If you use the ELSER service, you can set up `semantic_text` with the following API request: + [source,console] ------------------------------------------------------------ PUT my-index-000001 +{ + "mappings": { + "properties": { + "inference_field": { + "type": "semantic_text" + } + } + } +} +------------------------------------------------------------ + +NOTE: In Serverless, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` even if you use the ELSER service. + +If you use a service other than ELSER, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` as the following example demonstrates: + +[source,console] +------------------------------------------------------------ +PUT my-index-000002 { "mappings": { "properties": { "inference_field": { "type": "semantic_text", - "inference_id": "my-elser-endpoint" + "inference_id": "my-openai-endpoint" <1> } } } } ------------------------------------------------------------ // TEST[skip:Requires inference endpoint] +<1> The `inference_id` of the {infer} endpoint to use to generate embeddings. The recommended way to use semantic_text is by having dedicated {infer} endpoints for ingestion and search. @@ -40,7 +62,7 @@ After creating dedicated {infer} endpoints for both, you can reference them usin [source,console] ------------------------------------------------------------ -PUT my-index-000002 +PUT my-index-000003 { "mappings": { "properties": { diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index 60692c19c184a..f881ca87a92e6 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -21,45 +21,11 @@ This tutorial uses the <> for demonstra [[semantic-text-requirements]] ==== Requirements -To use the `semantic_text` field type, you must have an {infer} endpoint deployed in -your cluster using the <>. +This tutorial uses the <> for demonstration, which is created automatically as needed. +To use the `semantic_text` field type with an {infer} service other than ELSER, you must create an inference endpoint using the <>. 
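As an aside for readers following this tutorial: once documents have been indexed into a `semantic_text` field, searching it also needs no embedding-specific syntax. The following is a minimal sketch, assuming the tutorial's `semantic-embeddings` index and `content` field; the query string is illustrative and this request is an example of the `semantic` query rather than part of this change:

[source,console]
------------------------------------------------------------
GET semantic-embeddings/_search
{
  "query": {
    "semantic": {
      "field": "content",
      "query": "How to avoid muscle soreness after running?"
    }
  }
}
------------------------------------------------------------
// TEST[skip:Requires inference endpoint]

The `semantic` query looks up the field's inference endpoint at search time, so the same request works whether the field uses the default ELSER endpoint or a custom `inference_id`.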
-[discrete] -[[semantic-text-infer-endpoint]] -==== Create the {infer} endpoint - -Create an inference endpoint by using the <>: +NOTE: In Serverless, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` even if you use the ELSER service. -[source,console] ------------------------------------------------------------- -PUT _inference/sparse_embedding/my-elser-endpoint <1> -{ - "service": "elser", <2> - "service_settings": { - "adaptive_allocations": { <3> - "enabled": true, - "min_number_of_allocations": 3, - "max_number_of_allocations": 10 - }, - "num_threads": 1 - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The task type is `sparse_embedding` in the path as the `elser` service will -be used and ELSER creates sparse vectors. The `inference_id` is -`my-elser-endpoint`. -<2> The `elser` service is used in this example. -<3> This setting enables and configures {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations]. -Adaptive allocations make it possible for ELSER to automatically scale up or down resources based on the current load on the process. - -[NOTE] -==== -You might see a 502 bad gateway error in the response when using the {kib} Console. -This error usually just reflects a timeout, while the model downloads in the background. -You can check the download progress in the {ml-app} UI. -If using the Python client, you can set the `timeout` parameter to a higher value. -==== [discrete] [[semantic-text-index-mapping]] @@ -75,8 +41,7 @@ PUT semantic-embeddings "mappings": { "properties": { "content": { <1> - "type": "semantic_text", <2> - "inference_id": "my-elser-endpoint" <3> + "type": "semantic_text" <2> } } } @@ -85,18 +50,14 @@ PUT semantic-embeddings // TEST[skip:TBD] <1> The name of the field to contain the generated embeddings. <2> The field to contain the embeddings is a `semantic_text` field. -<3> The `inference_id` is the inference endpoint you created in the previous step. -It will be used to generate the embeddings based on the input text. -Every time you ingest data into the related `semantic_text` field, this endpoint will be used for creating the vector representation of the text. +Since no `inference_id` is provided, the <> is used by default. +To use a different {infer} service, you must create an {infer} endpoint first using the <> and then specify it in the `semantic_text` field mapping using the `inference_id` parameter. [NOTE] ==== -If you're using web crawlers or connectors to generate indices, you have to -<> for these indices to -include the `semantic_text` field. Once the mapping is updated, you'll need to run -a full web crawl or a full connector sync. This ensures that all existing -documents are reprocessed and updated with the new semantic embeddings, -enabling semantic search on the updated data. +If you're using web crawlers or connectors to generate indices, you have to <> for these indices to include the `semantic_text` field. +Once the mapping is updated, you'll need to run a full web crawl or a full connector sync. +This ensures that all existing documents are reprocessed and updated with the new semantic embeddings, enabling semantic search on the updated data. 
==== From bffaabb6f5c185d6e1003dd08029567ba469fe79 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Thu, 24 Oct 2024 09:19:46 +0200 Subject: [PATCH 080/202] ES|QL: improve docs about escaping for GROK, DISSECT, LIKE, RLIKE (#115320) --- ...ql-process-data-with-dissect-grok.asciidoc | 31 ++++++---- .../functions/kibana/definition/like.json | 2 +- .../functions/kibana/definition/rlike.json | 2 +- .../esql/functions/kibana/docs/like.md | 2 +- .../esql/functions/kibana/docs/rlike.md | 2 +- docs/reference/esql/functions/like.asciidoc | 16 ++++++ docs/reference/esql/functions/rlike.asciidoc | 16 ++++++ .../src/main/resources/docs.csv-spec | 42 +++++++++----- .../src/main/resources/string.csv-spec | 56 +++++++++++++++++++ .../function/scalar/string/RLike.java | 18 +++++- .../function/scalar/string/WildcardLike.java | 18 +++++- 11 files changed, 175 insertions(+), 30 deletions(-) diff --git a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc index 87748fee4f202..e626e058a4e56 100644 --- a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc +++ b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc @@ -40,7 +40,7 @@ delimiter-based pattern, and extracts the specified keys as columns. For example, the following pattern: [source,txt] ---- -%{clientip} [%{@timestamp}] %{status} +%{clientip} [%{@timestamp}] %{status} ---- matches a log line of this format: @@ -76,8 +76,8 @@ ignore certain fields, append fields, skip over padding, etc. ===== Terminology dissect pattern:: -the set of fields and delimiters describing the textual -format. Also known as a dissection. +the set of fields and delimiters describing the textual +format. Also known as a dissection. The dissection is described using a set of `%{}` sections: `%{a} - %{b} - %{c}` @@ -91,14 +91,14 @@ Any set of characters other than `%{`, `'not }'`, or `}` is a delimiter. key:: + -- -the text between the `%{` and `}`, exclusive of the `?`, `+`, `&` prefixes -and the ordinal suffix. +the text between the `%{` and `}`, exclusive of the `?`, `+`, `&` prefixes +and the ordinal suffix. Examples: -* `%{?aaa}` - the key is `aaa` -* `%{+bbb/3}` - the key is `bbb` -* `%{&ccc}` - the key is `ccc` +* `%{?aaa}` - the key is `aaa` +* `%{+bbb/3}` - the key is `bbb` +* `%{&ccc}` - the key is `ccc` -- [[esql-dissect-examples]] @@ -218,7 +218,7 @@ Putting it together as an {esql} query: [source.merge.styled,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=grokWithEscape] +include::{esql-specs}/docs.csv-spec[tag=grokWithEscapeTripleQuotes] ---- `GROK` adds the following columns to the input table: @@ -239,15 +239,24 @@ with a `\`. For example, in the earlier pattern: %{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status} ---- -In {esql} queries, the backslash character itself is a special character that +In {esql} queries, when using single quotes for strings, the backslash character itself is a special character that needs to be escaped with another `\`. For this example, the corresponding {esql} query becomes: [source.merge.styled,esql] ---- include::{esql-specs}/docs.csv-spec[tag=grokWithEscape] ---- + +For this reason, it is generally more convenient to use triple quotes (`"""`) for GROK patterns, +which do not require escaping backslashes.
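To make the difference concrete, here is the same pattern written both ways; this sketch mirrors the `grokWithEscape` and `grokWithEscapeTripleQuotes` snippets added to `docs.csv-spec` later in this patch, so only the quoting differs:

[source,esql]
----
ROW a = "1.2.3.4 [2023-01-23T12:15:00.000Z] Connected"
| GROK a "%{IP:ip} \\[%{TIMESTAMP_ISO8601:@timestamp}\\] %{GREEDYDATA:status}"
----

[source,esql]
----
ROW a = "1.2.3.4 [2023-01-23T12:15:00.000Z] Connected"
| GROK a """%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status}"""
----

Both queries pass the identical pattern to grok; the single-quoted form simply needs each backslash doubled at the string-literal level.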
+ +[source.merge.styled,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=grokWithEscapeTripleQuotes] +---- ==== + [[esql-grok-patterns]] ===== Grok patterns @@ -318,4 +327,4 @@ as the `GROK` command. The `GROK` command does not support configuring <>, or <>. The `GROK` command is not subject to <>. -// end::grok-limitations[] \ No newline at end of file +// end::grok-limitations[] diff --git a/docs/reference/esql/functions/kibana/definition/like.json b/docs/reference/esql/functions/kibana/definition/like.json index 97e84e0361fd2..f375c697bd60d 100644 --- a/docs/reference/esql/functions/kibana/definition/like.json +++ b/docs/reference/esql/functions/kibana/definition/like.json @@ -42,7 +42,7 @@ } ], "examples" : [ - "FROM employees\n| WHERE first_name LIKE \"?b*\"\n| KEEP first_name, last_name" + "FROM employees\n| WHERE first_name LIKE \"\"\"?b*\"\"\"\n| KEEP first_name, last_name" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/rlike.json b/docs/reference/esql/functions/kibana/definition/rlike.json index e442bb2c55050..7a328293383bb 100644 --- a/docs/reference/esql/functions/kibana/definition/rlike.json +++ b/docs/reference/esql/functions/kibana/definition/rlike.json @@ -42,7 +42,7 @@ } ], "examples" : [ - "FROM employees\n| WHERE first_name RLIKE \".leja.*\"\n| KEEP first_name, last_name" + "FROM employees\n| WHERE first_name RLIKE \"\"\".leja.*\"\"\"\n| KEEP first_name, last_name" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/docs/like.md b/docs/reference/esql/functions/kibana/docs/like.md index 4c400bdc65479..ea2ac11b6f4b9 100644 --- a/docs/reference/esql/functions/kibana/docs/like.md +++ b/docs/reference/esql/functions/kibana/docs/like.md @@ -15,6 +15,6 @@ The following wildcard characters are supported: ``` FROM employees -| WHERE first_name LIKE "?b*" +| WHERE first_name LIKE """?b*""" | KEEP first_name, last_name ``` diff --git a/docs/reference/esql/functions/kibana/docs/rlike.md b/docs/reference/esql/functions/kibana/docs/rlike.md index ed94553e7e44f..95b57799ffe29 100644 --- a/docs/reference/esql/functions/kibana/docs/rlike.md +++ b/docs/reference/esql/functions/kibana/docs/rlike.md @@ -10,6 +10,6 @@ expression. The right-hand side of the operator represents the pattern. ``` FROM employees -| WHERE first_name RLIKE ".leja.*" +| WHERE first_name RLIKE """.leja.*""" | KEEP first_name, last_name ``` diff --git a/docs/reference/esql/functions/like.asciidoc b/docs/reference/esql/functions/like.asciidoc index 2298617be5699..a569896bc3c1e 100644 --- a/docs/reference/esql/functions/like.asciidoc +++ b/docs/reference/esql/functions/like.asciidoc @@ -23,4 +23,20 @@ include::{esql-specs}/docs.csv-spec[tag=like] |=== include::{esql-specs}/docs.csv-spec[tag=like-result] |=== + +Matching the exact characters `*` and `?` requires escaping. +The escape character is backslash `\`. Since the backslash is itself a special character in string literals, +it requires further escaping.
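Concretely, the escaping described here looks as follows in a query; this sketch is based on the `likeEscaping*` test cases added to `string.csv-spec` later in this patch:

[source,esql]
----
ROW message = "foo * bar"
| WHERE message LIKE "foo \\* bar"
----

[source,esql]
----
ROW message = "foo * bar"
| WHERE message LIKE """foo \* bar"""
----

Both variants keep the row, because the escaped `*` is matched as a literal asterisk rather than as a wildcard.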
+ +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=likeEscapingSingleQuotes] +---- + +To reduce the overhead of escaping, we suggest using triple-quoted strings (`"""`): + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=likeEscapingTripleQuotes] +---- // end::body[] diff --git a/docs/reference/esql/functions/rlike.asciidoc b/docs/reference/esql/functions/rlike.asciidoc index 031594ae403da..f6009b2c49528 100644 --- a/docs/reference/esql/functions/rlike.asciidoc +++ b/docs/reference/esql/functions/rlike.asciidoc @@ -18,4 +18,20 @@ include::{esql-specs}/docs.csv-spec[tag=rlike] |=== include::{esql-specs}/docs.csv-spec[tag=rlike-result] |=== + +Matching special characters (e.g. `.`, `*`, `(`, ...) requires escaping. +The escape character is backslash `\`. Since the backslash is itself a special character in string literals, +it requires further escaping. + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=rlikeEscapingSingleQuotes] +---- + +To reduce the overhead of escaping, we suggest using triple-quoted strings (`"""`): + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=rlikeEscapingTripleQuotes] +---- // end::body[] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 15fe6853ae491..a9c5a5214f159 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -382,7 +382,7 @@ count:long | languages:integer basicGrok // tag::basicGrok[] ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}""" | KEEP date, ip, email, num // end::basicGrok[] ; @@ -396,7 +396,7 @@ date:keyword | ip:keyword | email:keyword | num:keyword grokWithConversionSuffix // tag::grokWithConversionSuffix[] ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" | KEEP date, ip, email, num // end::grokWithConversionSuffix[] ; @@ -410,7 +410,7 @@ date:keyword | ip:keyword | email:keyword | num:integer grokWithToDatetime // tag::grokWithToDatetime[] ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" | KEEP date, ip, email, num | EVAL date = TO_DATETIME(date) // end::grokWithToDatetime[] @@ -436,11 +436,27 @@ ROW a = "1.2.3.4 [2023-01-23T12:15:00.000Z] Connected" // end::grokWithEscape-result[] ; + +grokWithEscapeTripleQuotes +// tag::grokWithEscapeTripleQuotes[] +ROW a = "1.2.3.4 [2023-01-23T12:15:00.000Z] Connected" +| GROK a """%{IP:ip} \[%{TIMESTAMP_ISO8601:@timestamp}\] %{GREEDYDATA:status}""" +// end::grokWithEscapeTripleQuotes[] +| KEEP @timestamp +; + +// tag::grokWithEscapeTripleQuotes-result[] +@timestamp:keyword +2023-01-23T12:15:00.000Z +// end::grokWithEscapeTripleQuotes-result[] +; + + grokWithDuplicateFieldNames // tag::grokWithDuplicateFieldNames[] FROM addresses | KEEP city.name, zip_code -| GROK zip_code "%{WORD:zip_parts} %{WORD:zip_parts}" +| GROK zip_code
"""%{WORD:zip_parts} %{WORD:zip_parts}""" // end::grokWithDuplicateFieldNames[] | SORT city.name ; @@ -456,7 +472,7 @@ Tokyo | 100-7014 | null basicDissect // tag::basicDissect[] ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" -| DISSECT a "%{date} - %{msg} - %{ip}" +| DISSECT a """%{date} - %{msg} - %{ip}""" | KEEP date, msg, ip // end::basicDissect[] ; @@ -470,7 +486,7 @@ date:keyword | msg:keyword | ip:keyword dissectWithToDatetime // tag::dissectWithToDatetime[] ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" -| DISSECT a "%{date} - %{msg} - %{ip}" +| DISSECT a """%{date} - %{msg} - %{ip}""" | KEEP date, msg, ip | EVAL date = TO_DATETIME(date) // end::dissectWithToDatetime[] @@ -485,7 +501,7 @@ some text | 127.0.0.1 | 2023-01-23T12:15:00.000Z dissectRightPaddingModifier // tag::dissectRightPaddingModifier[] ROW message="1998-08-10T17:15:42 WARN" -| DISSECT message "%{ts->} %{level}" +| DISSECT message """%{ts->} %{level}""" // end::dissectRightPaddingModifier[] ; @@ -498,7 +514,7 @@ message:keyword | ts:keyword | level:keyword dissectEmptyRightPaddingModifier#[skip:-8.11.2, reason:Support for empty right padding modifiers introduced in 8.11.2] // tag::dissectEmptyRightPaddingModifier[] ROW message="[1998-08-10T17:15:42] [WARN]" -| DISSECT message "[%{ts}]%{->}[%{level}]" +| DISSECT message """[%{ts}]%{->}[%{level}]""" // end::dissectEmptyRightPaddingModifier[] ; @@ -511,7 +527,7 @@ ROW message="[1998-08-10T17:15:42] [WARN]" dissectAppendModifier // tag::dissectAppendModifier[] ROW message="john jacob jingleheimer schmidt" -| DISSECT message "%{+name} %{+name} %{+name} %{+name}" APPEND_SEPARATOR=" " +| DISSECT message """%{+name} %{+name} %{+name} %{+name}""" APPEND_SEPARATOR=" " // end::dissectAppendModifier[] ; @@ -524,7 +540,7 @@ john jacob jingleheimer schmidt|john jacob jingleheimer schmidt dissectAppendWithOrderModifier // tag::dissectAppendWithOrderModifier[] ROW message="john jacob jingleheimer schmidt" -| DISSECT message "%{+name/2} %{+name/4} %{+name/3} %{+name/1}" APPEND_SEPARATOR="," +| DISSECT message """%{+name/2} %{+name/4} %{+name/3} %{+name/1}""" APPEND_SEPARATOR="," // end::dissectAppendWithOrderModifier[] ; @@ -537,7 +553,7 @@ john jacob jingleheimer schmidt|schmidt,john,jingleheimer,jacob dissectNamedSkipKey // tag::dissectNamedSkipKey[] ROW message="1.2.3.4 - - 30/Apr/1998:22:00:52 +0000" -| DISSECT message "%{clientip} %{?ident} %{?auth} %{@timestamp}" +| DISSECT message """%{clientip} %{?ident} %{?auth} %{@timestamp}""" // end::dissectNamedSkipKey[] ; @@ -550,7 +566,7 @@ message:keyword | clientip:keyword | @timestamp:keyword docsLike // tag::like[] FROM employees -| WHERE first_name LIKE "?b*" +| WHERE first_name LIKE """?b*""" | KEEP first_name, last_name // end::like[] | SORT first_name @@ -566,7 +582,7 @@ Eberhardt |Terkki docsRlike // tag::rlike[] FROM employees -| WHERE first_name RLIKE ".leja.*" +| WHERE first_name RLIKE """.leja.*""" | KEEP first_name, last_name // end::rlike[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index dd9d519649c01..00fa2fddb2106 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1800,3 +1800,59 @@ warning:Line 1:29: java.lang.IllegalArgumentException: single-value function enc x:keyword null ; + + +likeEscapingSingleQuotes +// tag::likeEscapingSingleQuotes[] +ROW message = "foo * bar" +| 
WHERE message LIKE "foo \\* bar" +// end::likeEscapingSingleQuotes[] +; + +// tag::likeEscapingSingleQuotes-result[] +message:keyword +foo * bar +// end::likeEscapingSingleQuotes-result[] +; + + +likeEscapingTripleQuotes +// tag::likeEscapingTripleQuotes[] +ROW message = "foo * bar" +| WHERE message LIKE """foo \* bar""" +// end::likeEscapingTripleQuotes[] +; + +// tag::likeEscapingTripleQuotes-result[] +message:keyword +foo * bar +// end::likeEscapingTripleQuotes-result[] +; + + +rlikeEscapingSingleQuotes +// tag::rlikeEscapingSingleQuotes[] +ROW message = "foo ( bar" +| WHERE message RLIKE "foo \\( bar" +// end::rlikeEscapingSingleQuotes[] +; + +// tag::rlikeEscapingSingleQuotes-result[] +message:keyword +foo ( bar +// end::rlikeEscapingSingleQuotes-result[] +; + + +rlikeEscapingTripleQuotes +// tag::rlikeEscapingTripleQuotes[] +ROW message = "foo ( bar" +| WHERE message RLIKE """foo \( bar""" +// end::rlikeEscapingTripleQuotes[] +; + +// tag::rlikeEscapingTripleQuotes-result[] +message:keyword +foo ( bar +// end::rlikeEscapingTripleQuotes-result[] +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLike.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLike.java index b46c46c89deba..cd42711177510 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLike.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RLike.java @@ -33,7 +33,23 @@ public class RLike extends org.elasticsearch.xpack.esql.core.expression.predicat Use `RLIKE` to filter data based on string patterns using <>. `RLIKE` usually acts on a field placed on the left-hand side of the operator, but it can also act on a constant (literal) - expression. The right-hand side of the operator represents the pattern.""", examples = @Example(file = "docs", tag = "rlike")) + expression. The right-hand side of the operator represents the pattern.""", detailedDescription = """ + Matching special characters (e.g. `.`, `*`, `(`, ...) requires escaping. + The escape character is backslash `\\`. Since the backslash is itself a special character in string literals, + it requires further escaping. + + [source.merge.styled,esql] + ---- + include::{esql-specs}/string.csv-spec[tag=rlikeEscapingSingleQuotes] + ---- + + To reduce the overhead of escaping, we suggest using triple-quoted strings (`\"\"\"`): + + [source.merge.styled,esql] + ---- + include::{esql-specs}/string.csv-spec[tag=rlikeEscapingTripleQuotes] + ---- + """, examples = @Example(file = "docs", tag = "rlike")) public RLike( Source source, @Param(name = "str", type = { "keyword", "text" }, description = "A literal value.") Expression value, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLike.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLike.java index 714c4ca04a862..c1b4f20f41795 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLike.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/WildcardLike.java @@ -43,7 +43,23 @@ also act on a constant (literal) expression. The right-hand side of the operator The following wildcard characters are supported: * `*` matches zero or more characters.
- * `?` matches one character.""", examples = @Example(file = "docs", tag = "like")) + * `?` matches one character.""", detailedDescription = """ + Matching the exact characters `*` and `?` requires escaping. + The escape character is backslash `\\`. Since the backslash is itself a special character in string literals, + it requires further escaping. + + [source.merge.styled,esql] + ---- + include::{esql-specs}/string.csv-spec[tag=likeEscapingSingleQuotes] + ---- + + To reduce the overhead of escaping, we suggest using triple-quoted strings (`\"\"\"`): + + [source.merge.styled,esql] + ---- + include::{esql-specs}/string.csv-spec[tag=likeEscapingTripleQuotes] + ---- + """, examples = @Example(file = "docs", tag = "like")) public WildcardLike( Source source, @Param(name = "str", type = { "keyword", "text" }, description = "A literal expression.") Expression left, From 6f7bd550b17cbaf7d11acf68d0aacfa1d569f7c8 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:34:12 +0300 Subject: [PATCH 081/202] Use settings from LogsdbIndexModeSettingsProvider in SyntheticSourceIndexSettingsProvider (#115437) * Use settings from LogsdbIndexModeSettingsProvider in SyntheticSourceIndexSettingsProvider * update --- .../xpack/logsdb/LogsdbWithBasicRestIT.java | 32 ++++++++ .../xpack/logsdb/LogsdbRestIT.java | 39 ++++++++-- .../xpack/logsdb/LogsDBPlugin.java | 2 +- .../LogsdbIndexModeSettingsProvider.java | 18 ++--- .../SyntheticSourceIndexSettingsProvider.java | 13 +++- ...heticSourceIndexSettingsProviderTests.java | 75 ++++++++++++++++++- 6 files changed, 159 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java index f5ac107628d1a..381c83ceee289 100644 --- a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java +++ b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.logsdb; import org.elasticsearch.client.Request; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.test.cluster.ElasticsearchCluster; @@ -171,4 +172,35 @@ public void testLogsdbOverrideNullInTemplate() throws IOException { assertEquals("logsdb", settings.get("index.mode")); assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); } + + public void testLogsdbOverrideDefaultModeForLogsIndex() throws IOException { + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{ \"transient\": { \"cluster.logsdb.enabled\": true } }"); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["logs-test-*"], + "data_stream": { + } + } + """); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/logs-test-foo/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "2020-01-01T00:00:00.000Z", + "host.name": "foo", + "message": "bar" + } + """); + assertOK(client().performRequest(request)); + + String index = DataStream.getDefaultBackingIndexName("logs-test-foo", 1); + var
settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + } } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java index 16759c3292f7a..2bf8b00cf551c 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.logsdb; +import org.elasticsearch.client.Request; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -35,12 +37,6 @@ protected String getTestRestCluster() { } public void testFeatureUsageWithLogsdbIndex() throws IOException { - { - var response = getAsMap("/_license/feature_usage"); - @SuppressWarnings("unchecked") - List> features = (List>) response.get("features"); - assertThat(features, Matchers.empty()); - } { if (randomBoolean()) { createIndex("test-index", Settings.builder().put("index.mode", "logsdb").build()); @@ -81,4 +77,35 @@ public void testFeatureUsageWithLogsdbIndex() throws IOException { } } + public void testLogsdbSourceModeForLogsIndex() throws IOException { + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{ \"transient\": { \"cluster.logsdb.enabled\": true } }"); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["logs-test-*"], + "data_stream": { + } + } + """); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/logs-test-foo/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "2020-01-01T00:00:00.000Z", + "host.name": "foo", + "message": "bar" + } + """); + assertOK(client().performRequest(request)); + + String index = DataStream.getDefaultBackingIndexName("logs-test-foo", 1); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertNull(settings.get("index.mapping.source.mode")); + } + } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java index 49a83335671cd..089be0604146f 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java @@ -52,7 +52,7 @@ public Collection getAdditionalIndexSettingProviders(Index return List.of(logsdbIndexModeSettingsProvider); } return List.of( - new SyntheticSourceIndexSettingsProvider(licenseService, parameters.mapperServiceFactory()), + new SyntheticSourceIndexSettingsProvider(licenseService, parameters.mapperServiceFactory(), logsdbIndexModeSettingsProvider), logsdbIndexModeSettingsProvider ); } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java index 
ee9d6129dcd54..329cd3bc8a04b 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java @@ -48,19 +48,16 @@ public Settings getAdditionalIndexSettings( final Settings settings, final List combinedTemplateMappings ) { - if (isLogsdbEnabled == false || dataStreamName == null) { - return Settings.EMPTY; - } - - final IndexMode indexMode = resolveIndexMode(settings.get(IndexSettings.MODE.getKey())); - if (indexMode != null) { - return Settings.EMPTY; - } + return getLogsdbModeSetting(dataStreamName, settings); + } - if (matchesLogsPattern(dataStreamName)) { + Settings getLogsdbModeSetting(final String dataStreamName, final Settings settings) { + if (isLogsdbEnabled + && dataStreamName != null + && resolveIndexMode(settings.get(IndexSettings.MODE.getKey())) == null + && matchesLogsPattern(dataStreamName)) { return Settings.builder().put("index.mode", IndexMode.LOGSDB.getName()).build(); } - return Settings.EMPTY; } @@ -71,5 +68,4 @@ private static boolean matchesLogsPattern(final String name) { private IndexMode resolveIndexMode(final String mode) { return mode != null ? Enum.valueOf(IndexMode.class, mode.toUpperCase(Locale.ROOT)) : null; } - } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java index 4625fe91294d7..e7572d6a646e1 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java @@ -38,13 +38,16 @@ final class SyntheticSourceIndexSettingsProvider implements IndexSettingProvider private final SyntheticSourceLicenseService syntheticSourceLicenseService; private final CheckedFunction mapperServiceFactory; + private final LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider; SyntheticSourceIndexSettingsProvider( SyntheticSourceLicenseService syntheticSourceLicenseService, - CheckedFunction mapperServiceFactory + CheckedFunction mapperServiceFactory, + LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider ) { this.syntheticSourceLicenseService = syntheticSourceLicenseService; this.mapperServiceFactory = mapperServiceFactory; + this.logsdbIndexModeSettingsProvider = logsdbIndexModeSettingsProvider; } @Override @@ -63,6 +66,14 @@ public Settings getAdditionalIndexSettings( Settings indexTemplateAndCreateRequestSettings, List combinedTemplateMappings ) { + var logsdbSettings = logsdbIndexModeSettingsProvider.getLogsdbModeSetting(dataStreamName, indexTemplateAndCreateRequestSettings); + if (logsdbSettings != Settings.EMPTY) { + indexTemplateAndCreateRequestSettings = Settings.builder() + .put(logsdbSettings) + .put(indexTemplateAndCreateRequestSettings) + .build(); + } + // This index name is used when validating component and index templates, we should skip this check in that case. // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) 
method) boolean isTemplateValidation = "validate-index-name".equals(indexName); diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java index 362b387726105..2ab77b38b3373 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.license.MockLicenseState; @@ -35,6 +36,10 @@ public class SyntheticSourceIndexSettingsProviderTests extends ESTestCase { private SyntheticSourceLicenseService syntheticSourceLicenseService; private SyntheticSourceIndexSettingsProvider provider; + private static LogsdbIndexModeSettingsProvider getLogsdbIndexModeSettingsProvider(boolean enabled) { + return new LogsdbIndexModeSettingsProvider(Settings.builder().put("cluster.logsdb.enabled", enabled).build()); + } + @Before public void setup() { MockLicenseState licenseState = mock(MockLicenseState.class); @@ -46,7 +51,8 @@ public void setup() { provider = new SyntheticSourceIndexSettingsProvider( syntheticSourceLicenseService, - im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()) + im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), + getLogsdbIndexModeSettingsProvider(false) ); } @@ -310,4 +316,71 @@ public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSource() throws assertThat(result.size(), equalTo(1)); assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); } + + public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSourceFileMatch() throws IOException { + syntheticSourceLicenseService.setSyntheticSourceFallback(true); + provider = new SyntheticSourceIndexSettingsProvider( + syntheticSourceLicenseService, + im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), + getLogsdbIndexModeSettingsProvider(true) + ); + final Settings settings = Settings.EMPTY; + + String dataStreamName = "logs-app1"; + Metadata.Builder mb = Metadata.builder( + DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + Instant.now().toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + Metadata metadata = mb.build(); + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(0)); + + dataStreamName = "logs-app1-0"; + mb = Metadata.builder( + DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + Instant.now().toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + metadata = mb.build(); + + result = provider.getAdditionalIndexSettings( + 
DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(1)); + assertEquals(SourceFieldMapper.Mode.STORED, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(result)); + + result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + null, + metadata, + Instant.ofEpochMilli(1L), + builder().put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.toString()).build(), + List.of() + ); + assertThat(result.size(), equalTo(0)); + } } From cc9a08a7085e87975d780bb1c2f5c698dbe19b4e Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Thu, 24 Oct 2024 18:43:51 +1100 Subject: [PATCH 082/202] Only publish desired balance gauges on master (#115383) Closes ES-9834 --- docs/changelog/115383.yaml | 5 + .../DesiredBalanceReconcilerMetricsIT.java | 69 ++++++++++ .../allocator/DesiredBalanceMetrics.java | 118 ++++++++++++++++++ .../allocator/DesiredBalanceReconciler.java | 56 +-------- .../DesiredBalanceShardsAllocator.java | 19 +-- .../allocator/DesiredBalanceMetricsTests.java | 116 +++++++++++++++++ .../DesiredBalanceReconcilerTests.java | 7 +- 7 files changed, 326 insertions(+), 64 deletions(-) create mode 100644 docs/changelog/115383.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java diff --git a/docs/changelog/115383.yaml b/docs/changelog/115383.yaml new file mode 100644 index 0000000000000..19eadd41c0726 --- /dev/null +++ b/docs/changelog/115383.yaml @@ -0,0 +1,5 @@ +pr: 115383 +summary: Only publish desired balance gauges on master +area: Allocation +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java new file mode 100644 index 0000000000000..cb279c93b402e --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; + +import java.util.Collection; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.not; + +public class DesiredBalanceReconcilerMetricsIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), TestTelemetryPlugin.class); + } + + public void testDesiredBalanceGaugeMetricsAreOnlyPublishedByCurrentMaster() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("test").setSettings(indexSettings(2, 1)).get(); + ensureGreen(); + + assertOnlyMasterIsPublishingMetrics(); + + // fail over and check again + int numFailOvers = randomIntBetween(1, 3); + for (int i = 0; i < numFailOvers; i++) { + internalCluster().restartNode(internalCluster().getMasterName()); + ensureGreen(); + + assertOnlyMasterIsPublishingMetrics(); + } + } + + private static void assertOnlyMasterIsPublishingMetrics() { + String masterNodeName = internalCluster().getMasterName(); + String[] nodeNames = internalCluster().getNodeNames(); + for (String nodeName : nodeNames) { + assertMetricsAreBeingPublished(nodeName, nodeName.equals(masterNodeName)); + } + } + + private static void assertMetricsAreBeingPublished(String nodeName, boolean shouldBePublishing) { + final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + testTelemetryPlugin.resetMeter(); + testTelemetryPlugin.collect(); + Matcher> matcher = shouldBePublishing ? not(empty()) : empty(); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.TOTAL_SHARDS_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNDESIRED_ALLOCATION_COUNT_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME), matcher); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java new file mode 100644 index 0000000000000..436f1ac38c0c2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongWithAttributes; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.List; + +public class DesiredBalanceMetrics { + + public static final DesiredBalanceMetrics NOOP = new DesiredBalanceMetrics(MeterRegistry.NOOP); + public static final String UNASSIGNED_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.unassigned.current"; + public static final String TOTAL_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.current"; + public static final String UNDESIRED_ALLOCATION_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.current"; + public static final String UNDESIRED_ALLOCATION_RATIO_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.ratio"; + + private volatile boolean nodeIsMaster = false; + + /** + * Number of unassigned shards during last reconciliation + */ + private volatile long unassignedShards; + /** + * Total number of assigned shards during last reconciliation + */ + private volatile long totalAllocations; + /** + * Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved + */ + private volatile long undesiredAllocations; + + public void updateMetrics(long unassignedShards, long totalAllocations, long undesiredAllocations) { + this.unassignedShards = unassignedShards; + this.totalAllocations = totalAllocations; + this.undesiredAllocations = undesiredAllocations; + } + + public DesiredBalanceMetrics(MeterRegistry meterRegistry) { + meterRegistry.registerLongsGauge( + UNASSIGNED_SHARDS_METRIC_NAME, + "Current number of unassigned shards", + "{shard}", + this::getUnassignedShardsMetrics + ); + meterRegistry.registerLongsGauge(TOTAL_SHARDS_METRIC_NAME, "Total number of shards", "{shard}", this::getTotalAllocationsMetrics); + meterRegistry.registerLongsGauge( + UNDESIRED_ALLOCATION_COUNT_METRIC_NAME, + "Total number of shards allocated on undesired nodes excluding shutting down nodes", + "{shard}", + this::getUndesiredAllocationsMetrics + ); + meterRegistry.registerDoublesGauge( + UNDESIRED_ALLOCATION_RATIO_METRIC_NAME, + "Ratio of undesired allocations to shard count excluding shutting down nodes", + "1", + this::getUndesiredAllocationsRatioMetrics + ); + } + + public void setNodeIsMaster(boolean nodeIsMaster) { + this.nodeIsMaster = nodeIsMaster; + } + + public long unassignedShards() { + return unassignedShards; + } + + public long totalAllocations() { + return totalAllocations; + } + + public long undesiredAllocations() { + return undesiredAllocations; + } + + private List getUnassignedShardsMetrics() { + return getIfPublishing(unassignedShards); + } + + private List getTotalAllocationsMetrics() { + return getIfPublishing(totalAllocations); + } + + private List getUndesiredAllocationsMetrics() { + return getIfPublishing(undesiredAllocations); + } + + private List getIfPublishing(long value) { + if (nodeIsMaster) { + return List.of(new LongWithAttributes(value)); + } + return List.of(); + } + + private List getUndesiredAllocationsRatioMetrics() { + if (nodeIsMaster) { + var total = totalAllocations; + var undesired = undesiredAllocations; + return List.of(new DoubleWithAttributes(total != 0 ? 
(double) undesired / total : 0.0)); + } + return List.of(); + } + + public void zeroAllMetrics() { + unassignedShards = 0; + totalAllocations = 0; + undesiredAllocations = 0; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index da52148919cdb..dced9214a3245 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -30,10 +30,6 @@ import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.telemetry.metric.DoubleGauge; -import org.elasticsearch.telemetry.metric.DoubleWithAttributes; -import org.elasticsearch.telemetry.metric.LongGaugeMetric; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; import java.util.Comparator; @@ -73,23 +69,10 @@ public class DesiredBalanceReconciler { private double undesiredAllocationsLogThreshold; private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); + private final DesiredBalanceMetrics desiredBalanceMetrics; - // stats - /** - * Number of unassigned shards during last reconciliation - */ - protected final LongGaugeMetric unassignedShards; - /** - * Total number of assigned shards during last reconciliation - */ - protected final LongGaugeMetric totalAllocations; - /** - * Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved - */ - protected final LongGaugeMetric undesiredAllocations; - private final DoubleGauge undesiredAllocationsRatio; - - public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, MeterRegistry meterRegistry) { + public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, DesiredBalanceMetrics desiredBalanceMetrics) { + this.desiredBalanceMetrics = desiredBalanceMetrics; this.undesiredAllocationLogInterval = new FrequencyCappedAction( threadPool.relativeTimeInMillisSupplier(), TimeValue.timeValueMinutes(5) @@ -99,35 +82,6 @@ public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool thre UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, value -> this.undesiredAllocationsLogThreshold = value ); - - unassignedShards = LongGaugeMetric.create( - meterRegistry, - "es.allocator.desired_balance.shards.unassigned.current", - "Current number of unassigned shards", - "{shard}" - ); - totalAllocations = LongGaugeMetric.create( - meterRegistry, - "es.allocator.desired_balance.shards.current", - "Total number of shards", - "{shard}" - ); - undesiredAllocations = LongGaugeMetric.create( - meterRegistry, - "es.allocator.desired_balance.allocations.undesired.current", - "Total number of shards allocated on undesired nodes excluding shutting down nodes", - "{shard}" - ); - undesiredAllocationsRatio = meterRegistry.registerDoubleGauge( - "es.allocator.desired_balance.allocations.undesired.ratio", - "Ratio of undesired allocations to shard count excluding shutting down nodes", - "1", - () -> { - var total = totalAllocations.get(); - var undesired = undesiredAllocations.get(); - return new 
DoubleWithAttributes(total != 0 ? (double) undesired / total : 0.0); - } - ); } public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { @@ -578,9 +532,7 @@ private void balance() { } } - DesiredBalanceReconciler.this.unassignedShards.set(unassignedShards); - DesiredBalanceReconciler.this.undesiredAllocations.set(undesiredAllocationsExcludingShuttingDownNodes); - DesiredBalanceReconciler.this.totalAllocations.set(totalAllocations); + desiredBalanceMetrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocationsExcludingShuttingDownNodes); maybeLogUndesiredAllocationsWarning(totalAllocations, undesiredAllocationsExcludingShuttingDownNodes, routingNodes.size()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index ba16915f2ad2b..4171100191211 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -64,6 +64,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator { private volatile DesiredBalance currentDesiredBalance = DesiredBalance.INITIAL; private volatile boolean resetCurrentDesiredBalance = false; private final Set processedNodeShutdowns = new HashSet<>(); + private final DesiredBalanceMetrics desiredBalanceMetrics; // stats protected final CounterMetric computationsSubmitted = new CounterMetric(); @@ -104,6 +105,7 @@ public DesiredBalanceShardsAllocator( DesiredBalanceReconcilerAction reconciler, TelemetryProvider telemetryProvider ) { + this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry()); this.delegateAllocator = delegateAllocator; this.threadPool = threadPool; this.reconciler = reconciler; @@ -111,7 +113,7 @@ public DesiredBalanceShardsAllocator( this.desiredBalanceReconciler = new DesiredBalanceReconciler( clusterService.getClusterSettings(), threadPool, - telemetryProvider.getMeterRegistry() + desiredBalanceMetrics ); this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { @@ -168,6 +170,10 @@ public String toString() { if (event.localNodeMaster() == false) { onNoLongerMaster(); } + // Only update on change, to minimise volatile writes + if (event.localNodeMaster() != event.previousState().nodes().isLocalNodeElectedMaster()) { + desiredBalanceMetrics.setNodeIsMaster(event.localNodeMaster()); + } }); } @@ -306,9 +312,9 @@ public DesiredBalanceStats getStats() { computedShardMovements.sum(), cumulativeComputationTime.count(), cumulativeReconciliationTime.count(), - desiredBalanceReconciler.unassignedShards.get(), - desiredBalanceReconciler.totalAllocations.get(), - desiredBalanceReconciler.undesiredAllocations.get() + desiredBalanceMetrics.unassignedShards(), + desiredBalanceMetrics.totalAllocations(), + desiredBalanceMetrics.undesiredAllocations() ); } @@ -318,10 +324,7 @@ private void onNoLongerMaster() { queue.completeAllAsNotMaster(); pendingDesiredBalanceMoves.clear(); desiredBalanceReconciler.clear(); - - desiredBalanceReconciler.unassignedShards.set(0); - desiredBalanceReconciler.totalAllocations.set(0); - desiredBalanceReconciler.undesiredAllocations.set(0); + desiredBalanceMetrics.zeroAllMetrics(); } } diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java new file mode 100644 index 0000000000000..2c642da665051 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.RecordingMeterRegistry; +import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class DesiredBalanceMetricsTests extends ESTestCase { + + public void testZeroAllMetrics() { + DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(MeterRegistry.NOOP); + long unassignedShards = randomNonNegativeLong(); + long totalAllocations = randomNonNegativeLong(); + long undesiredAllocations = randomNonNegativeLong(); + metrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocations); + assertEquals(totalAllocations, metrics.totalAllocations()); + assertEquals(unassignedShards, metrics.unassignedShards()); + assertEquals(undesiredAllocations, metrics.undesiredAllocations()); + metrics.zeroAllMetrics(); + assertEquals(0, metrics.totalAllocations()); + assertEquals(0, metrics.unassignedShards()); + assertEquals(0, metrics.undesiredAllocations()); + } + + public void testMetricsAreOnlyPublishedWhenNodeIsMaster() { + RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry(); + DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(meterRegistry); + + long unassignedShards = randomNonNegativeLong(); + long totalAllocations = randomLongBetween(100, 10000000); + long undesiredAllocations = randomLongBetween(0, totalAllocations); + metrics.updateMetrics(unassignedShards, totalAllocations, undesiredAllocations); + + // Collect when not master + meterRegistry.getRecorder().collect(); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_COUNT_METRIC_NAME), + empty() + ); + assertThat( + meterRegistry.getRecorder().getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.TOTAL_SHARDS_METRIC_NAME), + empty() + ); + assertThat( + meterRegistry.getRecorder().getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME), + empty() + ); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.DOUBLE_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME), + empty() + ); + + // Collect when master + metrics.setNodeIsMaster(true); + meterRegistry.getRecorder().collect(); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_COUNT_METRIC_NAME) + .getFirst() + .getLong(), + 
equalTo(undesiredAllocations) + ); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.TOTAL_SHARDS_METRIC_NAME) + .getFirst() + .getLong(), + equalTo(totalAllocations) + ); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.LONG_GAUGE, DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME) + .getFirst() + .getLong(), + equalTo(unassignedShards) + ); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.DOUBLE_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME) + .getFirst() + .getDouble(), + equalTo((double) undesiredAllocations / totalAllocations) + ); + } + + public void testUndesiredAllocationRatioIsZeroWhenTotalShardsIsZero() { + RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry(); + DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(meterRegistry); + long unassignedShards = randomNonNegativeLong(); + metrics.updateMetrics(unassignedShards, 0, 0); + + metrics.setNodeIsMaster(true); + meterRegistry.getRecorder().collect(); + assertThat( + meterRegistry.getRecorder() + .getMeasurements(InstrumentType.DOUBLE_GAUGE, DesiredBalanceMetrics.UNDESIRED_ALLOCATION_RATIO_METRIC_NAME) + .getFirst() + .getDouble(), + equalTo(0d) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 1ae73c9c08137..b5f44ee9e505f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -66,7 +66,6 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.snapshots.SnapshotsInfoService; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.ThreadPool; @@ -1215,7 +1214,7 @@ public void testRebalanceDoesNotCauseHotSpots() { var reconciler = new DesiredBalanceReconciler( clusterSettings, new DeterministicTaskQueue().getThreadPool(), - mock(MeterRegistry.class) + DesiredBalanceMetrics.NOOP ); var totalOutgoingMoves = new HashMap(); @@ -1297,7 +1296,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() { final var timeInMillisSupplier = new AtomicLong(); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(timeInMillisSupplier::incrementAndGet); - var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, mock(MeterRegistry.class)); + var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP); final long initialDelayInMillis = TimeValue.timeValueMinutes(5).getMillis(); timeInMillisSupplier.addAndGet(randomLongBetween(initialDelayInMillis, 2 * initialDelayInMillis)); @@ -1349,7 +1348,7 @@ public void testShouldLogOnTooManyUndesiredAllocations() { private static void reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { final var threadPool = mock(ThreadPool.class); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(new AtomicLong()::incrementAndGet); - new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, mock(MeterRegistry.class)).reconcile( + new 
DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP).reconcile( desiredBalance, routingAllocation ); From 6e67da52d1d3a26ede8065d9b33ed2173b081eed Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 10:02:47 +0200 Subject: [PATCH 083/202] =?UTF-8?q?[DOCS=E2=80=93=20Fix=20typoUpdate=20tra?= =?UTF-8?q?inedmodel.asciidoc=20(#115420)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/elastic/elasticsearch/issues/114968 --- docs/reference/cat/trainedmodel.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cat/trainedmodel.asciidoc b/docs/reference/cat/trainedmodel.asciidoc index 45c87038f5d64..5b20a0b6e842f 100644 --- a/docs/reference/cat/trainedmodel.asciidoc +++ b/docs/reference/cat/trainedmodel.asciidoc @@ -116,7 +116,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-v] [source,console] -------------------------------------------------- -GET _cat/ml/trained_models?h=c,o,l,ct,v&v=ture +GET _cat/ml/trained_models?h=c,o,l,ct,v&v=true -------------------------------------------------- // TEST[skip:kibana sample data] From a281d62988f190bb9e25361325e060e0c38d15cc Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 10:26:16 +0200 Subject: [PATCH 084/202] Remove auto_release_flood_stage_block property check (#114696) There's no option to disable the auto release of the write block when a node exceeds the flood-stage watermark. This property was deprecated in #45274 (8.0) --- .../routing/allocation/DiskThresholdSettings.java | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index d1d6a9761a758..57abbb8b8ed94 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.unit.RelativeByteSizeValue; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.Iterator; @@ -156,19 +155,6 @@ public class DiskThresholdSettings implements Writeable { private volatile boolean enabled; private volatile TimeValue rerouteInterval; - static { - checkAutoReleaseIndexEnabled(); - } - - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // this check is unnecessary in v9 - private static void checkAutoReleaseIndexEnabled() { - final String AUTO_RELEASE_INDEX_ENABLED_KEY = "es.disk.auto_release_flood_stage_block"; - final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY); - if (property != null) { - throw new IllegalArgumentException("system property [" + AUTO_RELEASE_INDEX_ENABLED_KEY + "] may not be set"); - } - } - public DiskThresholdSettings(Settings settings, ClusterSettings clusterSettings) { setLowWatermark(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.get(settings)); setLowStageMaxHeadroom(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.get(settings)); From cc6e7415c1d8d145f1ae65d58dbb72c3a439d32e Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 24 Oct 2024 09:44:45 +0100 Subject: [PATCH 085/202] Remove security 
bootstrap check that uses Version (#114923) --- .../elasticsearch/common/ReferenceDocs.java | 1 - .../org/elasticsearch/env/BuildVersion.java | 7 -- .../env/DefaultBuildVersion.java | 5 -- .../common/reference-docs-links.txt | 1 - .../xpack/security/Security.java | 12 ++- ...ecurityImplicitBehaviorBootstrapCheck.java | 67 ----------------- ...tyImplicitBehaviorBootstrapCheckTests.java | 73 ------------------- 7 files changed, 5 insertions(+), 161 deletions(-) delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 43acda1e1ec2d..926056fec3ec8 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -66,7 +66,6 @@ public enum ReferenceDocs { BOOTSTRAP_CHECK_ROLE_MAPPINGS, BOOTSTRAP_CHECK_TLS, BOOTSTRAP_CHECK_TOKEN_SSL, - BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP, CONTACT_SUPPORT, UNASSIGNED_SHARDS, EXECUTABLE_JNA_TMPDIR, diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 42c45a14977eb..3fdf01d7e1bae 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -58,13 +58,6 @@ public abstract class BuildVersion { */ public abstract boolean isFutureVersion(); - // temporary - // TODO[wrb]: remove from security bootstrap checks - @Deprecated - public Version toVersion() { - return null; - } - /** * Create a {@link BuildVersion} from a version ID number. 
* diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index dcc5ed3aee3f8..f31b34e89c01d 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -52,11 +52,6 @@ public int id() { return versionId; } - @Override - public Version toVersion() { - return version; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt index 3b0816aabf4aa..f9a8237d63717 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt @@ -28,7 +28,6 @@ BOOTSTRAP_CHECK_PKI_REALM bootstrap-checks BOOTSTRAP_CHECK_ROLE_MAPPINGS bootstrap-checks-xpack.html#bootstrap-checks-xpack-role-mappings BOOTSTRAP_CHECK_TLS bootstrap-checks-xpack.html#bootstrap-checks-tls BOOTSTRAP_CHECK_TOKEN_SSL bootstrap-checks-xpack.html#bootstrap-checks-xpack-token-ssl -BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP security-minimal-setup.html CONTACT_SUPPORT troubleshooting.html#troubleshooting-contact-support UNASSIGNED_SHARDS red-yellow-cluster-status.html EXECUTABLE_JNA_TMPDIR executable-jna-tmpdir.html diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 8f32bcf7ace8a..0b387a738a2c5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -810,13 +810,11 @@ Collection createComponents( // We need to construct the checks here while the secure settings are still available. // If we wait until #getBoostrapChecks the secure settings will have been cleared/closed. final List checks = new ArrayList<>(); - checks.addAll( - Arrays.asList( - new TokenSSLBootstrapCheck(), - new PkiRealmBootstrapCheck(getSslService()), - new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, getLicenseService()), - new TransportTLSBootstrapCheck() - ) + Collections.addAll( + checks, + new TokenSSLBootstrapCheck(), + new PkiRealmBootstrapCheck(getSslService()), + new TransportTLSBootstrapCheck() ); checks.addAll(InternalRealms.getBootstrapChecks(settings, environment)); this.bootstrapChecks.set(Collections.unmodifiableList(checks)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java deleted file mode 100644 index 2d535100d468d..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.Version; -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.ReferenceDocs; -import org.elasticsearch.env.NodeMetadata; -import org.elasticsearch.license.ClusterStateLicenseService; -import org.elasticsearch.license.License; -import org.elasticsearch.license.LicenseService; -import org.elasticsearch.xpack.core.XPackSettings; - -public class SecurityImplicitBehaviorBootstrapCheck implements BootstrapCheck { - - private final NodeMetadata nodeMetadata; - private final LicenseService licenseService; - - public SecurityImplicitBehaviorBootstrapCheck(NodeMetadata nodeMetadata, LicenseService licenseService) { - this.nodeMetadata = nodeMetadata; - this.licenseService = licenseService; - } - - @Override - public BootstrapCheckResult check(BootstrapContext context) { - if (nodeMetadata == null) { - return BootstrapCheckResult.success(); - } - if (licenseService instanceof ClusterStateLicenseService clusterStateLicenseService) { - final License license = clusterStateLicenseService.getLicense(context.metadata()); - // TODO[wrb]: Add an "isCurrentMajor" method to BuildVersion? - final Version lastKnownVersion = nodeMetadata.previousNodeVersion().toVersion(); - // pre v7.2.0 nodes have Version.EMPTY and its id is 0, so Version#before handles this successfully - if (lastKnownVersion.before(Version.V_8_0_0) - && XPackSettings.SECURITY_ENABLED.exists(context.settings()) == false - && (license.operationMode() == License.OperationMode.BASIC || license.operationMode() == License.OperationMode.TRIAL)) { - return BootstrapCheckResult.failure( - "The default value for [" - + XPackSettings.SECURITY_ENABLED.getKey() - + "] has changed in the current version. " - + " Security features were implicitly disabled for this node but they would now be enabled, possibly" - + " preventing access to the node. " - + "See " - + this.referenceDocs() - + " to configure security, or explicitly disable security by " - + "setting [xpack.security.enabled] to \"false\" in elasticsearch.yml before restarting the node." - ); - } - } - return BootstrapCheckResult.success(); - } - - public boolean alwaysEnforce() { - return true; - } - - @Override - public ReferenceDocs referenceDocs() { - return ReferenceDocs.BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP; - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java deleted file mode 100644 index 85e8d6dd38125..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.Version; -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.env.BuildVersion; -import org.elasticsearch.env.NodeMetadata; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.license.ClusterStateLicenseService; -import org.elasticsearch.license.License; -import org.elasticsearch.license.LicensesMetadata; -import org.elasticsearch.license.TestUtils; -import org.elasticsearch.license.internal.TrialLicenseVersion; -import org.elasticsearch.test.AbstractBootstrapCheckTestCase; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.xpack.core.XPackSettings; - -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; - -public class SecurityImplicitBehaviorBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - - @UpdateForV9(owner = UpdateForV9.Owner.SECURITY) - @AwaitsFix(bugUrl = "requires updates for version 9.0 bump") - public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { - final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); - nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); - ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); - BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( - createTestContext( - Settings.EMPTY, - createLicensesMetadata(TrialLicenseVersion.fromXContent(previousVersion.toString()), randomFrom("basic", "trial")) - ) - ); - assertThat(result.isSuccess(), is(true)); - } - - @UpdateForV9(owner = UpdateForV9.Owner.SECURITY) - @AwaitsFix(bugUrl = "requires updates for version 9.0 bump") - public void testUpgradeFrom8xWithExplicitSecuritySettings() throws Exception { - final BuildVersion previousVersion = toBuildVersion(VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null)); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); - nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); - ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); - BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( - createTestContext( - Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build(), - createLicensesMetadata(TrialLicenseVersion.fromXContent(previousVersion.toString()), randomFrom("basic", "trial")) - ) - ); - assertThat(result.isSuccess(), is(true)); - } - - private Metadata createLicensesMetadata(TrialLicenseVersion era, String licenseMode) throws Exception { - License license = TestUtils.generateSignedLicense(licenseMode, TimeValue.timeValueHours(2)); - return Metadata.builder().putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, era)).build(); - } - - private static BuildVersion toBuildVersion(Version version) { - return BuildVersion.fromVersionId(version.id()); - } -} From 7599d4cf43a45407bef3d88b2a0f5de8706fb540 Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Thu, 24 Oct 2024 19:51:52 +1100 Subject: [PATCH 
086/202] Use Azure blob batch API to delete blobs in batches (#114566) Closes ES-9777 --- docs/changelog/114566.yaml | 5 + .../repository-azure.asciidoc | 9 + gradle/verification-metadata.xml | 5 + modules/repository-azure/build.gradle | 1 + .../AzureBlobStoreRepositoryMetricsTests.java | 91 +++++++++ .../azure/AzureBlobStoreRepositoryTests.java | 24 ++- .../AzureStorageCleanupThirdPartyTests.java | 5 + .../src/main/java/module-info.java | 5 +- .../azure/AzureBlobContainer.java | 2 +- .../repositories/azure/AzureBlobStore.java | 175 ++++++++++-------- .../azure/AzureClientProvider.java | 14 ++ .../repositories/azure/AzureRepository.java | 15 ++ .../azure/AzureBlobContainerStatsTests.java | 10 + .../java/fixture/azure/AzureHttpHandler.java | 101 ++++++++++ 14 files changed, 374 insertions(+), 88 deletions(-) create mode 100644 docs/changelog/114566.yaml diff --git a/docs/changelog/114566.yaml b/docs/changelog/114566.yaml new file mode 100644 index 0000000000000..6007152bb26ca --- /dev/null +++ b/docs/changelog/114566.yaml @@ -0,0 +1,5 @@ +pr: 114566 +summary: Use Azure blob batch API to delete blobs in batches +area: Distributed +type: enhancement +issues: [] diff --git a/docs/reference/snapshot-restore/repository-azure.asciidoc b/docs/reference/snapshot-restore/repository-azure.asciidoc index c361414052e14..0e6e1478cfc55 100644 --- a/docs/reference/snapshot-restore/repository-azure.asciidoc +++ b/docs/reference/snapshot-restore/repository-azure.asciidoc @@ -259,6 +259,15 @@ include::repository-shared-settings.asciidoc[] `primary_only` or `secondary_only`. Defaults to `primary_only`. Note that if you set it to `secondary_only`, it will force `readonly` to true. +`delete_objects_max_size`:: + + (integer) Sets the maximum batch size, between 1 and 256, used for `BlobBatch` requests. Defaults to 256, which is the maximum + number supported by the https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch#remarks[Azure blob batch API]. + +`max_concurrent_batch_deletes`:: + + (integer) Sets the maximum number of concurrent batch delete requests that will be submitted for any individual bulk delete with `BlobBatch`. Note that the effective number of concurrent deletes is further limited by the Azure client connection and event loop thread limits. Defaults to 10, minimum is 1, maximum is 100.
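To see how the two new settings fit together, here is a minimal sketch of building repository settings with the keys added in this patch. The container name and the chosen values are hypothetical, picked only to illustrate the knobs; only the `container`, `delete_objects_max_size`, and `max_concurrent_batch_deletes` keys come from the repository settings themselves:

    import org.elasticsearch.common.settings.Settings;

    class AzureBatchDeleteSettingsSketch {
        // Build settings for an Azure repository that sends BlobBatch requests of at
        // most 50 deletes each, with at most 5 batch requests in flight per bulk delete.
        static Settings example() {
            return Settings.builder()
                .put("container", "my-container")         // hypothetical container name
                .put("delete_objects_max_size", 50)       // 1..256, defaults to 256
                .put("max_concurrent_batch_deletes", 5)   // 1..100, defaults to 10
                .build();
        }
    }

Smaller batches mean more requests per bulk delete but smaller multipart responses to parse; the defaults favor the fewest round trips the Azure API allows.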
+ [[repository-azure-validation]] ==== Repository validation rules diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index e2dfa89c8f3b8..5cfe7adb5ea49 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -144,6 +144,11 @@ + + + + + diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index eb938f663c810..d011de81f4fb3 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -30,6 +30,7 @@ dependencies { api "com.azure:azure-identity:1.13.2" api "com.azure:azure-json:1.2.0" api "com.azure:azure-storage-blob:12.27.1" + api "com.azure:azure-storage-blob-batch:12.23.1" api "com.azure:azure-storage-common:12.26.1" api "com.azure:azure-storage-internal-avro:12.12.1" api "com.azure:azure-xml:1.1.0" diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java index a9bf0afa37e18..61940be247861 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryMetricsTests.java @@ -9,14 +9,18 @@ package org.elasticsearch.repositories.azure; +import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.OperationPurpose; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesMetrics; @@ -31,6 +35,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Queue; @@ -43,6 +48,7 @@ import java.util.stream.IntStream; import static org.elasticsearch.repositories.azure.AbstractAzureServerTestCase.randomBlobContent; +import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -225,6 +231,91 @@ public void testRequestTimeIsAccurate() throws IOException { assertThat(recordedRequestTime, lessThanOrEqualTo(elapsedTimeMillis)); } + public void testBatchDeleteFailure() throws IOException { + final int deleteBatchSize = randomIntBetween(1, 30); + final String repositoryName = randomRepositoryName(); + final String repository = createRepository( + repositoryName, + Settings.builder() + .put(repositorySettings(repositoryName)) + .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), deleteBatchSize) + .build(), + true + ); + final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData); + final BlobContainer container = 
getBlobContainer(dataNodeName, repository); + + final List blobsToDelete = new ArrayList<>(); + final int numberOfBatches = randomIntBetween(3, 20); + final int numberOfBlobs = numberOfBatches * deleteBatchSize; + final int failedBatches = randomIntBetween(1, numberOfBatches); + for (int i = 0; i < numberOfBlobs; i++) { + byte[] bytes = randomBytes(randomInt(100)); + String blobName = "index-" + randomAlphaOfLength(10); + container.writeBlob(randomPurpose(), blobName, new BytesArray(bytes), false); + blobsToDelete.add(blobName); + } + Randomness.shuffle(blobsToDelete); + clearMetrics(dataNodeName); + + // Handler will fail one or more of the batch requests + final RequestHandler failNRequestRequestHandler = createFailNRequestsHandler(failedBatches); + + // Exhaust the retries + IntStream.range(0, (numberOfBatches - failedBatches) + (failedBatches * (MAX_RETRIES + 1))) + .forEach(i -> requestHandlers.offer(failNRequestRequestHandler)); + + logger.info("--> Failing {} of {} batches", failedBatches, numberOfBatches); + + final IOException exception = assertThrows( + IOException.class, + () -> container.deleteBlobsIgnoringIfNotExists(randomPurpose(), blobsToDelete.iterator()) + ); + assertEquals(Math.min(failedBatches, 10), exception.getSuppressed().length); + assertEquals( + (numberOfBatches - failedBatches) + (failedBatches * (MAX_RETRIES + 1L)), + getLongCounterTotal(dataNodeName, RepositoriesMetrics.METRIC_REQUESTS_TOTAL) + ); + assertEquals((failedBatches * (MAX_RETRIES + 1L)), getLongCounterTotal(dataNodeName, RepositoriesMetrics.METRIC_EXCEPTIONS_TOTAL)); + assertEquals(failedBatches * deleteBatchSize, container.listBlobs(randomPurpose()).size()); + } + + private long getLongCounterTotal(String dataNodeName, String metricKey) { + return getTelemetryPlugin(dataNodeName).getLongCounterMeasurement(metricKey) + .stream() + .mapToLong(Measurement::getLong) + .reduce(0L, Long::sum); + } + + /** + * Creates a {@link RequestHandler} that will persistently fail the first numberToFail distinct requests + * it sees. Any other requests are passed through to the delegate. 
+ * + * @param numberToFail The number of requests to fail + * @return the handler + */ + private static RequestHandler createFailNRequestsHandler(int numberToFail) { + final List requestsToFail = new ArrayList<>(numberToFail); + return (exchange, delegate) -> { + final Headers requestHeaders = exchange.getRequestHeaders(); + final String requestId = requestHeaders.get("X-ms-client-request-id").get(0); + boolean failRequest = false; + synchronized (requestsToFail) { + if (requestsToFail.contains(requestId)) { + failRequest = true; + } else if (requestsToFail.size() < numberToFail) { + requestsToFail.add(requestId); + failRequest = true; + } + } + if (failRequest) { + exchange.sendResponseHeaders(500, -1); + } else { + delegate.handle(exchange); + } + }; + } + private void clearMetrics(String discoveryNode) { internalCluster().getInstance(PluginsService.class, discoveryNode) .filterPlugins(TestTelemetryPlugin.class) diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 473d91da6e34c..bd21f208faac4 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -89,7 +89,9 @@ protected Settings repositorySettings(String repoName) { .put(super.repositorySettings(repoName)) .put(AzureRepository.Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB)) .put(AzureRepository.Repository.CONTAINER_SETTING.getKey(), "container") - .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test"); + .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test") + .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), randomIntBetween(5, 256)) + .put(AzureRepository.Repository.MAX_CONCURRENT_BATCH_DELETES_SETTING.getKey(), randomIntBetween(1, 10)); if (randomBoolean()) { settingsBuilder.put(AzureRepository.Repository.BASE_PATH_SETTING.getKey(), randomFrom("test", "test/1")); } @@ -249,6 +251,8 @@ protected void maybeTrack(String request, Headers headers) { trackRequest("PutBlockList"); } else if (Regex.simpleMatch("PUT /*/*", request)) { trackRequest("PutBlob"); + } else if (Regex.simpleMatch("POST /*/*?*comp=batch*", request)) { + trackRequest("BlobBatch"); } } @@ -279,10 +283,22 @@ public void testLargeBlobCountDeletion() throws Exception { } public void testDeleteBlobsIgnoringIfNotExists() throws Exception { - try (BlobStore store = newBlobStore()) { + // Test with a smaller batch size here + final int deleteBatchSize = randomIntBetween(1, 30); + final String repositoryName = randomRepositoryName(); + createRepository( + repositoryName, + Settings.builder() + .put(repositorySettings(repositoryName)) + .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), deleteBatchSize) + .build(), + true + ); + try (BlobStore store = newBlobStore(repositoryName)) { final BlobContainer container = store.blobContainer(BlobPath.EMPTY); - List blobsToDelete = new ArrayList<>(); - for (int i = 0; i < 10; i++) { + final int toDeleteCount = randomIntBetween(deleteBatchSize, 3 * deleteBatchSize); + final List blobsToDelete = new ArrayList<>(); + for (int i = 0; i < toDeleteCount; i++) { byte[] bytes = randomBytes(randomInt(100)); String 
blobName = randomAlphaOfLength(10); container.writeBlob(randomPurpose(), blobName, new BytesArray(bytes), false); diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index abd4f506a0bb3..6d5c17c392141 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Booleans; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -46,6 +48,7 @@ import static org.hamcrest.Matchers.not; public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + private static final Logger logger = LogManager.getLogger(AzureStorageCleanupThirdPartyTests.class); private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true")); private static final String AZURE_ACCOUNT = System.getProperty("test.azure.account"); @@ -89,8 +92,10 @@ protected SecureSettings credentials() { MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.default.account", System.getProperty("test.azure.account")); if (hasSasToken) { + logger.info("--> Using SAS token authentication"); secureSettings.setString("azure.client.default.sas_token", System.getProperty("test.azure.sas_token")); } else { + logger.info("--> Using key authentication"); secureSettings.setString("azure.client.default.key", System.getProperty("test.azure.key")); } return secureSettings; diff --git a/modules/repository-azure/src/main/java/module-info.java b/modules/repository-azure/src/main/java/module-info.java index cd6be56b71543..731f1e0a9986a 100644 --- a/modules/repository-azure/src/main/java/module-info.java +++ b/modules/repository-azure/src/main/java/module-info.java @@ -18,10 +18,7 @@ requires org.apache.logging.log4j; requires org.apache.logging.log4j.core; - requires com.azure.core; requires com.azure.http.netty; - requires com.azure.storage.blob; - requires com.azure.storage.common; requires com.azure.identity; requires io.netty.buffer; @@ -29,7 +26,7 @@ requires io.netty.resolver; requires io.netty.common; - requires reactor.core; requires reactor.netty.core; requires reactor.netty.http; + requires com.azure.storage.blob.batch; } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index a3f26424324fa..52bc1ee1399d4 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -138,7 +138,7 @@ public void writeMetadataBlob( } @Override - public DeleteResult delete(OperationPurpose purpose) { + public DeleteResult 
delete(OperationPurpose purpose) throws IOException { return blobStore.deleteBlobDirectory(purpose, keyPath); } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 829868797e38c..3c64bb9f3b830 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -25,6 +25,10 @@ import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.blob.BlobServiceAsyncClient; import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.batch.BlobBatch; +import com.azure.storage.blob.batch.BlobBatchAsyncClient; +import com.azure.storage.blob.batch.BlobBatchClientBuilder; +import com.azure.storage.blob.batch.BlobBatchStorageException; import com.azure.storage.blob.models.BlobErrorCode; import com.azure.storage.blob.models.BlobItem; import com.azure.storage.blob.models.BlobItemProperties; @@ -99,6 +103,8 @@ public class AzureBlobStore implements BlobStore { private static final Logger logger = LogManager.getLogger(AzureBlobStore.class); + // See https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch#request-body + public static final int MAX_ELEMENTS_PER_BATCH = 256; private static final long DEFAULT_READ_CHUNK_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB).getBytes(); private static final int DEFAULT_UPLOAD_BUFFERS_SIZE = (int) new ByteSizeValue(64, ByteSizeUnit.KB).getBytes(); @@ -110,6 +116,8 @@ public class AzureBlobStore implements BlobStore { private final String container; private final LocationMode locationMode; private final ByteSizeValue maxSinglePartUploadSize; + private final int deletionBatchSize; + private final int maxConcurrentBatchDeletes; private final RequestMetricsRecorder requestMetricsRecorder; private final AzureClientProvider.RequestMetricsHandler requestMetricsHandler; @@ -129,6 +137,8 @@ public AzureBlobStore( // locationMode is set per repository, not per client this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); this.maxSinglePartUploadSize = Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.get(metadata.settings()); + this.deletionBatchSize = Repository.DELETION_BATCH_SIZE_SETTING.get(metadata.settings()); + this.maxConcurrentBatchDeletes = Repository.MAX_CONCURRENT_BATCH_DELETES_SETTING.get(metadata.settings()); List<RequestMatcher> requestMatchers = List.of( new RequestMatcher((httpMethod, url) -> httpMethod == HttpMethod.HEAD, Operation.GET_BLOB_PROPERTIES), @@ -147,17 +157,14 @@ public AzureBlobStore( && isPutBlockRequest(httpMethod, url) == false && isPutBlockListRequest(httpMethod, url) == false, Operation.PUT_BLOB - ) + ), + new RequestMatcher(AzureBlobStore::isBlobBatch, Operation.BLOB_BATCH) ); this.requestMetricsHandler = (purpose, method, url, metrics) -> { try { URI uri = url.toURI(); String path = uri.getPath() == null ?
"" : uri.getPath(); - // Batch delete requests - if (path.contains(container) == false) { - return; - } assert path.contains(container) : uri.toString(); } catch (URISyntaxException ignored) { return; @@ -172,6 +179,10 @@ && isPutBlockListRequest(httpMethod, url) == false, }; } + private static boolean isBlobBatch(HttpMethod method, URL url) { + return method == HttpMethod.POST && url.getQuery() != null && url.getQuery().contains("comp=batch"); + } + private static boolean isListRequest(HttpMethod httpMethod, URL url) { return httpMethod == HttpMethod.GET && url.getQuery() != null && url.getQuery().contains("comp=list"); } @@ -231,95 +242,101 @@ public boolean blobExists(OperationPurpose purpose, String blob) throws IOExcept } } - // number of concurrent blob delete requests to use while bulk deleting - private static final int CONCURRENT_DELETES = 100; - - public DeleteResult deleteBlobDirectory(OperationPurpose purpose, String path) { + public DeleteResult deleteBlobDirectory(OperationPurpose purpose, String path) throws IOException { final AtomicInteger blobsDeleted = new AtomicInteger(0); final AtomicLong bytesDeleted = new AtomicLong(0); SocketAccess.doPrivilegedVoidException(() -> { - final BlobContainerAsyncClient blobContainerAsyncClient = asyncClient(purpose).getBlobContainerAsyncClient(container); + final AzureBlobServiceClient client = getAzureBlobServiceClientClient(purpose); + final BlobContainerAsyncClient blobContainerAsyncClient = client.getAsyncClient().getBlobContainerAsyncClient(container); final ListBlobsOptions options = new ListBlobsOptions().setPrefix(path) .setDetails(new BlobListDetails().setRetrieveMetadata(true)); - try { - blobContainerAsyncClient.listBlobs(options, null).flatMap(blobItem -> { - if (blobItem.isPrefix() != null && blobItem.isPrefix()) { - return Mono.empty(); - } else { - final String blobName = blobItem.getName(); - BlobAsyncClient blobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(blobName); - final Mono<Void> deleteTask = getDeleteTask(blobName, blobAsyncClient); - bytesDeleted.addAndGet(blobItem.getProperties().getContentLength()); - blobsDeleted.incrementAndGet(); - return deleteTask; - } - }, CONCURRENT_DELETES).then().block(); - } catch (Exception e) { - filterDeleteExceptionsAndRethrow(e, new IOException("Deleting directory [" + path + "] failed")); - } + final Flux<String> blobsFlux = blobContainerAsyncClient.listBlobs(options).filter(bi -> bi.isPrefix() == false).map(bi -> { + bytesDeleted.addAndGet(bi.getProperties().getContentLength()); + blobsDeleted.incrementAndGet(); + return bi.getName(); + }); + deleteListOfBlobs(client, blobsFlux); }); return new DeleteResult(blobsDeleted.get(), bytesDeleted.get()); } - private static void filterDeleteExceptionsAndRethrow(Exception e, IOException exception) throws IOException { - int suppressedCount = 0; - for (Throwable suppressed : e.getSuppressed()) { - // We're only interested about the blob deletion exceptions and not in the reactor internals exceptions - if (suppressed instanceof IOException) { - exception.addSuppressed(suppressed); - suppressedCount++; - if (suppressedCount > 10) { - break; - } - } + @Override + public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException { + if (blobNames.hasNext() == false) { + return; + } + SocketAccess.doPrivilegedVoidException( + () -> deleteListOfBlobs( + getAzureBlobServiceClientClient(purpose), + Flux.fromStream(StreamSupport.stream(Spliterators.spliteratorUnknownSize(blobNames, Spliterator.ORDERED),
false)) + ) + ); + } + + private void deleteListOfBlobs(AzureBlobServiceClient azureBlobServiceClient, Flux<String> blobNames) throws IOException { + // We need to use a container-scoped BlobBatchClient, so the restype=container parameter + // is sent, and we can support all SAS token types + // See https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch?tabs=shared-access-signatures#authorization + final BlobBatchAsyncClient batchAsyncClient = new BlobBatchClientBuilder( + azureBlobServiceClient.getAsyncClient().getBlobContainerAsyncClient(container) + ).buildAsyncClient(); + final List<Throwable> errors; + final AtomicInteger errorsCollected = new AtomicInteger(0); + try { + errors = blobNames.buffer(deletionBatchSize).flatMap(blobs -> { + final BlobBatch blobBatch = batchAsyncClient.getBlobBatch(); + blobs.forEach(blob -> blobBatch.deleteBlob(container, blob)); + return batchAsyncClient.submitBatch(blobBatch).then(Mono.empty()).onErrorResume(t -> { + // Ignore errors that are just 404s, send other errors downstream as values + if (AzureBlobStore.isIgnorableBatchDeleteException(t)) { + return Mono.empty(); + } else { + // Propagate the first 10 errors only + if (errorsCollected.getAndIncrement() < 10) { + return Mono.just(t); + } else { + return Mono.empty(); + } + } + }); + }, maxConcurrentBatchDeletes).collectList().block(); + } catch (Exception e) { + throw new IOException("Error deleting batches", e); + } + if (errors.isEmpty() == false) { + final int totalErrorCount = errorsCollected.get(); + final String errorMessage = totalErrorCount > errors.size() + ? "Some errors occurred deleting batches, the first " + + errors.size() + + " are included as suppressed, but the total count was " + + totalErrorCount : "Some errors occurred deleting batches, all errors included as suppressed"; + final IOException ex = new IOException(errorMessage); + errors.forEach(ex::addSuppressed); + throw ex; } - throw exception; } /** - * {@inheritDoc} - *

- * Note that in this Azure implementation we issue a series of individual - * delete blob calls rather than aggregating - * deletions into blob batch calls. - * The reason for this is that the blob batch endpoint has limited support for SAS token authentication. + * We can ignore {@link BlobBatchStorageException}s when they are just telling us some of the files were not found * - * @see - * API docs around SAS auth limitations - * @see Java SDK issue - * @see Discussion on implementing PR + * @param exception An exception throw by batch delete + * @return true if it is safe to ignore, false otherwise */ - @Override - public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator blobs) { - if (blobs.hasNext() == false) { - return; - } - - BlobServiceAsyncClient asyncClient = asyncClient(purpose); - SocketAccess.doPrivilegedVoidException(() -> { - final BlobContainerAsyncClient blobContainerClient = asyncClient.getBlobContainerAsyncClient(container); - try { - Flux.fromStream(StreamSupport.stream(Spliterators.spliteratorUnknownSize(blobs, Spliterator.ORDERED), false)) - .flatMap(blob -> getDeleteTask(blob, blobContainerClient.getBlobAsyncClient(blob)), CONCURRENT_DELETES) - .then() - .block(); - } catch (Exception e) { - filterDeleteExceptionsAndRethrow(e, new IOException("Unable to delete blobs")); + private static boolean isIgnorableBatchDeleteException(Throwable exception) { + if (exception instanceof BlobBatchStorageException bbse) { + final Iterable batchExceptions = bbse.getBatchExceptions(); + for (BlobStorageException bse : batchExceptions) { + // If any requests failed with something other than a BLOB_NOT_FOUND, it is not ignorable + if (BlobErrorCode.BLOB_NOT_FOUND.equals(bse.getErrorCode()) == false) { + return false; + } } - }); - } - - private static Mono getDeleteTask(String blobName, BlobAsyncClient blobAsyncClient) { - return blobAsyncClient.delete() - // Ignore not found blobs, as it's possible that due to network errors a request - // for an already deleted blob is retried, causing an error. 
- .onErrorResume( - e -> e instanceof BlobStorageException blobStorageException && blobStorageException.getStatusCode() == 404, - throwable -> Mono.empty() - ) - .onErrorMap(throwable -> new IOException("Error deleting blob " + blobName, throwable)); + return true; + } + return false; } public InputStream getInputStream(OperationPurpose purpose, String blob, long position, final @Nullable Long length) { @@ -363,8 +380,7 @@ public Map listBlobsByPrefix(OperationPurpose purpose, Str for (final BlobItem blobItem : containerClient.listBlobsByHierarchy("/", listBlobsOptions, null)) { BlobItemProperties properties = blobItem.getProperties(); - Boolean isPrefix = blobItem.isPrefix(); - if (isPrefix != null && isPrefix) { + if (blobItem.isPrefix()) { continue; } String blobName = blobItem.getName().substring(keyPath.length()); @@ -689,7 +705,8 @@ enum Operation { GET_BLOB_PROPERTIES("GetBlobProperties"), PUT_BLOB("PutBlob"), PUT_BLOCK("PutBlock"), - PUT_BLOCK_LIST("PutBlockList"); + PUT_BLOCK_LIST("PutBlockList"), + BLOB_BATCH("BlobBatch"); private final String key; diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java index 654742c980268..f92bbcbdd716d 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java @@ -317,6 +317,11 @@ private enum RetryMetricsTracker implements HttpPipelinePolicy { @Override public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + if (requestIsPartOfABatch(context)) { + // Batch deletes fire once for each of the constituent requests, and they have a null response. Ignore those, we'll track + // metrics at the bulk level. + return next.process(); + } Optional metricsData = context.getData(RequestMetricsTracker.ES_REQUEST_METRICS_CONTEXT_KEY); if (metricsData.isPresent() == false) { assert false : "No metrics object associated with request " + context.getHttpRequest(); @@ -361,6 +366,11 @@ private RequestMetricsTracker(OperationPurpose purpose, RequestMetricsHandler re @Override public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + if (requestIsPartOfABatch(context)) { + // Batch deletes fire once for each of the constituent requests, and they have a null response. Ignore those, we'll track + // metrics at the bulk level. 
+ return next.process(); + } final RequestMetrics requestMetrics = new RequestMetrics(); context.setData(ES_REQUEST_METRICS_CONTEXT_KEY, requestMetrics); return next.process().doOnSuccess((httpResponse) -> { @@ -389,6 +399,10 @@ public HttpPipelinePosition getPipelinePosition() { } } + private static boolean requestIsPartOfABatch(HttpPipelineCallContext context) { + return context.getData("Batch-Operation-Info").isPresent(); + } + /** * The {@link RequestMetricsTracker} calls this when a request completes */ diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 80e662343baee..316db4844e598 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -87,6 +87,21 @@ public static final class Repository { DEFAULT_MAX_SINGLE_UPLOAD_SIZE, Property.NodeScope ); + + /** + * The batch size for batched delete requests + */ + static final Setting<Integer> DELETION_BATCH_SIZE_SETTING = Setting.intSetting( + "delete_objects_max_size", + AzureBlobStore.MAX_ELEMENTS_PER_BATCH, + 1, + AzureBlobStore.MAX_ELEMENTS_PER_BATCH + ); + + /** + * The maximum number of concurrent batch deletes + */ + static final Setting<Integer> MAX_CONCURRENT_BATCH_DELETES_SETTING = Setting.intSetting("max_concurrent_batch_deletes", 10, 1, 100); } private final ByteSizeValue chunkSize; diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java index 1ed01bbadc07e..6730e5c3c81bd 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.List; import java.util.Map; public class AzureBlobContainerStatsTests extends AbstractAzureServerTestCase { @@ -47,6 +48,8 @@ public void testOperationPurposeIsReflectedInBlobStoreStats() throws IOException os.write(blobContent); os.flush(); }); + // BLOB_BATCH + blobStore.deleteBlobsIgnoringIfNotExists(purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator()); Map<String, Long> stats = blobStore.stats(); String statsMapString = stats.toString(); @@ -55,6 +58,7 @@ public void testOperationPurposeIsReflectedInBlobStoreStats() throws IOException assertEquals(statsMapString, Long.valueOf(1L), stats.get(statsKey(purpose, AzureBlobStore.Operation.GET_BLOB_PROPERTIES))); assertEquals(statsMapString, Long.valueOf(1L), stats.get(statsKey(purpose, AzureBlobStore.Operation.PUT_BLOCK))); assertEquals(statsMapString, Long.valueOf(1L), stats.get(statsKey(purpose, AzureBlobStore.Operation.PUT_BLOCK_LIST))); + assertEquals(statsMapString, Long.valueOf(1L), stats.get(statsKey(purpose, AzureBlobStore.Operation.BLOB_BATCH))); } public void testOperationPurposeIsNotReflectedInBlobStoreStatsWhenNotServerless() throws IOException { @@ -79,6 +83,11 @@ public void testOperationPurposeIsNotReflectedInBlobStoreStatsWhenNotServerless( os.write(blobContent); os.flush(); }); + // BLOB_BATCH + blobStore.deleteBlobsIgnoringIfNotExists( + purpose, + List.of(randomIdentifier(),
randomIdentifier(), randomIdentifier()).iterator() + ); } Map stats = blobStore.stats(); @@ -88,6 +97,7 @@ public void testOperationPurposeIsNotReflectedInBlobStoreStatsWhenNotServerless( assertEquals(statsMapString, Long.valueOf(repeatTimes), stats.get(AzureBlobStore.Operation.GET_BLOB_PROPERTIES.getKey())); assertEquals(statsMapString, Long.valueOf(repeatTimes), stats.get(AzureBlobStore.Operation.PUT_BLOCK.getKey())); assertEquals(statsMapString, Long.valueOf(repeatTimes), stats.get(AzureBlobStore.Operation.PUT_BLOCK_LIST.getKey())); + assertEquals(statsMapString, Long.valueOf(repeatTimes), stats.get(AzureBlobStore.Operation.BLOB_BATCH.getKey())); } private static String statsKey(OperationPurpose purpose, AzureBlobStore.Operation operation) { diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java index d8716fd987f3e..92ce04b6bea5b 100644 --- a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java @@ -12,6 +12,9 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; @@ -23,10 +26,14 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; +import java.io.BufferedReader; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -35,6 +42,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Predicate; import java.util.regex.Matcher; @@ -47,6 +55,8 @@ */ @SuppressForbidden(reason = "Uses a HttpServer to emulate an Azure endpoint") public class AzureHttpHandler implements HttpHandler { + private static final Logger logger = LogManager.getLogger(AzureHttpHandler.class); + private final Map blobs; private final String account; private final String container; @@ -264,7 +274,98 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); exchange.getResponseBody().write(response); + } else if (Regex.simpleMatch("POST /" + account + "/" + container + "*restype=container*comp=batch*", request)) { + // Blob Batch (https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch) + final StringBuilder response = new StringBuilder(); + + try (BufferedReader requestReader = new BufferedReader(new InputStreamReader(exchange.getRequestBody()))) { + final String batchBoundary = requestReader.readLine(); + final String responseBoundary = "batch_" + UUID.randomUUID(); + + String line; + String contentId = null, requestId = null, toDelete = null; + while ((line = requestReader.readLine()) != null) { + if (batchBoundary.equals(line) || (batchBoundary + "--").equals(line)) { + // Found the end of a single request, process it + if (contentId == null || requestId == null || 
toDelete == null) { + throw new IllegalStateException( + "Missing contentId/requestId/toDelete: " + contentId + "/" + requestId + "/" + toDelete + ); + } + + // Process the deletion + if (blobs.remove("/" + account + toDelete) != null) { + final String acceptedPart = Strings.format(""" + --%s + Content-Type: application/http + Content-ID: %s + + HTTP/1.1 202 Accepted + x-ms-delete-type-permanent: true + x-ms-request-id: %s + x-ms-version: 2018-11-09 + + """, responseBoundary, contentId, requestId).replaceAll("\n", "\r\n"); + response.append(acceptedPart); + } else { + final String notFoundBody = Strings.format( + """ + + BlobNotFoundThe specified blob does not exist. + RequestId:%s + Time:%s""", + requestId, + DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now(ZoneId.of("UTC"))) + ); + final String notFoundPart = Strings.format(""" + --%s + Content-Type: application/http + Content-ID: %s + + HTTP/1.1 404 The specified blob does not exist. + x-ms-error-code: BlobNotFound + x-ms-request-id: %s + x-ms-version: 2018-11-09 + Content-Length: %d + Content-Type: application/xml + + %s + """, responseBoundary, contentId, requestId, notFoundBody.length(), notFoundBody) + .replaceAll("\n", "\r\n"); + response.append(notFoundPart); + } + + // Clear the state + toDelete = null; + contentId = null; + requestId = null; + } else if (Regex.simpleMatch("x-ms-client-request-id: *", line)) { + if (requestId != null) { + throw new IllegalStateException("Got multiple request IDs in a single request?"); + } + requestId = line.split("\\s")[1]; + } else if (Regex.simpleMatch("Content-ID: *", line)) { + if (contentId != null) { + throw new IllegalStateException("Got multiple content IDs in a single request?"); + } + contentId = line.split("\\s")[1]; + } else if (Regex.simpleMatch("DELETE /" + container + "/*", line)) { + String blobName = RestUtils.decodeComponent(line.split("(\\s|\\?)")[1]); + if (toDelete != null) { + throw new IllegalStateException("Got multiple deletes in a single request?"); + } + toDelete = blobName; + } + } + response.append("--").append(responseBoundary).append("--\r\n0\r\n"); + // Send the response + exchange.getResponseHeaders().add("Content-Type", "multipart/mixed; boundary=" + responseBoundary); + exchange.sendResponseHeaders(RestStatus.ACCEPTED.getStatus(), response.length()); + logger.debug("--> Sending response:\n{}", response); + exchange.getResponseBody().write(response.toString().getBytes(StandardCharsets.UTF_8)); + } } else { + logger.warn("--> Unrecognised request received: {}", request); sendError(exchange, RestStatus.BAD_REQUEST); } } finally { From e0a458441cff9a4242cd93f4c02f06d72f2d63c4 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Thu, 24 Oct 2024 11:55:54 +0300 Subject: [PATCH 087/202] [Failure store - selector syntax] Introduce the `::*` selector (#115389) **Introduction** > In order to make adoption of failure stores simpler for all users, we are introducing a new syntactical feature to index expression resolution: The selector. > > Selectors, denoted with a :: followed by a recognized suffix will allow users to specify which component of an index abstraction they would like to operate on within an API call. In this case, an index abstraction is a concrete index, data stream, or alias; Any abstraction that can be resolved to a set of indices/shards. We define a component of an index abstraction to be some searchable unit of the index abstraction. > > To start, we will support two components: data and failures. 
Concrete indices are their own data components, while the data component of an index alias comprises all of the indices contained therein. For data streams, the data component corresponds to their backing indices. Data stream aliases mirror this, treating all backing indices of the data streams they correspond to as their data component. > > The failure component is only supported by data streams and data stream aliases. The failure component of these abstractions refers to the data streams' failure stores. Indices and index aliases do not have a failure component. For more details and examples, see https://github.com/elastic/elasticsearch/pull/113144. All this work has been cherry-picked from there. **Purpose of this PR** This PR introduces `::*` as a selector option in its own right, not as a combination of `::data` and `::failures`. The reason for this change is that we need to differentiate between: - `my-index::*`, which should resolve to `my-index::data` only and not to `my-index::failures`, and - a user explicitly requesting `my-index::data, my-index::failures`, which should potentially result in an error. (A runnable sketch of these resolution semantics appears below, after [PATCH 088/202].) --- .../datastreams/DataStreamsSnapshotsIT.java | 2 +- .../IngestFailureStoreMetricsIT.java | 2 +- .../lifecycle/DataStreamLifecycleService.java | 6 +- .../DataStreamLifecycleServiceTests.java | 8 +- .../org/elasticsearch/TransportVersions.java | 1 + .../admin/indices/get/GetIndexRequest.java | 2 +- .../indices/rollover/RolloverRequest.java | 5 +- .../action/bulk/BulkOperation.java | 2 +- .../action/bulk/TransportBulkAction.java | 2 +- .../datastreams/DataStreamsStatsAction.java | 2 +- .../support/IndexComponentSelector.java | 73 ++++++++-- .../action/support/IndicesOptions.java | 133 ++++++++---------- .../indices/RestRolloverIndexAction.java | 2 +- .../indices/get/GetIndexRequestTests.java | 2 +- .../MetadataRolloverServiceTests.java | 4 +- .../rollover/RolloverRequestTests.java | 14 +- .../support/IndexComponentSelectorTests.java | 41 ++++++ .../action/support/IndicesOptionsTests.java | 18 +-- .../IndexNameExpressionResolverTests.java | 16 +-- .../xpack/core/ilm/RolloverStep.java | 4 +- .../core/ilm/WaitForRolloverReadyStep.java | 4 +- 21 files changed, 197 insertions(+), 146 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/support/IndexComponentSelectorTests.java diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 212b869c6d933..286ad68896797 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -138,7 +138,7 @@ public void setup() throws Exception { // Initialize the failure store.
RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null); rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES).build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get(); assertTrue(response.isAcknowledged()); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index 679ad5b000c8f..96def04069e24 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -195,7 +195,7 @@ public void testRejectionFromFailureStore() throws IOException { // Initialize failure store. var rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES).build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); var failureStoreIndex = rolloverResponse.getNewIndex(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 7bbf7137d290e..7d2828e30d5ab 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -946,7 +946,7 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest(); updateMergePolicySettingsRequest.indicesOptions( IndicesOptions.builder(updateMergePolicySettingsRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); updateMergePolicySettingsRequest.indices(indexName); @@ -1408,9 +1408,7 @@ static RolloverRequest getDefaultRolloverRequest( RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null).masterNodeTimeout(TimeValue.MAX_VALUE); if (rolloverFailureStore) { rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) - .build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); } rolloverRequest.setConditions(rolloverConfiguration.resolveRolloverConditions(dataRetention)); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index d6bf80798764d..698ab427ab040 100644 --- 
a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -225,11 +225,11 @@ public void testOperationsExecutedOnce() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_DATA)); + assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_FAILURES)); + assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); List deleteRequests = clientSeenRequests.subList(2, 5) .stream() .map(transportRequest -> (DeleteIndexRequest) transportRequest) @@ -1546,11 +1546,11 @@ public void testFailureStoreIsManagedEvenWhenDisabled() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_DATA)); + assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ONLY_FAILURES)); + assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); assertThat( ((DeleteIndexRequest) clientSeenRequests.get(2)).indices()[0], is(dataStream.getFailureIndices().getIndices().get(0).getName()) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 6d9bf2ac52f2d..777ff083f33f8 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -180,6 +180,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ = def(8_776_00_0); public static final TransportVersion SIMULATE_MAPPING_ADDITION = def(8_777_00_0); + public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 4c5ee08beb192..801dbbdee0858 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -98,7 +98,7 @@ public GetIndexRequest() { super( DataStream.isFailureStoreFeatureFlagEnabled() ? IndicesOptions.builder(IndicesOptions.strictExpandOpen()) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() : IndicesOptions.strictExpandOpen() ); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 5a7f330be50c0..552ce727d4249 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.cluster.metadata.DataStream; @@ -124,8 +125,8 @@ public ActionRequestValidationException validate() { ); } - var selectors = indicesOptions.selectorOptions().defaultSelectors(); - if (selectors.size() > 1) { + var selector = indicesOptions.selectorOptions().defaultSelector(); + if (selector == IndexComponentSelector.ALL_APPLICABLE) { validationException = addValidationError( "rollover cannot be applied to both regular and failure indices at the same time", validationException diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 130d6286f7e02..ce3e189149451 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -212,7 +212,7 @@ private void rollOverFailureStores(Runnable runnable) { RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build() ); // We are executing a lazy rollover because it is an action specialised for this situation, when we want an diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index a3a73415ec4f6..cef68324e2a45 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -418,7 +418,7 @@ private void rollOverDataStreams( if (targetFailureStore) { rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + 
.selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java index 1c30303915c8e..9266bae439b73 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java @@ -61,7 +61,7 @@ public Request() { .allowFailureIndices(true) .build() ) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java b/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java index 65b48db8f5cf3..910be151d1bf5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndexComponentSelector.java @@ -9,6 +9,12 @@ package org.elasticsearch.action.support; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; + +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -17,33 +23,82 @@ * We define as index components the two different sets of indices a data stream could consist of: * - DATA: represents the backing indices * - FAILURES: represents the failing indices + * - ALL: represents all the components available in this expression: for a data stream, both backing and failure indices; for an index, + * only the index itself. * Note: An index is its own DATA component, but it cannot have a FAILURE component. */ -public enum IndexComponentSelector { - DATA("data"), - FAILURES("failures"); +public enum IndexComponentSelector implements Writeable { + DATA("data", (byte) 0), + FAILURES("failures", (byte) 1), + ALL_APPLICABLE("*", (byte) 2); private final String key; + private final byte id; - IndexComponentSelector(String key) { + IndexComponentSelector(String key, byte id) { this.key = key; + this.id = id; } public String getKey() { return key; } - private static final Map<String, IndexComponentSelector> REGISTRY; + public byte getId() { + return id; + } + + private static final Map<String, IndexComponentSelector> KEY_REGISTRY; + private static final Map<Byte, IndexComponentSelector> ID_REGISTRY; static { - Map<String, IndexComponentSelector> registry = new HashMap<>(IndexComponentSelector.values().length); + Map<String, IndexComponentSelector> keyRegistry = new HashMap<>(IndexComponentSelector.values().length); + for (IndexComponentSelector value : IndexComponentSelector.values()) { + keyRegistry.put(value.getKey(), value); + } + KEY_REGISTRY = Collections.unmodifiableMap(keyRegistry); + Map<Byte, IndexComponentSelector> idRegistry = new HashMap<>(IndexComponentSelector.values().length); for (IndexComponentSelector value : IndexComponentSelector.values()) { - registry.put(value.getKey(), value); + idRegistry.put(value.getId(), value); } - REGISTRY = Collections.unmodifiableMap(registry); + ID_REGISTRY = Collections.unmodifiableMap(idRegistry); } + /** + * Retrieves the respective selector when the suffix key is recognised + * @param key the suffix key, probably parsed from an expression + * @return the selector or null if the key was not recognised.
+ */ + @Nullable public static IndexComponentSelector getByKey(String key) { - return REGISTRY.get(key); + return KEY_REGISTRY.get(key); + } + + public static IndexComponentSelector read(StreamInput in) throws IOException { + return getById(in.readByte()); + } + + // Visible for testing + static IndexComponentSelector getById(byte id) { + IndexComponentSelector indexComponentSelector = ID_REGISTRY.get(id); + if (indexComponentSelector == null) { + throw new IllegalArgumentException( + "Unknown id of index component selector [" + id + "], available options are: " + ID_REGISTRY + ); + } + return indexComponentSelector; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(id); + } + + public boolean shouldIncludeData() { + return this == ALL_APPLICABLE || this == DATA; + } + + public boolean shouldIncludeFailures() { + return this == ALL_APPLICABLE || this == FAILURES; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 22d019f80837d..85889d8398cb1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -421,61 +421,45 @@ public static Builder builder(GatekeeperOptions gatekeeperOptions) { /** * Defines which selectors should be used by default for an index operation in the event that no selectors are provided. */ - public record SelectorOptions(EnumSet defaultSelectors) implements Writeable { + public record SelectorOptions(IndexComponentSelector defaultSelector) implements Writeable { - public static final SelectorOptions DATA_AND_FAILURE = new SelectorOptions( - EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES) - ); - public static final SelectorOptions ONLY_DATA = new SelectorOptions(EnumSet.of(IndexComponentSelector.DATA)); - public static final SelectorOptions ONLY_FAILURES = new SelectorOptions(EnumSet.of(IndexComponentSelector.FAILURES)); + public static final SelectorOptions ALL_APPLICABLE = new SelectorOptions(IndexComponentSelector.ALL_APPLICABLE); + public static final SelectorOptions DATA = new SelectorOptions(IndexComponentSelector.DATA); + public static final SelectorOptions FAILURES = new SelectorOptions(IndexComponentSelector.FAILURES); /** * Default instance. Uses
::data
as the default selector if none are present in an index expression. */ - public static final SelectorOptions DEFAULT = ONLY_DATA; + public static final SelectorOptions DEFAULT = DATA; public static SelectorOptions read(StreamInput in) throws IOException { - return new SelectorOptions(in.readEnumSet(IndexComponentSelector.class)); + if (in.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { + EnumSet set = in.readEnumSet(IndexComponentSelector.class); + if (set.isEmpty() || set.size() == 2) { + assert set.contains(IndexComponentSelector.DATA) && set.contains(IndexComponentSelector.FAILURES) + : "The enum set only supported ::data and ::failures"; + return SelectorOptions.ALL_APPLICABLE; + } else if (set.contains(IndexComponentSelector.DATA)) { + return SelectorOptions.DATA; + } else { + return SelectorOptions.FAILURES; + } + } else { + return new SelectorOptions(IndexComponentSelector.read(in)); + } } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeEnumSet(defaultSelectors); - } - - public static class Builder { - private EnumSet defaultSelectors; - - public Builder() { - this(DEFAULT); - } - - Builder(SelectorOptions options) { - defaultSelectors = EnumSet.copyOf(options.defaultSelectors); - } - - public Builder setDefaultSelectors(IndexComponentSelector first, IndexComponentSelector... remaining) { - defaultSelectors = EnumSet.of(first, remaining); - return this; - } - - public Builder setDefaultSelectors(EnumSet defaultSelectors) { - this.defaultSelectors = EnumSet.copyOf(defaultSelectors); - return this; - } - - public SelectorOptions build() { - assert defaultSelectors.isEmpty() != true : "Default selectors cannot be an empty set"; - return new SelectorOptions(EnumSet.copyOf(defaultSelectors)); + if (out.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { + switch (defaultSelector) { + case ALL_APPLICABLE -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES)); + case DATA -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA)); + case FAILURES -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.FAILURES)); + } + } else { + defaultSelector.writeTo(out); } } - - public static Builder builder() { - return new Builder(); - } - - public static Builder builder(SelectorOptions selectorOptions) { - return new Builder(selectorOptions); - } } /** @@ -547,7 +531,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -566,7 +550,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -585,7 +569,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -622,7 +606,7 @@ private enum Option { 
.allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -641,7 +625,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -655,7 +639,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTOR = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -687,7 +671,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -701,7 +685,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -733,7 +717,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -747,7 +731,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -766,7 +750,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -785,7 +769,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -804,7 +788,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions 
STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -823,7 +807,7 @@ private enum Option { .allowFailureIndices(true) .allowAliasToMultipleIndices(true) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -842,7 +826,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -861,7 +845,7 @@ private enum Option { .allowFailureIndices(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ONLY_DATA) + .selectorOptions(SelectorOptions.DATA) .build(); /** @@ -919,7 +903,7 @@ public boolean forbidClosedIndices() { } /** - * @return Whether execution on closed indices is allowed. + * @return Whether execution on failure indices is allowed. */ public boolean allowFailureIndices() { return gatekeeperOptions.allowFailureIndices(); @@ -950,14 +934,14 @@ public boolean ignoreThrottled() { * @return whether regular indices (stand-alone or backing indices) will be included in the response */ public boolean includeRegularIndices() { - return selectorOptions().defaultSelectors().contains(IndexComponentSelector.DATA); + return selectorOptions().defaultSelector().shouldIncludeData(); } /** * @return whether failure indices (only supported by certain data streams) will be included in the response */ public boolean includeFailureIndices() { - return selectorOptions().defaultSelectors().contains(IndexComponentSelector.FAILURES); + return selectorOptions().defaultSelector().shouldIncludeFailures(); } public void writeIndicesOptions(StreamOutput out) throws IOException { @@ -1004,7 +988,7 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { out.writeBoolean(includeFailureIndices()); } if (out.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { - out.writeEnumSet(selectorOptions.defaultSelectors); + selectorOptions.writeTo(out); } } @@ -1032,15 +1016,15 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti var includeData = in.readBoolean(); var includeFailures = in.readBoolean(); if (includeData && includeFailures) { - selectorOptions = SelectorOptions.DATA_AND_FAILURE; + selectorOptions = SelectorOptions.ALL_APPLICABLE; } else if (includeData) { - selectorOptions = SelectorOptions.ONLY_DATA; + selectorOptions = SelectorOptions.DATA; } else { - selectorOptions = SelectorOptions.ONLY_FAILURES; + selectorOptions = SelectorOptions.FAILURES; } } if (in.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { - selectorOptions = new SelectorOptions(in.readEnumSet(IndexComponentSelector.class)); + selectorOptions = SelectorOptions.read(in); } return new IndicesOptions( options.contains(Option.ALLOW_UNAVAILABLE_CONCRETE_TARGETS) @@ -1099,11 +1083,6 @@ public Builder selectorOptions(SelectorOptions selectorOptions) { return this; } - public Builder selectorOptions(SelectorOptions.Builder selectorOptions) { - 
this.selectorOptions = selectorOptions.build(); - return this; - } - public IndicesOptions build() { return new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, selectorOptions); } @@ -1322,9 +1301,9 @@ private static SelectorOptions parseFailureStoreParameters(Object failureStoreVa return defaultOptions; } return switch (failureStoreValue.toString()) { - case INCLUDE_ALL -> SelectorOptions.DATA_AND_FAILURE; - case INCLUDE_ONLY_REGULAR_INDICES -> SelectorOptions.ONLY_DATA; - case INCLUDE_ONLY_FAILURE_INDICES -> SelectorOptions.ONLY_FAILURES; + case INCLUDE_ALL -> SelectorOptions.ALL_APPLICABLE; + case INCLUDE_ONLY_REGULAR_INDICES -> SelectorOptions.DATA; + case INCLUDE_ONLY_FAILURE_INDICES -> SelectorOptions.FAILURES; default -> throw new IllegalArgumentException("No valid " + FAILURE_STORE_QUERY_PARAM + " value [" + failureStoreValue + "]"); }; } @@ -1336,9 +1315,9 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par gatekeeperOptions.toXContent(builder, params); if (DataStream.isFailureStoreFeatureFlagEnabled()) { String displayValue; - if (SelectorOptions.DATA_AND_FAILURE.equals(selectorOptions())) { + if (SelectorOptions.ALL_APPLICABLE.equals(selectorOptions())) { displayValue = INCLUDE_ALL; - } else if (SelectorOptions.ONLY_DATA.equals(selectorOptions())) { + } else if (SelectorOptions.DATA.equals(selectorOptions())) { displayValue = INCLUDE_ONLY_REGULAR_INDICES; } else { displayValue = INCLUDE_ONLY_FAILURE_INDICES; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java index 942844dd1dd16..776302296b1a2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -69,7 +69,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (failureStore) { rolloverIndexRequest.setIndicesOptions( IndicesOptions.builder(rolloverIndexRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build() ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java index a75b50e3a88f4..3bbc03a333438 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestTests.java @@ -82,6 +82,6 @@ public void testIndicesOptions() { ); assertThat(getIndexRequest.indicesOptions().wildcardOptions(), equalTo(IndicesOptions.strictExpandOpen().wildcardOptions())); assertThat(getIndexRequest.indicesOptions().gatekeeperOptions(), equalTo(IndicesOptions.strictExpandOpen().gatekeeperOptions())); - assertThat(getIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA_AND_FAILURE)); + assertThat(getIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.ALL_APPLICABLE)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index b9fdb13958632..1a30fae1ebc00 
100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -754,7 +754,7 @@ public void testValidation() throws Exception { .promoteDataStream(); rolloverTarget = dataStream.getName(); if (dataStream.isFailureStoreEnabled() && randomBoolean()) { - defaultSelectorOptions = IndicesOptions.SelectorOptions.ONLY_FAILURES; + defaultSelectorOptions = IndicesOptions.SelectorOptions.FAILURES; sourceIndexName = dataStream.getFailureStoreWriteIndex().getName(); defaultRolloverIndexName = DataStream.getDefaultFailureStoreName( dataStream.getName(), @@ -815,7 +815,7 @@ public void testValidation() throws Exception { true, null, null, - IndicesOptions.SelectorOptions.ONLY_FAILURES.equals(defaultSelectorOptions) + IndicesOptions.SelectorOptions.FAILURES.equals(defaultSelectorOptions) ); newIndexName = newIndexName == null ? defaultRolloverIndexName : newIndexName; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index 08e92c833dc85..f0190790ba001 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -34,9 +33,7 @@ import org.junit.Before; import java.io.IOException; -import java.util.EnumSet; import java.util.Map; -import java.util.Set; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -178,14 +175,7 @@ public void testSerialize() throws Exception { ); originalRequest.lazy(randomBoolean()); originalRequest.setIndicesOptions( - IndicesOptions.builder(originalRequest.indicesOptions()) - .selectorOptions( - IndicesOptions.SelectorOptions.builder() - .setDefaultSelectors( - EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) - ) - ) - .build() + IndicesOptions.builder(originalRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE).build() ); try (BytesStreamOutput out = new BytesStreamOutput()) { @@ -266,7 +256,7 @@ public void testValidation() { RolloverRequest rolloverRequest = new RolloverRequest("alias-index", "new-index-name"); rolloverRequest.setIndicesOptions( IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); ActionRequestValidationException validationException = rolloverRequest.validate(); diff --git a/server/src/test/java/org/elasticsearch/action/support/IndexComponentSelectorTests.java b/server/src/test/java/org/elasticsearch/action/support/IndexComponentSelectorTests.java new file mode 100644 index 0000000000000..73d4ab59ce479 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/support/IndexComponentSelectorTests.java @@ -0,0 +1,41 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class IndexComponentSelectorTests extends ESTestCase { + + public void testIndexComponentSelectorFromKey() { + assertThat(IndexComponentSelector.getByKey("data"), equalTo(IndexComponentSelector.DATA)); + assertThat(IndexComponentSelector.getByKey("failures"), equalTo(IndexComponentSelector.FAILURES)); + assertThat(IndexComponentSelector.getByKey("*"), equalTo(IndexComponentSelector.ALL_APPLICABLE)); + assertThat(IndexComponentSelector.getByKey("d*ta"), nullValue()); + assertThat(IndexComponentSelector.getByKey("_all"), nullValue()); + assertThat(IndexComponentSelector.getByKey("**"), nullValue()); + assertThat(IndexComponentSelector.getByKey("failure"), nullValue()); + } + + public void testIndexComponentSelectorFromId() { + assertThat(IndexComponentSelector.getById((byte) 0), equalTo(IndexComponentSelector.DATA)); + assertThat(IndexComponentSelector.getById((byte) 1), equalTo(IndexComponentSelector.FAILURES)); + assertThat(IndexComponentSelector.getById((byte) 2), equalTo(IndexComponentSelector.ALL_APPLICABLE)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> IndexComponentSelector.getById((byte) 3)); + assertThat( + exception.getMessage(), + containsString("Unknown id of index component selector [3], available options are: {0=DATA, 1=FAILURES, 2=ALL_APPLICABLE}") + ); + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index 1784ab863bf1c..de7b43ad091fa 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -30,11 +30,9 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; import static org.hamcrest.CoreMatchers.equalTo; @@ -58,13 +56,7 @@ public void testSerialization() throws Exception { .allowAliasToMultipleIndices(randomBoolean()) .allowClosedIndices(randomBoolean()) ) - .selectorOptions( - IndicesOptions.SelectorOptions.builder() - .setDefaultSelectors( - EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) - ) - .build() - ) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build(); BytesStreamOutput output = new BytesStreamOutput(); @@ -350,9 +342,7 @@ public void testToXContent() throws IOException { randomBoolean() ); GatekeeperOptions gatekeeperOptions = new GatekeeperOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); - IndicesOptions.SelectorOptions selectorOptions = new IndicesOptions.SelectorOptions( - 
EnumSet.copyOf(randomNonEmptySubsetOf(Set.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES))) - ); + IndicesOptions.SelectorOptions selectorOptions = new IndicesOptions.SelectorOptions(randomFrom(IndexComponentSelector.values())); IndicesOptions indicesOptions = new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions, selectorOptions); @@ -370,9 +360,9 @@ public void testToXContent() throws IOException { assertThat(map.get("allow_no_indices"), equalTo(wildcardOptions.allowEmptyExpressions())); assertThat(map.get("ignore_throttled"), equalTo(gatekeeperOptions.ignoreThrottled())); String displayValue; - if (IndicesOptions.SelectorOptions.DATA_AND_FAILURE.equals(selectorOptions)) { + if (IndicesOptions.SelectorOptions.ALL_APPLICABLE.equals(selectorOptions)) { displayValue = "include"; - } else if (IndicesOptions.SelectorOptions.ONLY_DATA.equals(selectorOptions)) { + } else if (IndicesOptions.SelectorOptions.DATA.equals(selectorOptions)) { displayValue = "exclude"; } else { displayValue = "only"; diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index d58de5ca65ea0..99470918ce063 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -2737,7 +2737,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store with an exact data stream name { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); assertThat(result.length, equalTo(4)); @@ -2751,7 +2751,7 @@ public void testDataStreamsWithFailureStore() { // We expect that they will be skipped { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) .concreteTargetOptions(IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) .build(); @@ -2765,7 +2765,7 @@ public void testDataStreamsWithFailureStore() { // We expect an error { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowFailureIndices(false).build()) .build(); FailureIndexNotSupportedException failureIndexNotSupportedException = expectThrows( @@ -2781,7 +2781,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store with an exact data stream name { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-data-stream"); assertThat(result.length, equalTo(2)); @@ 
-2808,7 +2808,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store without any expressions { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); assertThat(result.length, equalTo(5)); @@ -2828,7 +2828,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store without any expressions { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true); assertThat(result.length, equalTo(2)); @@ -2861,7 +2861,7 @@ public void testDataStreamsWithFailureStore() { // Test include failure store with wildcard expression { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.DATA_AND_FAILURE) + .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); assertThat(result.length, equalTo(5)); @@ -2881,7 +2881,7 @@ public void testDataStreamsWithFailureStore() { // Test only failure store with wildcard expression { IndicesOptions indicesOptions = IndicesOptions.builder(IndicesOptions.STRICT_EXPAND_OPEN) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) + .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) .build(); Index[] result = indexNameExpressionResolver.concreteIndices(state, indicesOptions, true, "my-*"); assertThat(result.length, equalTo(2)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java index d648dd1c7edf8..3d140f5a9d764 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverStep.java @@ -126,9 +126,7 @@ public void performAction( RolloverRequest rolloverRequest = new RolloverRequest(rolloverTarget, null).masterNodeTimeout(TimeValue.MAX_VALUE); if (targetFailureStore) { rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) - .build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); } // We don't wait for active shards when we perform the rollover because the diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java index 67f65481ef63e..aa20e33a3fbf2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStep.java @@ -247,9 +247,7 @@ RolloverRequest createRolloverRequest( rolloverRequest.setConditions(applyDefaultConditions(conditions, rolloverOnlyIfHasDocuments)); if (targetFailureStore) { 
rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ONLY_FAILURES) - .build() + IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() ); } return rolloverRequest; From 8a3540fa74de36f62bafebdb719b22ef88879bf7 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:44:13 +0200 Subject: [PATCH 088/202] [DOCS] Clarify start-local trial license info (#115504) --- README.asciidoc | 2 +- docs/reference/run-elasticsearch-locally.asciidoc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.asciidoc b/README.asciidoc index 8d3c96c659896..bac6d0ed71752 100644 --- a/README.asciidoc +++ b/README.asciidoc @@ -56,8 +56,8 @@ Quickly set up Elasticsearch and Kibana in Docker for local development or testi - If you're using Microsoft Windows, then install https://learn.microsoft.com/en-us/windows/wsl/install[Windows Subsystem for Linux (WSL)]. ==== Trial license +This setup comes with a one-month trial license that includes all Elastic features. -This setup comes with a one-month trial of the Elastic *Platinum* license. After the trial period, the license reverts to *Free and open - Basic*. Refer to https://www.elastic.co/subscriptions[Elastic subscriptions] for more information. diff --git a/docs/reference/run-elasticsearch-locally.asciidoc b/docs/reference/run-elasticsearch-locally.asciidoc index 1a115ae926ea2..03885132e4050 100644 --- a/docs/reference/run-elasticsearch-locally.asciidoc +++ b/docs/reference/run-elasticsearch-locally.asciidoc @@ -20,7 +20,7 @@ Refer to <> for a list of produc Quickly set up {es} and {kib} in Docker for local development or testing, using the https://github.com/elastic/start-local?tab=readme-ov-file#-try-elasticsearch-and-kibana-locally[`start-local` script]. -This setup comes with a one-month trial of the Elastic *Platinum* license. +This setup comes with a one-month trial license that includes all Elastic features. After the trial period, the license reverts to *Free and open - Basic*. Refer to https://www.elastic.co/subscriptions[Elastic subscriptions] for more information. @@ -84,4 +84,4 @@ Learn about customizing the setup, logging, and more. [[local-dev-next-steps]] === Next steps -Use our <> to learn the basics of {es}. \ No newline at end of file +Use our <> to learn the basics of {es}. 
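To make the selector semantics introduced in [PATCH 087/202] concrete, here is a minimal, self-contained Java sketch of how an expression such as `my-index::*` resolves (this is the sketch referenced from that patch's description). The `Selector` enum mirrors the keys and inclusion rules of the `IndexComponentSelector` enum added above ("data", "failures", "*"); the class name and the `parse`/`main` harness are illustrative assumptions only, not Elasticsearch's actual expression resolver.

[source,java]
----
import java.util.Locale;
import java.util.Map;

public final class SelectorResolutionSketch {

    // Mirrors IndexComponentSelector: keys "data", "failures", and "*".
    enum Selector {
        DATA("data"),
        FAILURES("failures"),
        ALL_APPLICABLE("*");

        private final String key;

        Selector(String key) {
            this.key = key;
        }

        // Analogue of IndexComponentSelector.getByKey: unknown suffixes yield null.
        static Selector byKey(String key) {
            for (Selector selector : values()) {
                if (selector.key.equals(key)) {
                    return selector;
                }
            }
            return null; // e.g. "**" or "failure" are not recognised
        }

        boolean includesData() {
            return this == DATA || this == ALL_APPLICABLE;
        }

        boolean includesFailures() {
            return this == FAILURES || this == ALL_APPLICABLE;
        }
    }

    // Hypothetical splitter: "my-index::failures" -> ("my-index", FAILURES).
    // A bare name defaults to the DATA component, matching SelectorOptions.DEFAULT.
    static Map.Entry<String, Selector> parse(String expression) {
        int separator = expression.lastIndexOf("::");
        if (separator < 0) {
            return Map.entry(expression, Selector.DATA);
        }
        Selector selector = Selector.byKey(expression.substring(separator + 2));
        if (selector == null) {
            throw new IllegalArgumentException("unknown selector in [" + expression + "]");
        }
        return Map.entry(expression.substring(0, separator), selector);
    }

    public static void main(String[] args) {
        for (String expression : new String[] { "my-index", "my-index::data", "my-index::*", "logs::failures" }) {
            Map.Entry<String, Selector> parsed = parse(expression);
            System.out.printf(Locale.ROOT, "%-16s -> %-14s data=%s failures=%s%n",
                expression, parsed.getValue(), parsed.getValue().includesData(), parsed.getValue().includesFailures());
        }
    }
}
----

Note how `my-index::*` resolves to the single `ALL_APPLICABLE` value rather than expanding into the pair `::data` plus `::failures`: for a plain index the failure component simply does not exist, so nothing extra is selected, whereas an explicitly requested `my-index::failures` can be rejected outright. This is exactly the distinction the patch description draws between `my-index::*` and `my-index::data, my-index::failures`.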
From 031a80d2dc8509d9a48a50261e18f6252c00560b Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 24 Oct 2024 11:30:33 +0100 Subject: [PATCH 089/202] Use BuildVersion rather than Version for reserved state version (#115406) --- .../settings/LocallyMountedSecrets.java | 4 +-- .../org/elasticsearch/env/BuildVersion.java | 15 +++++++++++ .../env/DefaultBuildVersion.java | 12 +++++---- .../internal/BuildExtension.java | 5 ++++ .../service/ReservedClusterStateService.java | 4 +-- .../service/ReservedStateUpdateTask.java | 5 ++-- .../service/ReservedStateVersion.java | 14 ++++------ .../service/FileSettingsServiceTests.java | 4 +-- .../ReservedClusterStateServiceTests.java | 27 ++++++++++--------- .../ReservedLifecycleStateServiceTests.java | 4 +-- 10 files changed, 57 insertions(+), 37 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java index e4f1608a52d15..4a2e1cd92d4da 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java +++ b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java @@ -11,11 +11,11 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.reservedstate.service.ReservedStateVersion; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -130,7 +130,7 @@ public LocallyMountedSecrets(Environment environment) { throw new IllegalStateException("Error processing secrets file", e); } } else { - secrets.set(new LocalFileSecrets(Map.of(), new ReservedStateVersion(-1L, Version.CURRENT))); + secrets.set(new LocalFileSecrets(Map.of(), new ReservedStateVersion(-1L, BuildVersion.current()))); } this.secretsDir = secretsDirPath.toString(); this.secretsFile = secretsFilePath.toString(); diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 3fdf01d7e1bae..5536b06d4d587 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -72,6 +72,16 @@ public static BuildVersion fromVersionId(int versionId) { return CurrentExtensionHolder.BUILD_EXTENSION.fromVersionId(versionId); } + /** + * Create a {@link BuildVersion} from a version string. + * + * @param version A string representation of a version + * @return a version representing a build or release of Elasticsearch + */ + public static BuildVersion fromString(String version) { + return CurrentExtensionHolder.BUILD_EXTENSION.fromString(version); + } + /** * Get the current build version. 
* @@ -110,6 +120,11 @@ public BuildVersion currentBuildVersion() { public BuildVersion fromVersionId(int versionId) { return new DefaultBuildVersion(versionId); } + + @Override + public BuildVersion fromString(String version) { + return new DefaultBuildVersion(version); + } } } diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index f31b34e89c01d..9cf0d60719653 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -28,15 +28,17 @@ final class DefaultBuildVersion extends BuildVersion { public static BuildVersion CURRENT = new DefaultBuildVersion(Version.CURRENT.id()); - private final int versionId; private final Version version; DefaultBuildVersion(int versionId) { assert versionId >= 0 : "Release version IDs must be non-negative integers"; - this.versionId = versionId; this.version = Version.fromId(versionId); } + DefaultBuildVersion(String version) { + this.version = Version.fromString(Objects.requireNonNull(version)); + } + @Override public boolean onOrAfterMinimumCompatible() { return Version.CURRENT.minimumCompatibilityVersion().onOrBefore(version); @@ -49,7 +51,7 @@ public boolean isFutureVersion() { @Override public int id() { - return versionId; + return version.id(); } @Override @@ -57,12 +59,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DefaultBuildVersion that = (DefaultBuildVersion) o; - return versionId == that.versionId; + return version.equals(that.version); } @Override public int hashCode() { - return Objects.hash(versionId); + return Objects.hash(version.id()); } @Override diff --git a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java index a23270cb5550c..427e186bc40cf 100644 --- a/server/src/main/java/org/elasticsearch/internal/BuildExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/BuildExtension.java @@ -38,4 +38,9 @@ default boolean hasReleaseVersioning() { * Returns the {@link BuildVersion} for a given version identifier. */ BuildVersion fromVersionId(int versionId); + + /** + * Returns the {@link BuildVersion} for a given version string. 
+ */ + BuildVersion fromString(String version); } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index 0c5fa61b29cfe..499b5e6515a8c 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -22,6 +21,7 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; import org.elasticsearch.core.Tuple; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -158,7 +158,7 @@ public void process( } public void initEmpty(String namespace, ActionListener listener) { - var missingVersion = new ReservedStateVersion(EMPTY_VERSION, Version.CURRENT); + var missingVersion = new ReservedStateVersion(EMPTY_VERSION, BuildVersion.current()); var emptyState = new ReservedStateChunk(Map.of(), missingVersion); updateTaskQueue.submitTask( "empty initial cluster state [" + namespace + "]", diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java index c85997f72cc78..90ae9923910d1 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; @@ -162,11 +161,11 @@ static boolean checkMetadataVersion( ReservedStateVersion reservedStateVersion, ReservedStateVersionCheck versionCheck ) { - if (Version.CURRENT.before(reservedStateVersion.minCompatibleVersion())) { + if (reservedStateVersion.buildVersion().isFutureVersion()) { logger.warn( () -> format( "Reserved cluster state version [%s] for namespace [%s] is not compatible with this Elasticsearch node", - reservedStateVersion.minCompatibleVersion(), + reservedStateVersion.buildVersion(), namespace ) ); diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java index e2a21689b9815..116d470755e1c 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateVersion.java @@ -9,10 +9,10 @@ package org.elasticsearch.reservedstate.service; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.env.BuildVersion; import 
org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -23,7 +23,7 @@ * File settings metadata class that holds information about * versioning and Elasticsearch version compatibility */ -public record ReservedStateVersion(Long version, Version compatibleWith) implements Writeable { +public record ReservedStateVersion(Long version, BuildVersion buildVersion) implements Writeable { public static final ParseField VERSION = new ParseField("version"); public static final ParseField COMPATIBILITY = new ParseField("compatibility"); @@ -32,7 +32,7 @@ public record ReservedStateVersion(Long version, Version compatibleWith) impleme "reserved_cluster_state_version_metadata", a -> { Long updateId = Long.parseLong((String) a[0]); - Version minCompatVersion = Version.fromString((String) a[1]); + BuildVersion minCompatVersion = BuildVersion.fromString((String) a[1]); return new ReservedStateVersion(updateId, minCompatVersion); } @@ -47,17 +47,13 @@ public static ReservedStateVersion parse(XContentParser parser) { return PARSER.apply(parser, null); } - public Version minCompatibleVersion() { - return compatibleWith; - } - public static ReservedStateVersion readFrom(StreamInput input) throws IOException { - return new ReservedStateVersion(input.readLong(), Version.readVersion(input)); + return new ReservedStateVersion(input.readLong(), BuildVersion.fromVersionId(input.readVInt())); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(version()); - Version.writeVersion(compatibleWith(), out); + out.writeVInt(buildVersion().id()); } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index c0657b5888ad2..8af36e2f9677e 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.reservedstate.service; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -26,6 +25,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.tasks.TaskManager; @@ -277,7 +277,7 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { throw new RuntimeException(e); } }).start(); - return new ReservedStateChunk(Map.of(), new ReservedStateVersion(1L, Version.CURRENT)); + return new ReservedStateChunk(Map.of(), new ReservedStateVersion(1L, BuildVersion.current())); }).when(controller).parse(any(String.class), any()); doAnswer((Answer) invocation -> { diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index d96387618e6bd..5c7dd6cb346b9 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.reservedstate.service; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -26,6 +25,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -396,7 +396,7 @@ public TransformState transform(Object source, TransformState prevState) throws assertTrue(ReservedStateErrorTask.isNewError(null, 1L, ReservedStateVersionCheck.HIGHER_VERSION_ONLY)); assertTrue(ReservedStateErrorTask.isNewError(null, 1L, ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION)); - var chunk = new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(2L, Version.CURRENT)); + var chunk = new ReservedStateChunk(Map.of("one", "two", "maker", "three"), new ReservedStateVersion(2L, BuildVersion.current())); var orderedHandlers = List.of(exceptionThrower.name(), newStateMaker.name()); // We submit a task with two handler, one will cause an exception, the other will create a new state. @@ -456,7 +456,7 @@ public void testCheckMetadataVersion() { ReservedStateUpdateTask task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -466,7 +466,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster state should be modified", task.execute(state), not(sameInstance(state))); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -477,7 +477,7 @@ public void testCheckMetadataVersion() { task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -487,7 +487,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster state should not be modified", task.execute(state), sameInstance(state)); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(123L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION, Map.of(), List.of(), @@ -498,7 +498,7 @@ public void testCheckMetadataVersion() { task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(122L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(122L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -508,7 +508,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster 
state should not be modified", task.execute(state), sameInstance(state)); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(122L, Version.CURRENT)), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(122L, BuildVersion.current())), ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION, Map.of(), List.of(), @@ -519,7 +519,7 @@ public void testCheckMetadataVersion() { task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1))), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.fromVersionId(BuildVersion.current().id() + 1))), ReservedStateVersionCheck.HIGHER_VERSION_ONLY, Map.of(), List.of(), @@ -529,7 +529,7 @@ public void testCheckMetadataVersion() { assertThat("Cluster state should not be modified", task.execute(state), sameInstance(state)); task = new ReservedStateUpdateTask( "test", - new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, Version.fromId(Version.CURRENT.id + 1))), + new ReservedStateChunk(Map.of(), new ReservedStateVersion(124L, BuildVersion.fromVersionId(BuildVersion.current().id() + 1))), ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION, Map.of(), List.of(), @@ -627,7 +627,7 @@ public void testCheckAndReportError() { assertNull(controller.checkAndReportError("test", List.of(), null, ReservedStateVersionCheck.HIGHER_VERSION_ONLY)); verify(controller, times(0)).updateErrorState(any()); - var version = new ReservedStateVersion(2L, Version.CURRENT); + var version = new ReservedStateVersion(2L, BuildVersion.current()); var error = controller.checkAndReportError("test", List.of("test error"), version, ReservedStateVersionCheck.HIGHER_VERSION_ONLY); assertThat(error, instanceOf(IllegalStateException.class)); assertThat(error.getMessage(), is("Error processing state change request for test, errors: test error")); @@ -659,7 +659,10 @@ public TransformState transform(Object source, TransformState prevState) { Metadata metadata = Metadata.builder().put(operatorMetadata).build(); ClusterState state = ClusterState.builder(new ClusterName("test")).metadata(metadata).build(); - var chunk = new ReservedStateChunk(Map.of("non-state", "two", "maker", "three"), new ReservedStateVersion(2L, Version.CURRENT)); + var chunk = new ReservedStateChunk( + Map.of("non-state", "two", "maker", "three"), + new ReservedStateVersion(2L, BuildVersion.current()) + ); var orderedHandlers = List.of(exceptionThrower.name(), newStateMaker.name()); ClusterService clusterService = mock(ClusterService.class); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java index aab89c6620b52..bcd6026618a05 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ilm.action; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterModule; @@ -22,6 +21,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; import 
org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.BuildVersion; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -418,7 +418,7 @@ public void testOperatorControllerWithPluginPackage() { ) ) ), - new ReservedStateVersion(123L, Version.CURRENT) + new ReservedStateVersion(123L, BuildVersion.current()) ); controller.process("operator", pack, randomFrom(ReservedStateVersionCheck.values()), x::set);

From 327f23254a8ea26bb462488b4cd06ab8604acd8e Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Thu, 24 Oct 2024 11:41:47 +0100 Subject: [PATCH 090/202] Allow for queries on _tier to skip shards during coordinator rewrite (#114990)

The `_tier` metadata field was not used on the coordinator when rewriting queries in order to exclude shards that don't match. This led to queries of the following form continuing to report failures even though the only unavailable shards were in the tier that was excluded from search (the frozen tier in this example):

``` POST testing/_search { "query": { "bool": { "must_not": [ { "term": { "_tier": "data_frozen" } } ] } } } ```

This PR addresses this by having the queries that can execute on `_tier` (term, match, query string, simple query string, prefix, wildcard) execute a coordinator rewrite to exclude the indices that don't match the `_tier` query **before** attempting to reach the shards (shards that might not be available and would raise errors). An illustrative sketch of this rewrite is included after the `TermQueryBuilder` change below.

Fixes #114910 --- docs/changelog/114990.yaml | 6 + .../query/CoordinatorRewriteContext.java | 65 +++++++- .../CoordinatorRewriteContextProvider.java | 9 +- .../index/query/PrefixQueryBuilder.java | 18 ++- .../index/query/QueryRewriteContext.java | 21 +++ .../index/query/TermQueryBuilder.java | 18 ++- .../index/query/TermsQueryBuilder.java | 17 ++- .../index/query/WildcardQueryBuilder.java | 20 ++- .../index/query/PrefixQueryBuilderTests.java | 35 +++++ .../index/query/QueryRewriteContextTests.java | 131 ++++++++++++++++ .../index/query/TermQueryBuilderTests.java | 34 +++++ .../index/query/TermsQueryBuilderTests.java | 33 ++++ .../query/WildcardQueryBuilderTests.java | 34 +++++ .../test/AbstractBuilderTestCase.java | 15 +- .../mapper/DataTierFieldMapper.java | 26 +--- .../core/LocalStateCompositeXPackPlugin.java | 7 +- ...pshotsCanMatchOnCoordinatorIntegTests.java | 143 +++++++++++++++++- 17 files changed, 594 insertions(+), 38 deletions(-) create mode 100644 docs/changelog/114990.yaml create mode 100644 server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java diff --git a/docs/changelog/114990.yaml b/docs/changelog/114990.yaml new file mode 100644 index 0000000000000..2575942d15bf5 --- /dev/null +++ b/docs/changelog/114990.yaml @@ -0,0 +1,6 @@ +pr: 114990 +summary: Allow for queries on `_tier` to skip shards in the `can_match` phase +area: Search +type: bug +issues: + - 114910 diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index 3e5deeeebae5d..964358610e074 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -9,17 +9,23 @@ package org.elasticsearch.index.query; +import org.apache.lucene.search.Query; import org.elasticsearch.client.internal.Client; import
org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.Collections; +import java.util.Map; import java.util.function.LongSupplier; /** @@ -30,20 +36,57 @@ * and skip the shards that don't hold queried data. See IndexMetadata for more details. */ public class CoordinatorRewriteContext extends QueryRewriteContext { + + public static final String TIER_FIELD_NAME = "_tier"; + + private static final ConstantFieldType TIER_FIELD_TYPE = new ConstantFieldType(TIER_FIELD_NAME, Map.of()) { + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + throw new UnsupportedOperationException("fetching field values is not supported on the coordinator node"); + } + + @Override + public String typeName() { + return TIER_FIELD_NAME; + } + + @Override + protected boolean matches(String pattern, boolean caseInsensitive, QueryRewriteContext context) { + if (caseInsensitive) { + pattern = Strings.toLowercaseAscii(pattern); + } + + String tierPreference = context.getTierPreference(); + if (tierPreference == null) { + return false; + } + return Regex.simpleMatch(pattern, tierPreference); + } + + @Override + public Query existsQuery(SearchExecutionContext context) { + throw new UnsupportedOperationException("field exists query is not supported on the coordinator node"); + } + }; + private final DateFieldRangeInfo dateFieldRangeInfo; + private final String tier; /** * Context for coordinator search rewrites based on time ranges for the @timestamp field and/or 'event.ingested' field + * * @param parserConfig * @param client * @param nowInMillis * @param dateFieldRangeInfo range and field type info for @timestamp and 'event.ingested' + * @param tier the configured data tier (via the _tier_preference setting) for the index */ public CoordinatorRewriteContext( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, - DateFieldRangeInfo dateFieldRangeInfo + DateFieldRangeInfo dateFieldRangeInfo, + String tier ) { super( parserConfig, @@ -63,10 +106,12 @@ public CoordinatorRewriteContext( null ); this.dateFieldRangeInfo = dateFieldRangeInfo; + this.tier = tier; } /** - * @param fieldName Must be one of DataStream.TIMESTAMP_FIELD_FIELD or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @param fieldName Must be one of DataStream.TIMESTAMP_FIELD_FIELD, IndexMetadata.EVENT_INGESTED_FIELD_NAME, or + * DataTierFieldMapper.NAME + * @return MappedField with type for the field. Returns null if fieldName is not one of the allowed field names.
*/ @Nullable @@ -75,6 +120,8 @@ public MappedFieldType getFieldType(String fieldName) { return dateFieldRangeInfo.timestampFieldType(); } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { return dateFieldRangeInfo.eventIngestedFieldType(); + } else if (TIER_FIELD_NAME.equals(fieldName)) { + return TIER_FIELD_TYPE; } else { return null; } @@ -99,4 +146,18 @@ public IndexLongFieldRange getFieldRange(String fieldName) { public CoordinatorRewriteContext convertToCoordinatorRewriteContext() { return this; } + + @Override + public String getTierPreference() { + // dominant branch first (tier preference is configured) + return tier.isEmpty() == false ? tier : null; + } + + /** + * We're holding on to the index tier in the context as otherwise we'd need + * to re-parse it from the index settings when evaluating the _tier field. + */ + public String tier() { + return tier; + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java index 67042a98db42a..e48d7699d03ef 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java @@ -52,6 +52,12 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { return null; } DateFieldRangeInfo dateFieldRangeInfo = mappingSupplier.apply(index); + // we've now added a coordinator rewrite based on the _tier field, so the requirement + // for the timestamp fields to be present is artificial (we could do a coordinator + // rewrite only based on the _tier field) and we might decide to remove this artificial + // limitation to enable coordinator rewrites based on _tier for hot and warm indices + // (currently the _tier coordinator rewrite is only available for mounted and partially mounted + // indices) if (dateFieldRangeInfo == null) { return null; } @@ -74,7 +80,8 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { parserConfig, client, nowInMillis, - new DateFieldRangeInfo(timestampFieldType, timestampRange, dateFieldRangeInfo.eventIngestedFieldType(), eventIngestedRange) + new DateFieldRangeInfo(timestampFieldType, timestampRange, dateFieldRangeInfo.eventIngestedFieldType(), eventIngestedRange), + indexMetadata.getTierPreference().isEmpty() == false ?
indexMetadata.getTierPreference().getFirst() : "" ); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java index 24817b778a4da..fcf986191da23 100644 --- a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.MappedFieldType; @@ -189,11 +190,24 @@ public String getWriteableName() { } @Override - protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) { MappedFieldType fieldType = context.getFieldType(this.fieldName); if (fieldType == null) { return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); - } else if (fieldType instanceof ConstantFieldType constantFieldType) { + } + return maybeRewriteBasedOnConstantFields(fieldType, context); + } + + @Override + protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) { + MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName); + // we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access + // to only a subset of fields; see {@link CoordinatorRewriteContext#getFieldType} + return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext); + } + + private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) { + if (fieldType instanceof ConstantFieldType constantFieldType) { // This logic is correct for all field types, but by only applying it to constant // fields we also have the guarantee that it doesn't perform I/O, which is important // since rewrites might happen on a network thread. diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 8808cd79072f6..fce74aa60ab16 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -11,9 +11,12 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ResolvedIndices; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; @@ -407,4 +410,22 @@ public ResolvedIndices getResolvedIndices() { public PointInTimeBuilder getPointInTimeBuilder() { return pit; } + + /** + * Retrieve the first tier preference from the index setting. If the setting is not + * present, then return null.
+ */ + @Nullable + public String getTierPreference() { + Settings settings = getIndexSettings().getSettings(); + String value = DataTier.TIER_PREFERENCE_SETTING.get(settings); + + if (Strings.hasText(value) == false) { + return null; + } + + // Tier preference can be a comma-delimited list of tiers, ordered by preference. + // It was decided we should only test the first of these potentially multiple preferences. + return value.split(",")[0].trim(); + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java index 2978b3bfbf69c..113f66f3e58de 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.xcontent.ParseField; @@ -170,11 +171,24 @@ protected void addExtraXContent(XContentBuilder builder, Params params) throws I } @Override - protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) { MappedFieldType fieldType = context.getFieldType(this.fieldName); if (fieldType == null) { return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); - } else if (fieldType instanceof ConstantFieldType constantFieldType) { + } + return maybeRewriteBasedOnConstantFields(fieldType, context); + } + + @Override + protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) { + MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName); + // we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access + // to only a subset of fields; see {@link CoordinatorRewriteContext#getFieldType} + return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext); + } + + private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) { + if (fieldType instanceof ConstantFieldType constantFieldType) { // This logic is correct for all field types, but by only applying it to constant // fields we also have the guarantee that it doesn't perform I/O, which is important // since rewrites might happen on a network thread.
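To make the coordinator-side behavior concrete before the remaining builders, here is a minimal sketch. It is illustrative only and not part of the patch: `coordinatorRewriteContext` stands for a `CoordinatorRewriteContext` whose tier is "data_frozen", as constructed by the test helpers added later in this patch.

```
// Illustrative sketch (assumed setup): the index's tier preference is "data_frozen".
QueryBuilder query = new TermQueryBuilder("_tier", "data_content");

// _tier is resolved against the tier carried by the coordinator rewrite context,
// without contacting any shard; "data_content" does not match "data_frozen",
// so the query rewrites to match_none and the index's shards are skipped.
QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext);
assert rewritten instanceof MatchNoneQueryBuilder;

// A value that does match the tier rewrites to match_all instead, and a
// bool/must_not wrapper inverts these outcomes, as the tests below verify.
```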
diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java index 4035bc02fba79..dec4090a3e6bd 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermsQueryBuilder.java @@ -393,11 +393,24 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws } @Override - protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) { MappedFieldType fieldType = context.getFieldType(this.fieldName); if (fieldType == null) { return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); - } else if (fieldType instanceof ConstantFieldType constantFieldType) { + } + return maybeRewriteBasedOnConstantFields(fieldType, context); + } + + @Override + protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) { + MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName); + // we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access + // to only a subset of fields; see {@link CoordinatorRewriteContext#getFieldType} + return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext); + } + + private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) { + if (fieldType instanceof ConstantFieldType constantFieldType) { // This logic is correct for all field types, but by only applying it to constant // fields we also have the guarantee that it doesn't perform I/O, which is important // since rewrites might happen on a network thread.
diff --git a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java index f287812ebbc10..419195e5e5ba5 100644 --- a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.support.QueryParsers; @@ -200,11 +201,24 @@ public static WildcardQueryBuilder fromXContent(XContentParser parser) throws IO } @Override - protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) { MappedFieldType fieldType = context.getFieldType(this.fieldName); if (fieldType == null) { - return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); - } else if (fieldType instanceof ConstantFieldType constantFieldType) { + return new MatchNoneQueryBuilder("The \"" + getName() + "\" query is against a field that does not exist"); + } + return maybeRewriteBasedOnConstantFields(fieldType, context); + } + + @Override + protected QueryBuilder doCoordinatorRewrite(CoordinatorRewriteContext coordinatorRewriteContext) { + MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(this.fieldName); + // we don't rewrite a null field type to `match_none` on the coordinator because the coordinator has access + // to only a subset of fields; see {@link CoordinatorRewriteContext#getFieldType} + return maybeRewriteBasedOnConstantFields(fieldType, coordinatorRewriteContext); + } + + private QueryBuilder maybeRewriteBasedOnConstantFields(@Nullable MappedFieldType fieldType, QueryRewriteContext context) { + if (fieldType instanceof ConstantFieldType constantFieldType) { // This logic is correct for all field types, but by only applying it to constant // fields we also have the guarantee that it doesn't perform I/O, which is important // since rewrites might happen on a network thread.
diff --git a/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java index 0260fa2ef4cc8..918815f2a4f77 100644 --- a/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java @@ -17,7 +17,9 @@ import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.AbstractQueryTestCase; +import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; import java.io.IOException; @@ -175,4 +177,37 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> queryBuilder.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testCoordinatorTierRewriteToMatchAll() throws IOException { + QueryBuilder query = new PrefixQueryBuilder("_tier", "data_fro"); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchAllQueryBuilder.class)); + } + + public void testCoordinatorTierRewriteToMatchNone() throws IOException { + QueryBuilder query = QueryBuilders.boolQuery().mustNot(new PrefixQueryBuilder("_tier", "data_fro")); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchNoneQueryBuilder.class)); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java new file mode 100644 index 0000000000000..0b2a8ab4856b3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/QueryRewriteContextTests.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.indices.DateFieldRangeInfo; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class QueryRewriteContextTests extends ESTestCase { + + public void testGetTierPreference() { + { + // cold->hot tier preference + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(DataTier.TIER_PREFERENCE, "data_cold,data_warm,data_hot") + .build() + ); + QueryRewriteContext context = new QueryRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + null, + MappingLookup.EMPTY, + Collections.emptyMap(), + new IndexSettings(metadata, Settings.EMPTY), + null, + null, + null, + null, + null, + null, + null, + null + ); + + assertThat(context.getTierPreference(), is("data_cold")); + } + + { + // missing tier preference + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build() + ); + QueryRewriteContext context = new QueryRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + null, + MappingLookup.EMPTY, + Collections.emptyMap(), + new IndexSettings(metadata, Settings.EMPTY), + null, + null, + null, + null, + null, + null, + null, + null + ); + + assertThat(context.getTierPreference(), is(nullValue())); + } + + { + // coordinator rewrite context + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(DataTier.TIER_PREFERENCE, "data_cold,data_warm,data_hot") + .build() + ); + CoordinatorRewriteContext coordinatorRewriteContext = new CoordinatorRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), null), + "data_frozen" + ); + + assertThat(coordinatorRewriteContext.getTierPreference(), is("data_frozen")); + } + { + // coordinator rewrite context empty tier + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(DataTier.TIER_PREFERENCE, "data_cold,data_warm,data_hot") + .build() + ); + CoordinatorRewriteContext coordinatorRewriteContext = new CoordinatorRewriteContext( + parserConfig(), + null, + System::currentTimeMillis, + new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), null), + "" + ); + + assertThat(coordinatorRewriteContext.getTierPreference(), is(nullValue())); + } + } + + public static IndexMetadata newIndexMeta(String name, Settings indexSettings) { + return IndexMetadata.builder(name).settings(indexSettings(IndexVersion.current(), 1, 1).put(indexSettings)).build(); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java index 
b5cf42cf5df28..bbac216754eed 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermQueryBuilderTests.java @@ -17,9 +17,11 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.xcontent.json.JsonStringEncoder; +import org.hamcrest.CoreMatchers; import java.io.IOException; import java.util.Locale; @@ -238,4 +240,36 @@ public void testLongTerm() throws IOException { { "term" : { "foo" : "%s" } }""", longTerm))); assertThat(e.getMessage(), containsString("term starting with [aaaaa")); } + + public void testCoordinatorTierRewriteToMatchAll() throws IOException { + QueryBuilder query = new TermQueryBuilder("_tier", "data_frozen"); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchAllQueryBuilder.class)); + } + + public void testCoordinatorTierRewriteToMatchNone() throws IOException { + QueryBuilder query = QueryBuilders.boolQuery().mustNot(new TermQueryBuilder("_tier", "data_frozen")); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchNoneQueryBuilder.class)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java index 1ce69355379de..2faee7bc89eb5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsQueryBuilderTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.xcontent.XContentBuilder; @@ -317,6 +318,38 @@ public void testLongTerm() throws IOException { assertThat(e.getMessage(), containsString("term starting with [aaaaa")); } + public void testCoordinatorTierRewriteToMatchAll() throws IOException { + QueryBuilder query = new TermsQueryBuilder("_tier", "data_frozen"); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + 
QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchAllQueryBuilder.class)); + } + + public void testCoordinatorTierRewriteToMatchNone() throws IOException { + QueryBuilder query = QueryBuilders.boolQuery().mustNot(new TermsQueryBuilder("_tier", "data_frozen")); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchNoneQueryBuilder.class)); + } + @Override protected QueryBuilder parseQuery(XContentParser parser) throws IOException { QueryBuilder query = super.parseQuery(parser); diff --git a/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java index 7ee6d75a08736..182bd4d6b5b86 100644 --- a/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/WildcardQueryBuilderTests.java @@ -15,7 +15,9 @@ import org.apache.lucene.search.WildcardQuery; import org.elasticsearch.common.ParsingException; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.AbstractQueryTestCase; +import org.hamcrest.CoreMatchers; import java.io.IOException; import java.util.HashMap; @@ -166,4 +168,36 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> queryBuilder.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testCoordinatorTierRewriteToMatchAll() throws IOException { + QueryBuilder query = new WildcardQueryBuilder("_tier", "data_fr*"); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchAllQueryBuilder.class)); + } + + public void testCoordinatorTierRewriteToMatchNone() throws IOException { + QueryBuilder query = QueryBuilders.boolQuery().mustNot(new WildcardQueryBuilder("_tier", "data_fro*")); + final String timestampFieldName = "@timestamp"; + long minTimestamp = 1685714000000L; + long maxTimestamp = 1685715000000L; + final CoordinatorRewriteContext coordinatorRewriteContext = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType(timestampFieldName), + minTimestamp, + maxTimestamp, + "data_frozen" + ); + + QueryBuilder rewritten = query.rewrite(coordinatorRewriteContext); + assertThat(rewritten, CoreMatchers.instanceOf(MatchNoneQueryBuilder.class)); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 77ff194e2681d..0543bc7a78f8b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -342,6 +342,15 @@ protected static CoordinatorRewriteContext createCoordinatorRewriteContext( return serviceHolder.createCoordinatorContext(dateFieldType, min, max); } + protected static CoordinatorRewriteContext createCoordinatorRewriteContext( + DateFieldMapper.DateFieldType dateFieldType, + long min, + long max, + String tier + ) { + return serviceHolder.createCoordinatorContext(dateFieldType, min, max, tier); + } + protected static DataRewriteContext dataRewriteContext() { return serviceHolder.createDataContext(); } @@ -625,13 +634,17 @@ QueryRewriteContext createQueryRewriteContext() { } CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max) { + return createCoordinatorContext(dateFieldType, min, max, ""); + } + + CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max, String tier) { DateFieldRangeInfo timestampFieldInfo = new DateFieldRangeInfo( dateFieldType, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)), dateFieldType, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)) ); - return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo); + return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo, tier); } DataRewriteContext createDataContext() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java index 527f8d1c176ec..0e185a90ed39b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java @@ -10,10 +10,8 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -55,7 +53,7 @@ protected boolean matches(String pattern, boolean caseInsensitive, QueryRewriteC pattern = Strings.toLowercaseAscii(pattern); } - String tierPreference = getTierPreference(context); + String tierPreference = context.getTierPreference(); if (tierPreference == null) { return false; } @@ -64,7 +62,7 @@ protected boolean matches(String pattern, boolean caseInsensitive, QueryRewriteC @Override public Query existsQuery(SearchExecutionContext context) { - String tierPreference = getTierPreference(context); + String tierPreference = context.getTierPreference(); if (tierPreference == null) { return new MatchNoDocsQuery(); } @@ -77,26 +75,9 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); } - String tierPreference = getTierPreference(context); + String 
tierPreference = context.getTierPreference(); return tierPreference == null ? ValueFetcher.EMPTY : ValueFetcher.singleton(tierPreference); } - - /** - * Retrieve the first tier preference from the index setting. If the setting is not - * present, then return null. - */ - private static String getTierPreference(QueryRewriteContext context) { - Settings settings = context.getIndexSettings().getSettings(); - String value = DataTier.TIER_PREFERENCE_SETTING.get(settings); - - if (Strings.hasText(value) == false) { - return null; - } - - // Tier preference can be a comma-delimited list of tiers, ordered by preference - // It was decided we should only test the first of these potentially multiple preferences. - return value.split(",")[0].trim(); - } } public DataTierFieldMapper() { @@ -107,4 +88,5 @@ public DataTierFieldMapper() { protected String contentType() { return CONTENT_TYPE; } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index 918976c0d3db8..1f2c89c473a62 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -637,10 +637,15 @@ public Collection getSystemIndexDescriptors(Settings sett @Override public Map getMetadataMappers() { - return filterPlugins(MapperPlugin.class).stream() + Map pluginsMetadataMappers = filterPlugins(MapperPlugin.class).stream() .map(MapperPlugin::getMetadataMappers) .flatMap(map -> map.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + // the xpack plugin itself exposes a metadata mapper so let's include it as well + Map metadataMappersIncludingXPackPlugin = new HashMap<>(pluginsMetadataMappers); + metadataMappersIncludingXPackPlugin.putAll(super.getMetadataMappers()); + return metadataMappersIncludingXPackPlugin; } @Override diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index eab73fbe5ad04..ed42d86bc8c49 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -20,14 +20,18 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import 
org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; @@ -36,6 +40,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.NodeRoles; import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xcontent.XContentFactory; @@ -51,6 +56,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING; +import static org.elasticsearch.cluster.node.DiscoveryNode.getRolesFromSettings; import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; @@ -76,14 +82,24 @@ protected Collection> nodePlugins() { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { final Settings initialSettings = super.nodeSettings(nodeOrdinal, otherSettings); - if (DiscoveryNode.canContainData(otherSettings)) { + + if (DiscoveryNode.canContainData(otherSettings) + && getRolesFromSettings(otherSettings).stream() + .anyMatch( + nr -> nr.roleName().equals(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE.roleName()) + || nr.roleName().equals(DiscoveryNodeRole.DATA_ROLE.roleName()) + )) { return Settings.builder() .put(initialSettings) // Have a shared cache of reasonable size available on each node because tests randomize over frozen and cold allocation .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofMb(randomLongBetween(1, 10))) .build(); } else { - return initialSettings; + return Settings.builder() + .put(initialSettings) + // Remove the shared cache size setting on nodes that hold neither frozen nor regular data + .putNull(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey()) + .build(); } } @@ -955,6 +971,129 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo } } + public void testCanMatchSkipsPartiallyMountedIndicesWhenFrozenNodesUnavailable() throws Exception { + internalCluster().startMasterOnlyNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String dataNodeHoldingRegularIndex = internalCluster().startNode( + NodeRoles.onlyRole(DiscoveryNodeRole.DATA_CONTENT_NODE_ROLE) + ); + final String dataNodeHoldingSearchableSnapshot = internalCluster().startNode( + NodeRoles.onlyRole(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE) + ); + + final String indexToMountInFrozen = "frozen-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int shardCount = randomIntBetween(2, 3); + createIndexWithTimestampAndEventIngested(indexToMountInFrozen, shardCount, Settings.EMPTY); + final int numDocsFrozenIndex = between(350, 1000); + indexRandomDocs(indexToMountInFrozen, numDocsFrozenIndex); + + final String regularIndex = "regular-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndexWithTimestampAndEventIngested( + regularIndex, + shardCount, + Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex) + .build() + ); + int numDocsRegularIndex = between(100,
1000); + indexDocumentsWithTimestampAndEventIngestedDates(regularIndex, numDocsRegularIndex, TIMESTAMP_TEMPLATE_WITHIN_RANGE); + + final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createRepository(repositoryName, "mock"); + + final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexToMountInFrozen)).snapshotId(); + assertAcked(indicesAdmin().prepareDelete(indexToMountInFrozen)); + + final String partiallyMountedIndex = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + + final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest( + TEST_REQUEST_TIMEOUT, + partiallyMountedIndex, + repositoryName, + snapshotId.getName(), + indexToMountInFrozen, + Settings.EMPTY, + Strings.EMPTY_ARRAY, + false, + MountSearchableSnapshotRequest.Storage.SHARED_CACHE + ); + client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet(); + + ensureGreen(regularIndex, partiallyMountedIndex); + + // Stop the node holding the searchable snapshots, and since we defined + // the index allocation criteria to require the searchable snapshot + // index to be allocated in that node, the shards should remain unassigned + internalCluster().stopNode(dataNodeHoldingSearchableSnapshot); + final IndexMetadata partiallyMountedIndexMetadata = getIndexMetadata(partiallyMountedIndex); + waitUntilAllShardsAreUnassigned(partiallyMountedIndexMetadata.getIndex()); + + { + // term query + TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("_tier", "data_content"); + List indicesToSearch = List.of(regularIndex, partiallyMountedIndex); + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(termQueryBuilder)); + + assertResponse(client().search(request), searchResponse -> { + // as we excluded the frozen tier we shouldn't get any failures + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // we should be receiving all the hits from the index that's in the data_content tier + assertNotNull(searchResponse.getHits().getTotalHits()); + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numDocsRegularIndex)); + }); + } + + { + // termS query + TermsQueryBuilder termsQueryBuilder = QueryBuilders.termsQuery("_tier", "data_hot", "data_content"); + List indicesToSearch = List.of(regularIndex, partiallyMountedIndex); + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(termsQueryBuilder)); + + assertResponse(client().search(request), searchResponse -> { + // as we excluded the frozen tier we shouldn't get any failures + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // we should be receiving all the hits from the index that's in the data_content tier + assertNotNull(searchResponse.getHits().getTotalHits()); + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numDocsRegularIndex)); + }); + } + + { + // bool term query + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery("_tier", "data_frozen")); + List indicesToSearch = List.of(regularIndex, partiallyMountedIndex); + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(boolQueryBuilder)); + + assertResponse(client().search(request), searchResponse -> { + // as we excluded the frozen tier we shouldn't get any failures + 
assertThat(searchResponse.getFailedShards(), equalTo(0));
+ // we should be receiving all the hits from the index that's in the data_content tier
+ assertNotNull(searchResponse.getHits().getTotalHits());
+ assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numDocsRegularIndex));
+ });
+ }
+
+ {
+ // bool prefix, wildcard
+ BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery()
+ .mustNot(randomFrom(QueryBuilders.wildcardQuery("_tier", "dat*ozen"), QueryBuilders.prefixQuery("_tier", "data_fro")));
+ List indicesToSearch = List.of(regularIndex, partiallyMountedIndex);
+ SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0]))
+ .source(new SearchSourceBuilder().query(boolQueryBuilder));
+
+ assertResponse(client().search(request), searchResponse -> {
+ // as we excluded the frozen tier we shouldn't get any failures
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ // we should be receiving all the hits from the index that's in the data_content tier
+ assertNotNull(searchResponse.getHits().getTotalHits());
+ assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) numDocsRegularIndex));
+ });
+ }
+ }
+
private void createIndexWithTimestampAndEventIngested(String indexName, int numShards, Settings extraSettings) throws IOException {
assertAcked(
indicesAdmin().prepareCreate(indexName)

From 4fb7a4f1e98cb2934bf1427bb9dba0140a481dd6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?=
Date: Thu, 24 Oct 2024 14:07:06 +0200
Subject: [PATCH 091/202] [DOCS] Improve inference API documentation (#115235)

Co-authored-by: David Kyle
---
 .../inference/inference-apis.asciidoc | 18 ++++
 .../inference/service-elasticsearch.asciidoc | 94 +++++++++++++++++--
 .../inference/service-elser.asciidoc | 3 +-
 3 files changed, 104 insertions(+), 11 deletions(-)

diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc
index b291b464be498..ddcff1abc7dce 100644
--- a/docs/reference/inference/inference-apis.asciidoc
+++ b/docs/reference/inference/inference-apis.asciidoc
@@ -34,6 +34,24 @@ Elastic –, then create an {infer} endpoint by the <>.
Now use <> to perform <> on your data.
+
+[discrete]
+[[default-enpoints]]
+=== Default {infer} endpoints
+
+Your {es} deployment contains some preconfigured {infer} endpoints that make it easier for you to use them when defining `semantic_text` fields or {infer} processors.
+The following list contains the default {infer} endpoints listed by `inference_id`:
+
+* `.elser-2-elasticsearch`: uses the {ml-docs}/ml-nlp-elser.html[ELSER] built-in trained model for `sparse_embedding` tasks (recommended for English language texts)
+* `.multilingual-e5-small-elasticsearch`: uses the {ml-docs}/ml-nlp-e5.html[E5] built-in trained model for `text_embedding` tasks (recommended for non-English language texts)
+
+Use the `inference_id` of the endpoint in a <> field definition or when creating an <>.
+The API call will automatically download and deploy the model, which might take a couple of minutes.
+Default {infer} endpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled.
+For these models, the minimum number of allocations is `0`.
+If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes.
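
For illustration, here is a minimal sketch, using the low-level Java REST client, of pointing a `semantic_text` field at the preconfigured ELSER endpoint described above. The index name `my-semantic-index`, the field name `content`, and the host and port are hypothetical assumptions, not part of the PR.

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class DefaultEndpointSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Map a semantic_text field to the preconfigured ELSER endpoint by its
            // inference_id. No PUT _inference call is needed first: the default
            // endpoint already exists, and the first use triggers model download
            // and deployment as described above.
            Request createIndex = new Request("PUT", "/my-semantic-index");
            createIndex.setJsonEntity("""
                {
                  "mappings": {
                    "properties": {
                      "content": {
                        "type": "semantic_text",
                        "inference_id": ".elser-2-elasticsearch"
                      }
                    }
                  }
                }""");
            client.performRequest(createIndex);
        }
    }
}
----

Documents indexed into `content` would then be chunked and embedded by ELSER without any explicit ingest pipeline.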
+ + include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index efa0c78b8356f..259779a12134d 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -1,12 +1,9 @@ [[infer-service-elasticsearch]] === Elasticsearch {infer} service -Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` -service. +Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service. -NOTE: If you use the E5 model through the `elasticsearch` service, the API -request will automatically download and deploy the model if it isn't downloaded -yet. +NOTE: If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. [discrete] @@ -56,6 +53,11 @@ These settings are specific to the `elasticsearch` service. (Optional, object) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] +`deployment_id`::: +(Optional, string) +The `deployment_id` of an existing trained model deployment. +When `deployment_id` is used the `model_id` is optional. + `enabled`:::: (Optional, Boolean) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] @@ -71,7 +73,7 @@ include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-min-number] `model_id`::: (Required, string) The name of the model to use for the {infer} task. -It can be the ID of either a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model already +It can be the ID of either a built-in model (for example, `.multilingual-e5-small` for E5), a text embedding model already {ml-docs}/ml-nlp-import-model.html#ml-nlp-import-script[uploaded through Eland]. `num_allocations`::: @@ -98,15 +100,44 @@ Returns the document instead of only the index. Defaults to `true`. ===== +[discrete] +[[inference-example-elasticsearch-elser]] +==== ELSER via the `elasticsearch` service + +The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type. + +The API request below will automatically download the ELSER model if it isn't already downloaded and then deploy the model. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-model +{ + "service": "elasticsearch", + "service_settings": { + "adaptive_allocations": { <1> + "enabled": true, + "min_number_of_allocations": 1, + "max_number_of_allocations": 10 + }, + "num_threads": 1, + "model_id": ".elser_model_2" <2> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> Adaptive allocations will be enabled with the minimum of 1 and the maximum of 10 allocations. +<2> The `model_id` must be the ID of one of the built-in ELSER models. +Valid values are `.elser_model_2` and `.elser_model_2_linux-x86_64`. +For further details, refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation]. + + [discrete] [[inference-example-elasticsearch]] ==== E5 via the `elasticsearch` service -The following example shows how to create an {infer} endpoint called -`my-e5-model` to perform a `text_embedding` task type. 
+The following example shows how to create an {infer} endpoint called `my-e5-model` to perform a `text_embedding` task type. -The API request below will automatically download the E5 model if it isn't -already downloaded and then deploy the model. +The API request below will automatically download the E5 model if it isn't already downloaded and then deploy the model. [source,console] ------------------------------------------------------------ @@ -185,3 +216,46 @@ PUT _inference/text_embedding/my-e5-model } ------------------------------------------------------------ // TEST[skip:TBD] + + +[discrete] +[[inference-example-existing-deployment]] +==== Using an existing model deployment with the `elasticsearch` service + +The following example shows how to use an already existing model deployment when creating an {infer} endpoint. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/use_existing_deployment +{ + "service": "elasticsearch", + "service_settings": { + "deployment_id": ".elser_model_2" <1> + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The `deployment_id` of the already existing model deployment. + +The API response contains the `model_id`, and the threads and allocations settings from the model deployment: + +[source,console-result] +------------------------------------------------------------ +{ + "inference_id": "use_existing_deployment", + "task_type": "sparse_embedding", + "service": "elasticsearch", + "service_settings": { + "num_allocations": 2, + "num_threads": 1, + "model_id": ".elser_model_2", + "deployment_id": ".elser_model_2" + }, + "chunking_settings": { + "strategy": "sentence", + "max_chunk_size": 250, + "sentence_overlap": 1 + } +} +------------------------------------------------------------ +// NOTCONSOLE \ No newline at end of file diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 6afc2a2e3ef65..521fab0375584 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -2,6 +2,7 @@ === ELSER {infer} service Creates an {infer} endpoint to perform an {infer} task with the `elser` service. +You can also deploy ELSER by using the <>. NOTE: The API request will automatically download and deploy the ELSER model if it isn't already downloaded. @@ -128,7 +129,7 @@ If using the Python client, you can set the `timeout` parameter to a higher valu [discrete] [[inference-example-elser-adaptive-allocation]] -==== Setting adaptive allocation for the ELSER service +==== Setting adaptive allocations for the ELSER service NOTE: For more information on how to optimize your ELSER endpoints, refer to {ml-docs}/ml-nlp-elser.html#elser-recommendations[the ELSER recommendations] section in the model documentation. To learn more about model autoscaling, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] page. 
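
To make the endpoint definitions above concrete, here is a minimal sketch of calling such an endpoint once it exists, again via the low-level Java REST client. The endpoint name `my-elser-model` matches the console example earlier in this section; the helper class, method name, and input text are hypothetical, and the naive string concatenation assumes the input needs no JSON escaping.

[source,java]
----
import java.io.IOException;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

final class InferenceCallSketch {
    // POST _inference/<task_type>/<inference_id> runs the model; for a
    // sparse_embedding endpoint the response body holds token/weight pairs.
    static String embed(RestClient client, String text) throws IOException {
        Request infer = new Request("POST", "/_inference/sparse_embedding/my-elser-model");
        infer.setJsonEntity("{\"input\": \"" + text + "\"}"); // illustration only, no escaping
        Response response = client.performRequest(infer);
        return EntityUtils.toString(response.getEntity());
    }
}
----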
From 4e4fe9c3a99faaded41b6c08d98bf8eda6f3ea6b Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 24 Oct 2024 23:28:55 +1100 Subject: [PATCH 092/202] Mute org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT testDeploymentSurvivesRestart {cluster=UPGRADED} #115528 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8b9c3cc6ce712..2d5349ed03b48 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT method: testFileSettingsReprocessedOnRestartWithoutVersionChange issue: https://github.com/elastic/elasticsearch/issues/115450 +- class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT + method: testDeploymentSurvivesRestart {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/115528 # Examples: # From f774d0ee8249fef76182f76d401a97e217c53981 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 24 Oct 2024 14:58:37 +0200 Subject: [PATCH 093/202] Remove Delivery team as codeowners for gradle build scripts (#115523) --- .github/CODEOWNERS | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5b98444c044d2..540da14402192 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -39,7 +39,6 @@ gradle @elastic/es-delivery build-conventions @elastic/es-delivery build-tools @elastic/es-delivery build-tools-internal @elastic/es-delivery -*.gradle @elastic/es-delivery .buildkite @elastic/es-delivery .ci @elastic/es-delivery .idea @elastic/es-delivery From 889d2c346e4ab498875b1bb0aaaee88c54f4c1a2 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Thu, 24 Oct 2024 09:03:12 -0400 Subject: [PATCH 094/202] [ESQL] Enable "any type" aggregations on Date Nanos (#114438) Resolves #110002 Resolves #110003 Resolves #110005 Enable Values, Count, CountDistinct, Min and Max aggregations on date nanos. In the course of addressing this, I had to make some changes to AggregateMapper where it maps types into string names. I tried to refactor this once before (#110841) but at the time we decided not to go ahead with it. That bit me while working on this, and so I am trying again to refactor it. This time I've made a more localized change, just replacing the cascading if block with a switch. That will cause a compile time failure when future new data types are added, unless they correctly update this section. I've also done a small refactoring on the aggregators themselves, to make the supplier function consistent with the typeResolution. 
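
As a minimal sketch of the exhaustive-switch pattern this message describes (the enum and class names below are hypothetical stand-ins, not the actual `AggregateMapper` code):

[source,java]
----
// A stripped-down model of the refactoring described above: a switch
// expression over an enum with no default branch. javac checks such a
// switch for exhaustiveness, so adding a new DataType constant stops
// this method compiling until a case covers it, whereas a cascading
// if/else chain would silently fall through to the error path.
enum DataType { BOOLEAN, INTEGER, LONG, DATETIME, DATE_NANOS, DOUBLE, KEYWORD }

final class TypeNameSketch {
    static String nameFor(DataType type) {
        return switch (type) {
            case BOOLEAN -> "Boolean";
            case INTEGER -> "Int";
            // date types share the underlying long representation
            case LONG, DATETIME, DATE_NANOS -> "Long";
            case DOUBLE -> "Double";
            case KEYWORD -> "BytesRef";
        };
    }
}
----

This compile-time guarantee is what the commit message relies on: forgetting to handle a future data type becomes a build failure rather than a runtime exception.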
--------- Co-authored-by: Elastic Machine --- .../src/main/resources/date_nanos.csv | 1 + .../src/main/resources/date_nanos.csv-spec | 31 ++++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 +++ .../function/aggregate/CountDistinct.java | 40 ++++++++++-------- .../expression/function/aggregate/Max.java | 42 +++++++++---------- .../expression/function/aggregate/Min.java | 42 +++++++++---------- .../expression/function/aggregate/Values.java | 38 +++++++++-------- .../xpack/esql/planner/AggregateMapper.java | 31 ++++++-------- 8 files changed, 131 insertions(+), 99 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv index 029c3baf3cbfb..26b6f055221a6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv @@ -6,5 +6,6 @@ millis:date,nanos:date_nanos,num:long 2023-10-23T13:33:34.937Z,2023-10-23T13:33:34.937193000Z,1698068014937193000 2023-10-23T12:27:28.948Z,2023-10-23T12:27:28.948000000Z,1698064048948000000 2023-10-23T12:15:03.360Z,2023-10-23T12:15:03.360103847Z,1698063303360103847 +2023-10-23T12:15:03.360Z,2023-10-23T12:15:03.360103847Z,1698063303360103847 1999-10-23T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-02-23T13:33:34.937193000Z, 2023-01-23T13:55:01.543123456Z], 0 1999-10-22T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z], 0 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index 515e2c9c6587f..d0edc1f07d021 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -216,6 +216,7 @@ l:long 1698068014937193000 1698064048948000000 1698063303360103847 +1698063303360103847 ; long to date nanos, index version @@ -231,6 +232,7 @@ d:date_nanos 2023-10-23T13:33:34.937193000Z 2023-10-23T12:27:28.948000000Z 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360103847Z ; date_nanos to date nanos, index version @@ -246,6 +248,7 @@ d:date_nanos 2023-10-23T13:33:34.937193000Z 2023-10-23T12:27:28.948000000Z 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360103847Z ; attempt to cast the result of a fold to date nanos @@ -331,3 +334,31 @@ a:date_nanos [2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] [2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z] ; + + +Max and Min of date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | STATS max = MAX(nanos), min = MIN(nanos); + +max:date_nanos | min:date_nanos +2023-10-23T13:55:01.543123456Z | 2023-01-23T13:55:01.543123456Z +; + +Count and count distinct of date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | WHERE millis > "2020-01-01" | STATS count = COUNT(nanos), count_distinct = COUNT_DISTINCT(nanos); + +count:long | count_distinct:long +8 | 7 +; + +Values aggregation on date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | WHERE millis > "2020-01-01" | STATS v = MV_SORT(VALUES(nanos), "DESC"); + +v:date_nanos +[2023-10-23T13:55:01.543123456Z, 2023-10-23T13:53:55.832987654Z, 2023-10-23T13:52:55.015787878Z, 2023-10-23T13:51:54.732102837Z, 2023-10-23T13:33:34.937193000Z, 2023-10-23T12:27:28.948000000Z, 2023-10-23T12:15:03.360103847Z] +; diff 
--git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index f22ad07a4c6f6..55236af648236 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -313,6 +313,11 @@ public enum Cap { */ LEAST_GREATEST_FOR_DATENANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** + * support aggregations on date nanos + */ + DATE_NANOS_AGGREGATIONS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** * Support for datetime in least and greatest functions */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 756000dfbb187..5ae162f1fbb12 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -38,6 +38,8 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; @@ -53,6 +55,20 @@ public class CountDistinct extends AggregateFunction implements OptionalArgument CountDistinct::new ); + private static final Map, Integer, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + // Booleans ignore the precision because there are only two possible values anyway + Map.entry(DataType.BOOLEAN, (inputChannels, precision) -> new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels)), + Map.entry(DataType.LONG, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, CountDistinctIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, CountDistinctDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, CountDistinctBytesRefAggregatorFunctionSupplier::new) + ); + private static final int DEFAULT_PRECISION = 3000; private final Expression precision; @@ -102,7 +118,7 @@ public CountDistinct( Source source, @Param( name = "field", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Column or literal for which to count the number of distinct values." 
) Expression field, @Param( @@ -179,7 +195,7 @@ protected TypeResolution resolveType() { .and( isType( field(), - dt -> dt != DataType.UNSIGNED_LONG && dt != DataType.SOURCE, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "any exact type except unsigned_long, _source, or counter types" @@ -196,23 +212,11 @@ protected TypeResolution resolveType() { public AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); int precision = this.precision == null ? DEFAULT_PRECISION : ((Number) this.precision.fold()).intValue(); - if (type == DataType.BOOLEAN) { - // Booleans ignore the precision because there are only two possible values anyway - return new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DATETIME || type == DataType.LONG) { - return new CountDistinctLongAggregatorFunctionSupplier(inputChannels, precision); - } - if (type == DataType.INTEGER) { - return new CountDistinctIntAggregatorFunctionSupplier(inputChannels, precision); - } - if (type == DataType.DOUBLE) { - return new CountDistinctDoubleAggregatorFunctionSupplier(inputChannels, precision); - } - if (DataType.isString(type) || type == DataType.IP || type == DataType.VERSION) { - return new CountDistinctBytesRefAggregatorFunctionSupplier(inputChannels, precision); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels, precision); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 6119b2ce58465..ee16193efdccc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -32,16 +32,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; -import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Max extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Max", Max::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.BOOLEAN, MaxBooleanAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, MaxIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, MaxDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, MaxIpAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, MaxBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, MaxBytesRefAggregatorFunctionSupplier::new), + 
Map.entry(DataType.VERSION, MaxBytesRefAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The maximum value of a field.", @@ -98,7 +110,7 @@ public Max replaceChildren(List newChildren) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "representable except unsigned_long and spatial types" @@ -113,25 +125,11 @@ public DataType dataType() { @Override public final AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.BOOLEAN) { - return new MaxBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new MaxLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.INTEGER) { - return new MaxIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new MaxDoubleAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.IP) { - return new MaxIpAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.VERSION || DataType.isString(type)) { - return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index a1492f79da393..7aaa41ea6ab11 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -32,16 +32,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; -import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Min extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Min", Min::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.BOOLEAN, MinBooleanAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, MinIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, MinDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, MinIpAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, MinBytesRefAggregatorFunctionSupplier::new), + 
Map.entry(DataType.KEYWORD, MinBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, MinBytesRefAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The minimum value of a field.", @@ -98,7 +110,7 @@ public Min withFilter(Expression filter) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "representable except unsigned_long and spatial types" @@ -113,25 +125,11 @@ public DataType dataType() { @Override public final AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.BOOLEAN) { - return new MinBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new MinLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.INTEGER) { - return new MinIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new MinDoubleAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.IP) { - return new MinIpAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.VERSION || DataType.isString(type)) { - return new MinBytesRefAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index a844b981c95d6..8d576839c3c5c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -29,14 +29,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; public class Values extends AggregateFunction implements ToAggregator { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Values", Values::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.INTEGER, ValuesIntAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, ValuesDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, 
ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.BOOLEAN, ValuesBooleanAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, preview = true, @@ -98,7 +112,7 @@ public DataType dataType() { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - dt -> DataType.isSpatial(dt) == false && dt != UNSIGNED_LONG, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "any type except unsigned_long and spatial types" @@ -108,22 +122,10 @@ protected TypeResolution resolveType() { @Override public AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.INTEGER) { - return new ValuesIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new ValuesLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new ValuesDoubleAggregatorFunctionSupplier(inputChannels); - } - if (DataType.isString(type) || type == DataType.IP || type == DataType.VERSION) { - return new ValuesBytesRefAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.BOOLEAN) { - return new ValuesBooleanAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - // TODO cartesian_point, geo_point - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index c322135198262..3e81c2a2c1101 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -297,25 +297,18 @@ private static String dataTypeToString(DataType type, Class aggClass) { if (aggClass == Top.class && type.equals(DataType.IP)) { return "Ip"; } - if (type.equals(DataType.BOOLEAN)) { - return "Boolean"; - } else if (type.equals(DataType.INTEGER) || type.equals(DataType.COUNTER_INTEGER)) { - return "Int"; - } else if (type.equals(DataType.LONG) || type.equals(DataType.DATETIME) || type.equals(DataType.COUNTER_LONG)) { - return "Long"; - } else if (type.equals(DataType.DOUBLE) || type.equals(DataType.COUNTER_DOUBLE)) { - return "Double"; - } else if (type.equals(DataType.KEYWORD) - || type.equals(DataType.IP) - || type.equals(DataType.VERSION) - || type.equals(DataType.TEXT)) { - return "BytesRef"; - } else if (type.equals(GEO_POINT)) { - return "GeoPoint"; - } else if (type.equals(CARTESIAN_POINT)) { - return "CartesianPoint"; - } else { + + return switch (type) { + case DataType.BOOLEAN -> "Boolean"; + case DataType.INTEGER, DataType.COUNTER_INTEGER -> "Int"; + case DataType.LONG, DataType.DATETIME, DataType.COUNTER_LONG, DataType.DATE_NANOS -> "Long"; + case DataType.DOUBLE, DataType.COUNTER_DOUBLE -> "Double"; + case DataType.KEYWORD, DataType.IP, DataType.VERSION, DataType.TEXT -> "BytesRef"; + case GEO_POINT -> "GeoPoint"; + case CARTESIAN_POINT -> "CartesianPoint"; + case SEMANTIC_TEXT, UNSUPPORTED, NULL, UNSIGNED_LONG, SHORT, BYTE, FLOAT, HALF_FLOAT, SCALED_FLOAT, OBJECT, SOURCE, DATE_PERIOD, + TIME_DURATION, 
CARTESIAN_SHAPE, GEO_SHAPE, DOC_DATA_TYPE, TSID_DATA_TYPE, PARTIAL_AGG -> throw new EsqlIllegalArgumentException("illegal agg type: " + type.typeName());
- }
+ };
}
}

From 28715b791a88de6b3f2ccb6b4f097a9881f01007 Mon Sep 17 00:00:00 2001
From: mspielberg <9729801+mspielberg@users.noreply.github.com>
Date: Thu, 24 Oct 2024 06:06:39 -0700
Subject: [PATCH 095/202] Add documentation for minimum_should_match (#113043)

---
 .../reference/query-dsl/terms-set-query.asciidoc | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc
index 2abfe54d53976..27717af3ac171 100644
--- a/docs/reference/query-dsl/terms-set-query.asciidoc
+++ b/docs/reference/query-dsl/terms-set-query.asciidoc
@@ -159,12 +159,22 @@ GET /job-candidates/_search
`terms`::
+
--
-(Required, array of strings) Array of terms you wish to find in the provided
+(Required, array) Array of terms you wish to find in the provided
``. To return a document, a required number of terms must exactly
match the field values, including whitespace and capitalization.
-The required number of matching terms is defined in the
-`minimum_should_match_field` or `minimum_should_match_script` parameter.
+The required number of matching terms is defined in the `minimum_should_match`,
+`minimum_should_match_field` or `minimum_should_match_script` parameters. Exactly
+one of these parameters must be provided.
--
+
+`minimum_should_match`::
++
+--
+(Optional) Specification for the number of matching terms required to return
+a document.
+
+For valid values, see <>.
--
`minimum_should_match_field`::

From 6980fc62531923b68accc204fc25e7dea59760e3 Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Thu, 24 Oct 2024 15:11:10 +0200
Subject: [PATCH 096/202] [DOCS] Add text_expansion deprecation usage note (#115529)

---
 docs/reference/query-dsl/text-expansion-query.asciidoc | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc
index 235a413df686f..5c7bce8c3fcf0 100644
--- a/docs/reference/query-dsl/text-expansion-query.asciidoc
+++ b/docs/reference/query-dsl/text-expansion-query.asciidoc
@@ -7,6 +7,13 @@
deprecated[8.15.0, This query has been replaced by <>.]
+.Deprecation usage note
+****
+You can continue using `rank_features` fields with `text_expansion` queries in the current version.
+However, if you plan to upgrade, we recommend updating mappings to use the `sparse_vector` field type and <>.
+This will allow you to take advantage of the new capabilities and improvements available in newer versions.
+****
+
The text expansion query uses a {nlp} model to convert the query text into a list of token-weight pairs which are then used in a query against a
<> or <> field.

From 833f2fb9185072b0f8edcd2576d512ff91810277 Mon Sep 17 00:00:00 2001
From: Stef Nestor <26751266+stefnestor@users.noreply.github.com>
Date: Thu, 24 Oct 2024 07:27:23 -0600
Subject: [PATCH 097/202] (Doc+) link video for resolving max shards open (#115480)

MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

👋 howdy team! @anniegale9538's and my [video](https://www.youtube.com/watch?v=tZKbDegt4-M) demonstrates how to resolve `max shards open` errors, a common support ask.
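
As a rough sketch of the temporary mitigation that the linked video and the updated page walk through, assuming the low-level Java REST client and an arbitrary example limit of `1200` (reducing the shard count remains the durable fix):

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class RaiseShardLimitSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Temporarily raise the cluster-wide per-node shard limit so that
            // requests blocked by "max shards open" errors can proceed while
            // excess shards are deleted, shrunk, or frozen.
            Request settings = new Request("PUT", "/_cluster/settings");
            settings.setJsonEntity("""
                {
                  "persistent": {
                    "cluster.max_shards_per_node": 1200
                  }
                }""");
            client.performRequest(settings);
        }
    }
}
----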
--- docs/reference/how-to/size-your-shards.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 8770ec373bb18..86f195d030223 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -572,7 +572,7 @@ PUT _cluster/settings } ---- -For more information, see <>. +See this https://www.youtube.com/watch?v=tZKbDegt4-M[fixing "max shards open" video] for an example troubleshooting walkthrough. For more information, see <>. [discrete] [[troubleshooting-max-docs-limit]] From e99607b5895880d11b4981279314bcbb6b0fe3a9 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Thu, 24 Oct 2024 16:29:14 +0300 Subject: [PATCH 098/202] Adding breaking change entry for retrievers (#115399) --- docs/changelog/115399.yaml | 29 +++++++++++++++++++ .../TextSimilarityRankRetrieverBuilder.java | 2 +- .../xpack/rank/rrf/RRFRetrieverBuilder.java | 2 +- 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/115399.yaml diff --git a/docs/changelog/115399.yaml b/docs/changelog/115399.yaml new file mode 100644 index 0000000000000..9f69657a5d167 --- /dev/null +++ b/docs/changelog/115399.yaml @@ -0,0 +1,29 @@ +pr: 115399 +summary: Adding breaking change entry for retrievers +area: Search +type: breaking +issues: [] +breaking: + title: Reworking RRF retriever to be evaluated during rewrite phase + area: REST API + details: |- + In this release (8.16), we have introduced major changes to the retrievers framework + and how they can be evaluated, focusing mainly on compound retrievers + like `rrf` and `text_similarity_reranker`, which allowed us to support full + composability (i.e. any retriever can be nested under any compound retriever), + as well as supporting additional search features like collapsing, explaining, + aggregations, and highlighting. + + To ensure consistency, and given that this rework is not available until 8.16, + `rrf` and `text_similarity_reranker` retriever queries would now + throw an exception in a mixed cluster scenario, where there are nodes + both in current or later (i.e. >= 8.16) and previous ( <= 8.15) versions. + + As part of the rework, we have also removed the `_rank` property from + the responses of an `rrf` retriever. + impact: |- + - Users will not be able to use the `rrf` and `text_similarity_reranker` retrievers in a mixed cluster scenario + with previous releases (i.e. prior to 8.16), and the request will throw an `IllegalArgumentException`. 
+ - `_rank` has now been removed from the output of the `rrf` retrievers so trying to directly parse the field + will throw an exception + notable: false diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index 342199dc51db8..91b6cdc61afe4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -81,7 +81,7 @@ public static TextSimilarityRankRetrieverBuilder fromXContent(XContentParser par throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + TextSimilarityRankBuilder.NAME + "]"); } if (context.clusterSupportsFeature(TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED) == false) { - throw new UnsupportedOperationException( + throw new IllegalArgumentException( "[text_similarity_reranker] retriever composition feature is not supported by all nodes in the cluster" ); } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index c3c9f19cde6ef..792ff4eac3893 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -83,7 +83,7 @@ public static RRFRetrieverBuilder fromXContent(XContentParser parser, RetrieverP throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); } if (context.clusterSupportsFeature(RRF_RETRIEVER_COMPOSITION_SUPPORTED) == false) { - throw new UnsupportedOperationException("[rrf] retriever composition feature is not supported by all nodes in the cluster"); + throw new IllegalArgumentException("[rrf] retriever composition feature is not supported by all nodes in the cluster"); } if (RRFRankPlugin.RANK_RRF_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { throw LicenseUtils.newComplianceException("Reciprocal Rank Fusion (RRF)"); From 28882e86b200e9dfef47e6615bfd993d35f17abd Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Thu, 24 Oct 2024 14:30:32 +0100 Subject: [PATCH 099/202] Report JVM stats for all memory pools (97046) (#115117) This fix allows reporting of all JVM memory pools sizes in JVM stats --- docs/changelog/115117.yaml | 6 ++++++ .../elasticsearch/monitor/jvm/GcNames.java | 15 +++++++++++++- .../elasticsearch/monitor/jvm/JvmStats.java | 5 +---- .../monitor/jvm/JvmStatsTests.java | 20 +++++++++++++++++-- 4 files changed, 39 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/115117.yaml diff --git a/docs/changelog/115117.yaml b/docs/changelog/115117.yaml new file mode 100644 index 0000000000000..de2defcd46afd --- /dev/null +++ b/docs/changelog/115117.yaml @@ -0,0 +1,6 @@ +pr: 115117 +summary: Report JVM stats for all memory pools (97046) +area: Infra/Core +type: bug +issues: + - 97046 diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java b/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java index 9db8e8f414d5c..3494204c330c0 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java +++ 
b/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java @@ -15,8 +15,14 @@ public class GcNames { public static final String OLD = "old"; public static final String SURVIVOR = "survivor"; + private GcNames() {} + /** - * Resolves the GC type by its memory pool name ({@link java.lang.management.MemoryPoolMXBean#getName()}. + * Resolves the memory area name by the memory pool name provided by {@link java.lang.management.MemoryPoolMXBean#getName()} + * + * @param poolName the name of the memory pool from {@link java.lang.management.MemoryPoolMXBean} + * @param defaultName the name to return if the pool name does not match any known memory area + * @return memory area name corresponding to the pool name or {@code defaultName} if no match is found */ public static String getByMemoryPoolName(String poolName, String defaultName) { if ("Eden Space".equals(poolName) @@ -40,6 +46,13 @@ public static String getByMemoryPoolName(String poolName, String defaultName) { return defaultName; } + /** + * Resolves the GC type by the GC name provided by {@link java.lang.management.GarbageCollectorMXBean#getName()} + * + * @param gcName the name of the GC from {@link java.lang.management.GarbageCollectorMXBean} + * @param defaultName the name to return if the GC name does not match any known GC type + * @return GC type corresponding to the GC name or {@code defaultName} if no match is found + */ public static String getByGcName(String gcName, String defaultName) { if ("Copy".equals(gcName) || "PS Scavenge".equals(gcName) || "ParNew".equals(gcName) || "G1 Young Generation".equals(gcName)) { return YOUNG; diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index 0a2763474b8df..e6b109207fdf3 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -64,10 +64,7 @@ public static JvmStats jvmStats() { List pools = new ArrayList<>(); for (MemoryPoolMXBean memoryPoolMXBean : memoryPoolMXBeans) { try { - String name = GcNames.getByMemoryPoolName(memoryPoolMXBean.getName(), null); - if (name == null) { // if we can't resolve it, its not interesting.... 
(Per Gen, Code Cache) - continue; - } + String name = GcNames.getByMemoryPoolName(memoryPoolMXBean.getName(), memoryPoolMXBean.getName()); MemoryUsage usage = memoryPoolMXBean.getUsage(); MemoryUsage peakUsage = memoryPoolMXBean.getPeakUsage(); pools.add( diff --git a/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java b/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java index 12fa776dd7efd..28976d803ff53 100644 --- a/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java @@ -13,17 +13,22 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; public class JvmStatsTests extends ESTestCase { - public void testJvmStats() throws IOException { + public void testJvmStats() { JvmStats stats = JvmStats.jvmStats(); assertNotNull(stats); assertNotNull(stats.getUptime()); @@ -40,6 +45,17 @@ public void testJvmStats() throws IOException { assertNotNull(mem.getHeapUsedPercent()); assertThat(mem.getHeapUsedPercent(), anyOf(equalTo((short) -1), greaterThanOrEqualTo((short) 0))); + // Memory pools + Map memoryPools = StreamSupport.stream(stats.getMem().spliterator(), false) + .collect(Collectors.toMap(JvmStats.MemoryPool::getName, Function.identity())); + assertThat(memoryPools, hasKey(GcNames.YOUNG)); + assertThat(memoryPools, hasKey(GcNames.OLD)); + assertThat(memoryPools, hasKey("Metaspace")); + assertThat(memoryPools.keySet(), hasSize(greaterThan(3))); + for (JvmStats.MemoryPool memoryPool : memoryPools.values()) { + assertThat(memoryPool.getUsed().getBytes(), greaterThan(0L)); + } + // Threads JvmStats.Threads threads = stats.getThreads(); assertNotNull(threads); From 37c7137f39d13ce36785c0bed01f2f058da886f8 Mon Sep 17 00:00:00 2001 From: Gergely Kalapos Date: Thu, 24 Oct 2024 15:49:45 +0200 Subject: [PATCH 100/202] [otel-data] Add more kubernetes aliases (#115429) * Add more kubernetes aliases * Update docs/changelog/115429.yaml * Review feedback --------- Co-authored-by: Elastic Machine --- docs/changelog/115429.yaml | 5 ++ .../semconv-resource-to-ecs@mappings.yaml | 48 +++++++++++++++++++ .../rest-api-spec/test/20_logs_tests.yml | 37 ++++++++++++++ 3 files changed, 90 insertions(+) create mode 100644 docs/changelog/115429.yaml diff --git a/docs/changelog/115429.yaml b/docs/changelog/115429.yaml new file mode 100644 index 0000000000000..ddf3c69183000 --- /dev/null +++ b/docs/changelog/115429.yaml @@ -0,0 +1,5 @@ +pr: 115429 +summary: "[otel-data] Add more kubernetes aliases" +area: Data streams +type: bug +issues: [] diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml index 6645e7d282520..eb5cd6d37af83 100644 --- a/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml +++ 
b/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml @@ -56,21 +56,45 @@ template: os.version: type: keyword ignore_above: 1024 + k8s.container.name: + type: keyword + ignore_above: 1024 + k8s.cronjob.name: + type: keyword + ignore_above: 1024 + k8s.daemonset.name: + type: keyword + ignore_above: 1024 k8s.deployment.name: type: keyword ignore_above: 1024 + k8s.job.name: + type: keyword + ignore_above: 1024 k8s.namespace.name: type: keyword ignore_above: 1024 + k8s.node.hostname: + type: keyword + ignore_above: 1024 k8s.node.name: type: keyword ignore_above: 1024 + k8s.node.uid: + type: keyword + ignore_above: 1024 k8s.pod.name: type: keyword ignore_above: 1024 k8s.pod.uid: type: keyword ignore_above: 1024 + k8s.replicaset.name: + type: keyword + ignore_above: 1024 + k8s.statefulset.name: + type: keyword + ignore_above: 1024 service.node.name: type: alias path: resource.attributes.service.instance.id @@ -122,6 +146,30 @@ template: kubernetes.pod.uid: type: alias path: resource.attributes.k8s.pod.uid + kubernetes.container.name: + type: alias + path: resource.attributes.k8s.container.name + kubernetes.cronjob.name: + type: alias + path: resource.attributes.k8s.cronjob.name + kubernetes.job.name: + type: alias + path: resource.attributes.k8s.job.name + kubernetes.statefulset.name: + type: alias + path: resource.attributes.k8s.statefulset.name + kubernetes.daemonset.name: + type: alias + path: resource.attributes.k8s.daemonset.name + kubernetes.replicaset.name: + type: alias + path: resource.attributes.k8s.replicaset.name + kubernetes.node.uid: + type: alias + path: resource.attributes.k8s.node.uid + kubernetes.node.hostname: + type: alias + path: resource.attributes.k8s.node.hostname # Below are non-ECS fields that may be used by Kibana. 
service.language.name: type: alias diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml index 6bc0cee78be4f..63966e601a3cb 100644 --- a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml @@ -187,3 +187,40 @@ host.name pass-through: - length: { hits.hits: 1 } - match: { hits.hits.0.fields.resource\.attributes\.host\.name: [ "localhost" ] } - match: { hits.hits.0.fields.host\.name: [ "localhost" ] } +--- +"kubernetes.* -> resource.attributes.k8s.* aliases": + - do: + bulk: + index: logs-generic.otel-default + refresh: true + body: + - create: { } + - "@timestamp": 2024-07-18T14:48:33.467654000Z + data_stream: + dataset: generic.otel + namespace: default + resource: + attributes: + k8s.container.name: myContainerName + k8s.cronjob.name: myCronJobName + k8s.job.name: myJobName + k8s.statefulset.name: myStatefulsetName + k8s.daemonset.name: myDaemonsetName + k8s.replicaset.name: myReplicasetName + k8s.node.uid: myNodeUid + k8s.node.hostname: myNodeHostname + - is_false: errors + - do: + search: + index: logs-generic.otel-default + body: + fields: ["kubernetes.container.name", "kubernetes.cronjob.name", "kubernetes.job.name", "kubernetes.statefulset.name", "kubernetes.daemonset.name", "kubernetes.replicaset.name", "kubernetes.node.uid", "kubernetes.node.hostname" ] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.kubernetes\.container\.name : ["myContainerName"] } + - match: { hits.hits.0.fields.kubernetes\.cronjob\.name : ["myCronJobName"] } + - match: { hits.hits.0.fields.kubernetes\.job\.name : ["myJobName"] } + - match: { hits.hits.0.fields.kubernetes\.statefulset\.name : ["myStatefulsetName"] } + - match: { hits.hits.0.fields.kubernetes\.daemonset\.name : ["myDaemonsetName"] } + - match: { hits.hits.0.fields.kubernetes\.replicaset\.name : ["myReplicasetName"] } + - match: { hits.hits.0.fields.kubernetes\.node\.uid : ["myNodeUid"] } + - match: { hits.hits.0.fields.kubernetes\.node\.hostname : ["myNodeHostname"] } From 31ede8fd284a79e1f62088d9800e59701f42b79a Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:57:49 +0200 Subject: [PATCH 101/202] Update 8.12.0.asciidoc (#115303) (#115546) Fixing confusing format Co-authored-by: Johannes Mahne --- docs/reference/release-notes/8.12.0.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/release-notes/8.12.0.asciidoc b/docs/reference/release-notes/8.12.0.asciidoc index bfa99401f41a2..bd0ae032ef0b9 100644 --- a/docs/reference/release-notes/8.12.0.asciidoc +++ b/docs/reference/release-notes/8.12.0.asciidoc @@ -11,7 +11,7 @@ Also see <>. + When using `int8_hnsw` and the default `confidence_interval` (or any `confidence_interval` less than `1.0`) and when there are deleted documents in the segments, quantiles may fail to build and prevent merging. - ++ This issue is fixed in 8.12.1. 
* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, From aae3b3499a7e397bbd2f2cd7df0e218ec3f12caf Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 00:57:55 +1100 Subject: [PATCH 102/202] Mute org.elasticsearch.test.apmintegration.MetricsApmIT testApmIntegration #115415 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2d5349ed03b48..1ee677b14fea1 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT method: testDeploymentSurvivesRestart {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/115528 +- class: org.elasticsearch.test.apmintegration.MetricsApmIT + method: testApmIntegration + issue: https://github.com/elastic/elasticsearch/issues/115415 # Examples: # From fffb98ac6c68cc633afbb855f697d514f4185c9b Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 15:12:41 +0100 Subject: [PATCH 103/202] [ML] Set max allocations to 32 in default configs (#115518) --- .../services/elasticsearch/ElasticsearchInternalService.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 6732e5719b897..a0235f74ce511 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -859,7 +859,7 @@ private List defaultConfigs(boolean useLinuxOptimizedModel) { null, 1, useLinuxOptimizedModel ? ELSER_V2_MODEL_LINUX_X86 : ELSER_V2_MODEL, - new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 8) + new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32) ), ElserMlNodeTaskSettings.DEFAULT, null // default chunking settings @@ -872,7 +872,7 @@ private List defaultConfigs(boolean useLinuxOptimizedModel) { null, 1, useLinuxOptimizedModel ? 
MULTILINGUAL_E5_SMALL_MODEL_ID_LINUX_X86 : MULTILINGUAL_E5_SMALL_MODEL_ID, - new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 8) + new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32) ), null // default chunking settings ); From 7d829fa51a13b2150ce7c0a08e3f5f66c9ee8bfb Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 15:14:29 +0100 Subject: [PATCH 104/202] [ML] Prevent NPE if model assignment is removed while waiting to start (#115430) --- docs/changelog/115430.yaml | 5 +++++ .../action/TransportStartTrainedModelDeploymentAction.java | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115430.yaml diff --git a/docs/changelog/115430.yaml b/docs/changelog/115430.yaml new file mode 100644 index 0000000000000..c2903f7751012 --- /dev/null +++ b/docs/changelog/115430.yaml @@ -0,0 +1,5 @@ +pr: 115430 +summary: Prevent NPE if model assignment is removed while waiting to start +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index 0bda2de2ce9ae..5fd70ce71cd24 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -671,7 +671,11 @@ public boolean test(ClusterState clusterState) { deploymentId ).orElse(null); if (trainedModelAssignment == null) { - // Something weird happened, it should NEVER be null... + // The assignment may be null if it was stopped by another action while waiting + this.exception = new ElasticsearchStatusException( + "Error waiting for the model deployment to start. 
The trained model assignment was removed while waiting", + RestStatus.BAD_REQUEST + ); logger.trace(() -> format("[%s] assignment was null while waiting for state [%s]", deploymentId, waitForState)); return true; } From 755c392bb22e9046ef79982aba188f3c45193c8b Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Thu, 24 Oct 2024 15:24:26 +0100 Subject: [PATCH 105/202] Fix for race condition in interval watcher scheduler tests (#115501) --- muted-tests.yml | 12 ------------ .../schedule/engine/TickerScheduleEngineTests.java | 12 ++++-------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 1ee677b14fea1..ba816ed5f3a9e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -258,21 +258,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)} issue: https://github.com/elastic/elasticsearch/issues/115231 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115339 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testWatchWithLastCheckedTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115354 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testAddWithLastCheckedTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115356 - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultE5 issue: https://github.com/elastic/elasticsearch/issues/115361 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115368 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 9a12b8f394eb2..ef290628c06d5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -312,14 +312,13 @@ public void testWatchWithLastCheckedTimeExecutesBeforeInitialInterval() throws E engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); @@ -375,14 +374,13 @@ public void testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInit engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } 
}); @@ -428,14 +426,13 @@ public void testAddWithLastCheckedTimeExecutesBeforeInitialInterval() throws Exc engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); @@ -492,14 +489,13 @@ public void testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitia engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); From d7a9575d0314adcc65b50c4972c585464c2aefa9 Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Thu, 24 Oct 2024 15:58:24 +0100 Subject: [PATCH 106/202] Remove deprecated local parameter from alias APIs (#115393) This removes the `local` parameter from the `GET /_alias`, `HEAD /_alias`, and `GET /_cat/aliases` APIs. This option became a no-op and was deprecated in 8.12 by https://github.com/elastic/elasticsearch/pull/101815. We continue to accept the parameter (deprecated, with no other effect) in v8 compatibility mode for `GET /_alias` and `HEAD /_alias`. We don't do this for `GET /_cat/aliases` where the [compatibility policy does not apply](https://github.com/elastic/elasticsearch/blob/main/REST_API_COMPATIBILITY.md#when-not-to-apply). --- docs/changelog/115393.yaml | 18 ++++++++++++ docs/reference/cat/alias.asciidoc | 10 +++---- docs/reference/indices/alias-exists.asciidoc | 2 -- docs/reference/indices/get-alias.asciidoc | 2 -- rest-api-spec/build.gradle | 1 + .../rest-api-spec/api/cat.aliases.json | 4 --- .../api/indices.exists_alias.json | 4 --- .../rest-api-spec/api/indices.get_alias.json | 4 --- .../test/cat.aliases/10_basic.yml | 13 --------- .../test/indices.exists_alias/10_basic.yml | 14 --------- .../test/indices.get_alias/10_basic.yml | 29 ------------------- .../admin/indices/RestGetAliasesAction.java | 15 +++++----- .../rest/action/cat/RestAliasAction.java | 22 -------------- 13 files changed, 30 insertions(+), 108 deletions(-) create mode 100644 docs/changelog/115393.yaml diff --git a/docs/changelog/115393.yaml b/docs/changelog/115393.yaml new file mode 100644 index 0000000000000..5cf4e5f64ab34 --- /dev/null +++ b/docs/changelog/115393.yaml @@ -0,0 +1,18 @@ +pr: 115393 +summary: Remove deprecated local attribute from alias APIs +area: Indices APIs +type: breaking +issues: [] +breaking: + title: Remove deprecated local attribute from alias APIs + area: REST API + details: >- + The following APIs no longer accept the `?local` query parameter: + `GET /_alias`, `GET /_aliases`, `GET /_alias/{name}`, + `HEAD /_alias/{name}`, `GET /{index}/_alias`, `HEAD /{index}/_alias`, + `GET /{index}/_alias/{name}`, `HEAD /{index}/_alias/{name}`, + `GET /_cat/aliases`, and `GET /_cat/aliases/{alias}`. This parameter + has been deprecated and ignored since version 8.12. + impact: >- + Cease usage of the `?local` query parameter when calling the listed APIs. + notable: false diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 72f949bf11e50..41ac279d3b2f5 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -6,8 +6,8 @@ [IMPORTANT] ==== -cat APIs are only intended for human consumption using the command line or the -{kib} console. 
They are _not_ intended for use by applications. For application +cat APIs are only intended for human consumption using the command line or the +{kib} console. They are _not_ intended for use by applications. For application consumption, use the <>. ==== @@ -45,8 +45,6 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-h] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=help] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] - include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-s] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-v] @@ -104,6 +102,6 @@ alias4 test1 - 2 1,2 - This response shows that `alias2` has configured a filter, and specific routing configurations in `alias3` and `alias4`. -If you only want to get information about specific aliases, you can specify -the aliases in comma-delimited format as a URL parameter, e.g., +If you only want to get information about specific aliases, you can specify +the aliases in comma-delimited format as a URL parameter, e.g., /_cat/aliases/alias1,alias2. diff --git a/docs/reference/indices/alias-exists.asciidoc b/docs/reference/indices/alias-exists.asciidoc index f820a95028a0f..d7b3454dcff56 100644 --- a/docs/reference/indices/alias-exists.asciidoc +++ b/docs/reference/indices/alias-exists.asciidoc @@ -52,8 +52,6 @@ Defaults to `all`. (Optional, Boolean) If `false`, requests that include a missing data stream or index in the `` return an error. Defaults to `false`. -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] - [[alias-exists-api-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/indices/get-alias.asciidoc b/docs/reference/indices/get-alias.asciidoc index 743aaf7aee174..41d62fb70e01b 100644 --- a/docs/reference/indices/get-alias.asciidoc +++ b/docs/reference/indices/get-alias.asciidoc @@ -58,5 +58,3 @@ Defaults to `all`. `ignore_unavailable`:: (Optional, Boolean) If `false`, requests that include a missing data stream or index in the `` return an error. Defaults to `false`. - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 4bd293f0a8641..6cc2028bffa39 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -60,4 +60,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("indices.sort/10_basic/Index Sort", "warning does not exist for compatibility") task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") + task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") }) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json index db49daeea372b..d3856b455efd1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json @@ -36,10 +36,6 @@ "type":"string", "description":"a short version of the Accept header, e.g. 
json, yaml" }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json index b70854fdc3eb2..7d7a9c96c6419 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json @@ -61,10 +61,6 @@ ], "default":"all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json index 0a4e4bb9ed90c..dc02a65adb068 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json @@ -79,10 +79,6 @@ ], "default": "all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" } } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 2e5234bd1ced1..6118453d7805e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -484,16 +484,3 @@ test_alias \s+ test_index\n my_alias \s+ test_index\n $/ - ---- -"Deprecated local parameter": - - requires: - cluster_features: ["gte_v8.12.0"] - test_runner_features: ["warnings"] - reason: verifying deprecation warnings from 8.12.0 onwards - - - do: - cat.aliases: - local: true - warnings: - - "the [?local=true] query parameter to cat-aliases requests has no effect and will be removed in a future version" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml index bf499de8463bd..a4223c2a983be 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml @@ -34,17 +34,3 @@ name: test_alias - is_false: '' - ---- -"Test indices.exists_alias with local flag": - - skip: - features: ["allowed_warnings"] - - - do: - indices.exists_alias: - name: test_alias - local: true - allowed_warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - - - is_false: '' diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 4f26a69712e83..63ab40f3bf578 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -289,21 +289,6 @@ setup: index: non-existent name: foo ---- -"Get alias with local flag": - - skip: - features: ["allowed_warnings"] - - - do: - indices.get_alias: - local: true - allowed_warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - - - is_true: test_index - - - is_true: test_index_2 - --- "Get alias against closed indices": - skip: @@ -329,17 +314,3 @@ setup: - is_true: test_index - is_false: test_index_2 - - ---- -"Deprecated local parameter": - - requires: - cluster_features: "gte_v8.12.0" - test_runner_features: ["warnings"] - reason: verifying deprecation warnings from 8.12.0 onwards - - - do: - indices.get_alias: - local: true - warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 7780ae08ac0ff..dfe501f29ce2e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -52,7 +52,7 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetAliasesAction extends BaseRestHandler { - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) // reject the deprecated ?local parameter + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) // remove the BWC support for the deprecated ?local parameter private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestGetAliasesAction.class); @Override @@ -199,8 +199,7 @@ static RestResponse buildRestResponse( } @Override - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - // v7 REST API no longer exists: eliminate ref to RestApiVersion.V_7; reject local parameter in v9 too? + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) // remove the BWC support for the deprecated ?local parameter public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { // The TransportGetAliasesAction was improved do the same post processing as is happening here. // We can't remove this logic yet to support mixed clusters. 
We should be able to remove this logic here @@ -213,10 +212,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getAliasesRequest.indices(indices); getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); - if (request.hasParam("local")) { - // consume this param just for validation - final var localParam = request.paramAsBoolean("local", false); - if (request.getRestApiVersion() != RestApiVersion.V_7) { + if (request.getRestApiVersion() == RestApiVersion.V_8) { + if (request.hasParam("local")) { + // consume this param just for validation when in BWC mode for V_8 + final var localParam = request.paramAsBoolean("local", false); DEPRECATION_LOGGER.critical( DeprecationCategory.API, "get-aliases-local", diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java index 191746b421c98..6aa0b1c865682 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java @@ -15,10 +15,6 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.Scope; @@ -34,8 +30,6 @@ @ServerlessScope(Scope.PUBLIC) public class RestAliasAction extends AbstractCatAction { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestAliasAction.class); - @Override public List routes() { return List.of(new Route(GET, "/_cat/aliases"), new Route(GET, "/_cat/aliases/{alias}")); @@ -52,27 +46,11 @@ public boolean allowSystemIndexAccessByDefault() { } @Override - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - // v7 REST API no longer exists: eliminate ref to RestApiVersion.V_7; reject local parameter in v9 too? protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final GetAliasesRequest getAliasesRequest = request.hasParam("alias") ? 
new GetAliasesRequest(Strings.commaDelimitedListToStringArray(request.param("alias")))
            : new GetAliasesRequest();
        getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions()));
-
-        if (request.hasParam("local")) {
-            // consume this param just for validation
-            final var localParam = request.paramAsBoolean("local", false);
-            if (request.getRestApiVersion() != RestApiVersion.V_7) {
-                DEPRECATION_LOGGER.critical(
-                    DeprecationCategory.API,
-                    "cat-aliases-local",
-                    "the [?local={}] query parameter to cat-aliases requests has no effect and will be removed in a future version",
-                    localParam
-                );
-            }
-        }
-
        return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
            .indices()
            .getAliases(getAliasesRequest, new RestResponseListener<>(channel) {

From d8a3fc22cde255dc9b7456ba1009bb8b45b7407d Mon Sep 17 00:00:00 2001
From: Artem Prigoda
Date: Thu, 24 Oct 2024 16:59:10 +0200
Subject: [PATCH 107/202] [test] Don't test any 7.x snapshots in
 `testLogicallyEquivalentSnapshotIsUsedEvenIfFilesAreDifferent` (#114821)

Don't test any 7.x snapshots, keep using any 8.x compatible snapshot and
Lucene version.

Originally added in 8.0 (#77420) for testing peer recoveries using snapshots.

Co-authored-by: Yang Wang
Co-authored-by: Elastic Machine
---
 .../SnapshotsRecoveryPlannerServiceTests.java | 20 ++-----------------
 1 file changed, 2 insertions(+), 18 deletions(-)

diff --git a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java
index b082698254d17..6e7f2d82cfb1d 100644
--- a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java
+++ b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java
@@ -26,15 +26,12 @@ import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.CheckedConsumer;
 import org.elasticsearch.core.IOUtils;
-import org.elasticsearch.core.UpdateForV9;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
-import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.store.StoreFileMetadata;
-import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.indices.recovery.plan.ShardRecoveryPlan;
 import org.elasticsearch.indices.recovery.plan.ShardSnapshot;
 import org.elasticsearch.indices.recovery.plan.ShardSnapshotsService;
@@ -63,7 +60,6 @@ import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList;
 import static org.elasticsearch.index.engine.Engine.ES_VERSION;
 import static org.elasticsearch.index.engine.Engine.HISTORY_UUID_KEY;
-import static org.elasticsearch.test.index.IndexVersionUtils.randomVersionBetween;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -203,8 +199,6 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener {
             boolean shareFilesWithSource =
randomBoolean(); @@ -217,18 +211,8 @@ public void testLogicallyEquivalentSnapshotIsUsedEvenIfFilesAreDifferent() throw final IndexVersion snapshotVersion; final Version luceneVersion; if (compatibleVersion) { - snapshotVersion = randomBoolean() ? null : IndexVersionUtils.randomCompatibleVersion(random()); - // If snapshotVersion is not present, - // then lucene version must be < RecoverySettings.SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION - if (snapshotVersion == null) { - luceneVersion = randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_INDEX_VERSION - ).luceneVersion(); - } else { - luceneVersion = IndexVersionUtils.randomCompatibleVersion(random()).luceneVersion(); - } + snapshotVersion = IndexVersionUtils.randomCompatibleVersion(random()); + luceneVersion = snapshotVersion.luceneVersion(); } else { snapshotVersion = IndexVersion.fromId(Integer.MAX_VALUE); luceneVersion = org.apache.lucene.util.Version.parse("255.255.255"); From f5d3c7c3d8bff7b91430c42d66550613e2716387 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 17:09:34 +0200 Subject: [PATCH 108/202] Remove legacy join validation transport protocol (#114571) We introduced a new join validation protocol in #85380 (8.3), the legacy protocol can be removed in 9.0 Remove assertion that we run a version after 8.3.0 --- .../coordination/JoinValidationService.java | 57 ++----------------- .../coordination/ValidateJoinRequest.java | 21 ++----- 2 files changed, 12 insertions(+), 66 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index 34d59c9860aba..7de7fd4d92d1b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -13,7 +13,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +30,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.Environment; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; @@ -46,7 +44,6 @@ import java.io.IOException; import java.util.Collection; import java.util.HashMap; -import java.util.Locale; import java.util.Map; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -162,55 +159,14 @@ public void validateJoin(DiscoveryNode discoveryNode, ActionListener liste return; } - if (connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { - if (executeRefs.tryIncRef()) { - try { - execute(new JoinValidation(discoveryNode, connection, listener)); - } finally { - executeRefs.decRef(); - } - } else { - listener.onFailure(new NodeClosedException(transportService.getLocalNode())); + if (executeRefs.tryIncRef()) { + try { + execute(new JoinValidation(discoveryNode, connection, listener)); + } finally { + executeRefs.decRef(); } } else { - legacyValidateJoin(discoveryNode, listener, connection); - } - } - - @UpdateForV9(owner = 
UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - private void legacyValidateJoin(DiscoveryNode discoveryNode, ActionListener listener, Transport.Connection connection) { - final var responseHandler = TransportResponseHandler.empty(responseExecutor, listener.delegateResponse((l, e) -> { - logger.warn(() -> "failed to validate incoming join request from node [" + discoveryNode + "]", e); - listener.onFailure( - new IllegalStateException( - String.format( - Locale.ROOT, - "failure when sending a join validation request from [%s] to [%s]", - transportService.getLocalNode().descriptionWithoutAttributes(), - discoveryNode.descriptionWithoutAttributes() - ), - e - ) - ); - })); - final var clusterState = clusterStateSupplier.get(); - if (clusterState != null) { - assert clusterState.nodes().isLocalNodeElectedMaster(); - transportService.sendRequest( - connection, - JOIN_VALIDATE_ACTION_NAME, - new ValidateJoinRequest(clusterState), - REQUEST_OPTIONS, - responseHandler - ); - } else { - transportService.sendRequest( - connection, - JoinHelper.JOIN_PING_ACTION_NAME, - new JoinHelper.JoinPingRequest(), - REQUEST_OPTIONS, - responseHandler - ); + listener.onFailure(new NodeClosedException(transportService.getLocalNode())); } } @@ -341,7 +297,6 @@ private class JoinValidation extends ActionRunnable { @Override protected void doRun() { - assert connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) : discoveryNode.getVersion(); // NB these things never run concurrently to each other, or to the cache cleaner (see IMPLEMENTATION NOTES above) so it is safe // to do these (non-atomic) things to the (unsynchronized) statesByVersion map. var transportVersion = connection.getTransportVersion(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java index 1d99f28e62582..c81e4877196b3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.bytes.BytesReference; @@ -29,19 +28,12 @@ public class ValidateJoinRequest extends TransportRequest { public ValidateJoinRequest(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { - // recent versions send a BytesTransportRequest containing a compressed representation of the state - final var bytes = in.readReleasableBytesReference(); - final var version = in.getTransportVersion(); - final var namedWriteableRegistry = in.namedWriteableRegistry(); - this.stateSupplier = () -> readCompressed(version, bytes, namedWriteableRegistry); - this.refCounted = bytes; - } else { - // older versions just contain the bare state - final var state = ClusterState.readFrom(in, null); - this.stateSupplier = () -> state; - this.refCounted = null; - } + // recent versions send a BytesTransportRequest containing a compressed representation of the state + final var bytes = in.readReleasableBytesReference(); + final var version = in.getTransportVersion(); + final var namedWriteableRegistry = in.namedWriteableRegistry(); + this.stateSupplier = () -> readCompressed(version, bytes, 
namedWriteableRegistry); + this.refCounted = bytes; } private static ClusterState readCompressed( @@ -68,7 +60,6 @@ public ValidateJoinRequest(ClusterState state) { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().before(TransportVersions.V_8_3_0); super.writeTo(out); stateSupplier.get().writeTo(out); } From 2ddd08aff7bea3a4ef1e4aea28d2ae63518902a1 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 24 Oct 2024 10:10:36 -0500 Subject: [PATCH 109/202] Fixing ingest simulate yaml rest test when there is a global legacy template (#115559) The ingest simulate yaml rest test `Test mapping addition works with indices without templates` tests what happens when an index has a mapping but matches no template at all. However, randomly and rarely a global match-all legacy template is applied to the cluster. When this happens, the assumptions for the test fail since the index matches a template. This PR removes that global legacy template so that the test works as intended. Closes #115412 Closes #115472 --- muted-tests.yml | 3 --- .../rest-api-spec/test/ingest/80_ingest_simulate.yml | 7 +++++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ba816ed5f3a9e..8c90f73f475e6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -264,9 +264,6 @@ tests: - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 -- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT - method: test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} - issue: https://github.com/elastic/elasticsearch/issues/115412 - class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT method: testFileSettingsReprocessedOnRestartWithoutVersionChange issue: https://github.com/elastic/elasticsearch/issues/115450 diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 4d1a62c6f179e..7ed5ad3154151 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -1586,6 +1586,13 @@ setup: cluster_features: ["simulate.support.non.template.mapping"] reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17" + # A global match-everything legacy template is added to the cluster sometimes (rarely). We have to get rid of this template if it exists + # because this test is making sure we get correct behavior when an index matches *no* template: + - do: + indices.delete_template: + name: '*' + ignore: 404 + # First, make sure that validation fails before we create the index (since we are only defining to bar field but trying to index a value # for foo. 
- do: From 79be69a5f87da015e6105a84537c590ae68c197b Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:22:13 +0300 Subject: [PATCH 110/202] Ignore _field_names warning in testRollupAfterRestart (#115563) --- .../org/elasticsearch/xpack/restart/FullClusterRestartIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index c57e5653d1279..a56ddaabe8280 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -435,6 +435,7 @@ public void testRollupAfterRestart() throws Exception { final Request bulkRequest = new Request("POST", "/_bulk"); bulkRequest.setJsonEntity(bulk.toString()); + bulkRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk())); client().performRequest(bulkRequest); // create the rollup job From fb6c729858b443956ba41c68495a5de084ffa73d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Oct 2024 08:47:52 -0700 Subject: [PATCH 111/202] Guard blob store local directory creation with doPrivileged (#115459) The blob store may be triggered to create a local directory while in a reduced privilege context. This commit guards the creation of directories with doPrivileged. --- docs/changelog/115459.yaml | 5 +++++ .../common/blobstore/fs/FsBlobStore.java | 15 ++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/115459.yaml diff --git a/docs/changelog/115459.yaml b/docs/changelog/115459.yaml new file mode 100644 index 0000000000000..b20a8f765c084 --- /dev/null +++ b/docs/changelog/115459.yaml @@ -0,0 +1,5 @@ +pr: 115459 +summary: Guard blob store local directory creation with `doPrivileged` +area: Infra/Core +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index c4240672239fa..53e3b4b4796dc 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.Iterator; import java.util.List; @@ -56,11 +58,14 @@ public int bufferSizeInBytes() { public BlobContainer blobContainer(BlobPath path) { Path f = buildPath(path); if (readOnly == false) { - try { - Files.createDirectories(f); - } catch (IOException ex) { - throw new ElasticsearchException("failed to create blob container", ex); - } + AccessController.doPrivileged((PrivilegedAction) () -> { + try { + Files.createDirectories(f); + } catch (IOException ex) { + throw new ElasticsearchException("failed to create blob container", ex); + } + return null; + }); } return new FsBlobContainer(this, path, f); } From 482d2aced5f888d548a755e0fe20fc6f83125d11 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 24 Oct 2024 17:58:36 +0200 Subject: [PATCH 112/202] Remove unused elasticsearch cloud docker image (#115357) --- 
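A minimal, self-contained sketch of the `doPrivileged` guard that #115459 above applies in `FsBlobStore`. The class name, method name, and error handling below are illustrative assumptions, not the actual Elasticsearch code:

[source,java]
----
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.AccessController;
import java.security.PrivilegedAction;

// Hypothetical stand-in for FsBlobStore: trusted library code that may be
// invoked while a less-privileged protection domain is on the call stack.
class PrivilegedDirectoryCreator {

    // doPrivileged truncates the access-control context, so the filesystem
    // permission check considers only this class's own protection domain
    // instead of every caller currently on the stack.
    static void ensureDirectory(Path dir) {
        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
            try {
                Files.createDirectories(dir);
            } catch (IOException e) {
                throw new UncheckedIOException("failed to create blob container", e);
            }
            return null;
        });
    }
}
----

The patch itself uses a raw `(PrivilegedAction)` cast and rethrows as `ElasticsearchException`; the typed `PrivilegedAction<Void>` shown here behaves the same but avoids an unchecked cast.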
.../gradle/internal/DockerBase.java | 3 --- distribution/docker/build.gradle | 25 +++---------------- .../cloud-docker-aarch64-export/build.gradle | 2 -- .../docker/cloud-docker-export/build.gradle | 2 -- .../build.gradle | 2 -- .../wolfi-ess-docker-export/build.gradle | 2 -- .../packaging/test/DockerTests.java | 11 +++----- .../test/KeystoreManagementTests.java | 5 +--- .../packaging/test/PackagingTestCase.java | 6 ++--- .../packaging/util/Distribution.java | 5 +--- .../packaging/util/docker/Docker.java | 2 +- .../packaging/util/docker/DockerRun.java | 1 - settings.gradle | 2 -- 13 files changed, 12 insertions(+), 56 deletions(-) delete mode 100644 distribution/docker/cloud-docker-aarch64-export/build.gradle delete mode 100644 distribution/docker/cloud-docker-export/build.gradle delete mode 100644 distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle delete mode 100644 distribution/docker/wolfi-ess-docker-export/build.gradle diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index fb52daf7e164f..0535f0bdc3cc8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -21,9 +21,6 @@ public enum DockerBase { // The Iron Bank base image is UBI (albeit hardened), but we are required to parameterize the Docker build IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank", "yum"), - // Base image with extras for Cloud - CLOUD("ubuntu:20.04", "-cloud", "apt-get"), - // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index e40ac68bbacf4..788e836f8f045 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -288,20 +288,6 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) { } } - if (base == DockerBase.CLOUD) { - // If we're performing a release build, but `build.id` hasn't been set, we can - // infer that we're not at the Docker building stage of the build, and therefore - // we should skip the beats part of the build. - String buildId = providers.systemProperty('build.id').getOrNull() - boolean includeBeats = VersionProperties.isElasticsearchSnapshot() == true || buildId != null || useDra - - if (includeBeats) { - from configurations.getByName("filebeat_${architecture.classifier}") - from configurations.getByName("metricbeat_${architecture.classifier}") - } - // For some reason, the artifact name can differ depending on what repository we used. 
-        rename ~/((?:file|metric)beat)-.*\.tar\.gz$/, "\$1-${VersionProperties.elasticsearch}.tar.gz"
-      }
     Provider serviceProvider = GradleUtils.getBuildService(
       project.gradle.sharedServices,
       DockerSupportPlugin.DOCKER_SUPPORT_SERVICE_NAME
@@ -381,7 +367,7 @@ private static List generateTags(DockerBase base, Architecture architect
   String image = "elasticsearch${base.suffix}"

   String namespace = 'elasticsearch'
-  if (base == DockerBase.CLOUD || base == DockerBase.CLOUD_ESS) {
+  if (base == DockerBase.CLOUD_ESS) {
     namespace += '-ci'
   }
@@ -439,7 +425,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
   }

-  if (base != DockerBase.IRON_BANK && base != DockerBase.CLOUD && base != DockerBase.CLOUD_ESS) {
+  if (base != DockerBase.IRON_BANK && base != DockerBase.CLOUD_ESS) {
     tasks.named("assemble").configure {
       dependsOn(buildDockerImageTask)
     }
@@ -548,10 +534,6 @@ subprojects { Project subProject ->
       base = DockerBase.IRON_BANK
     } else if (subProject.name.contains('cloud-ess-')) {
       base = DockerBase.CLOUD_ESS
-    } else if (subProject.name.contains('cloud-')) {
-      base = DockerBase.CLOUD
-    } else if (subProject.name.contains('wolfi-ess')) {
-      base = DockerBase.WOLFI_ESS
     } else if (subProject.name.contains('wolfi-')) {
       base = DockerBase.WOLFI
     }
@@ -559,10 +541,9 @@ subprojects { Project subProject ->
     final String arch = architecture == Architecture.AARCH64 ? '-aarch64' : ''
     final String extension = base == DockerBase.UBI ? 'ubi.tar' :
       (base == DockerBase.IRON_BANK ? 'ironbank.tar' :
-        (base == DockerBase.CLOUD ? 'cloud.tar' :
           (base == DockerBase.CLOUD_ESS ? 'cloud-ess.tar' :
             (base == DockerBase.WOLFI ? 'wolfi.tar' :
-              'docker.tar'))))
+              'docker.tar')))
     final String artifactName = "elasticsearch${arch}${base.suffix}_test"

     final String exportTaskName = taskName("export", architecture, base, 'DockerImage')
diff --git a/distribution/docker/cloud-docker-aarch64-export/build.gradle b/distribution/docker/cloud-docker-aarch64-export/build.gradle
deleted file mode 100644
index 537b5a093683e..0000000000000
--- a/distribution/docker/cloud-docker-aarch64-export/build.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-// This file is intentionally blank. All configuration of the
-// export is done in the parent project.
diff --git a/distribution/docker/cloud-docker-export/build.gradle b/distribution/docker/cloud-docker-export/build.gradle
deleted file mode 100644
index 537b5a093683e..0000000000000
--- a/distribution/docker/cloud-docker-export/build.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-// This file is intentionally blank. All configuration of the
-// export is done in the parent project.
diff --git a/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle b/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle
deleted file mode 100644
index 537b5a093683e..0000000000000
--- a/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-// This file is intentionally blank. All configuration of the
-// export is done in the parent project.
diff --git a/distribution/docker/wolfi-ess-docker-export/build.gradle b/distribution/docker/wolfi-ess-docker-export/build.gradle
deleted file mode 100644
index 537b5a093683e..0000000000000
--- a/distribution/docker/wolfi-ess-docker-export/build.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-// This file is intentionally blank. All configuration of the
-// export is done in the parent project.
diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 4ca97bff42333..8cb8354eb5d71 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -169,10 +169,7 @@ public void test012SecurityCanBeDisabled() throws Exception { * Checks that no plugins are initially active. */ public void test020PluginsListWithNoPlugins() { - assumeTrue( - "Only applies to non-Cloud images", - distribution.packaging != Packaging.DOCKER_CLOUD && distribution().packaging != Packaging.DOCKER_CLOUD_ESS - ); + assumeTrue("Only applies to non-Cloud images", distribution().packaging != Packaging.DOCKER_CLOUD_ESS); final Installation.Executables bin = installation.executables(); final Result r = sh.run(bin.pluginTool + " list"); @@ -1116,8 +1113,8 @@ public void test170DefaultShellIsBash() { */ public void test171AdditionalCliOptionsAreForwarded() throws Exception { assumeTrue( - "Does not apply to Cloud and Cloud ESS images, because they don't use the default entrypoint", - distribution.packaging != Packaging.DOCKER_CLOUD && distribution().packaging != Packaging.DOCKER_CLOUD_ESS + "Does not apply to Cloud ESS images, because they don't use the default entrypoint", + distribution().packaging != Packaging.DOCKER_CLOUD_ESS ); runContainer(distribution(), builder().runArgs("bin/elasticsearch", "-Ecluster.name=kimchy").envVar("ELASTIC_PASSWORD", PASSWORD)); @@ -1204,7 +1201,7 @@ public void test310IronBankImageHasNoAdditionalLabels() throws Exception { * Check that the Cloud image contains the required Beats */ public void test400CloudImageBundlesBeats() { - assumeTrue(distribution.packaging == Packaging.DOCKER_CLOUD || distribution.packaging == Packaging.DOCKER_CLOUD_ESS); + assumeTrue(distribution.packaging == Packaging.DOCKER_CLOUD_ESS); final List contents = listContents("/opt"); assertThat("Expected beats in /opt", contents, hasItems("filebeat", "metricbeat")); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java index a988a446f561f..02e1ce35764cf 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java @@ -436,10 +436,7 @@ private void verifyKeystorePermissions() { switch (distribution.packaging) { case TAR, ZIP -> assertThat(keystore, file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660)); case DEB, RPM -> assertThat(keystore, file(File, "root", "elasticsearch", p660)); - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> assertThat( - keystore, - DockerFileMatcher.file(p660) - ); + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> assertThat(keystore, DockerFileMatcher.file(p660)); default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index 644990105f60f..b4a00ca56924a 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ 
b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -245,7 +245,7 @@ protected static void install() throws Exception { installation = Packages.installPackage(sh, distribution); Packages.verifyPackageInstallation(installation, distribution, sh); } - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> { + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> { installation = Docker.runContainer(distribution); Docker.verifyContainerInstallation(installation); } @@ -335,7 +335,6 @@ public Shell.Result runElasticsearchStartCommand(String password, boolean daemon case DOCKER: case DOCKER_UBI: case DOCKER_IRON_BANK: - case DOCKER_CLOUD: case DOCKER_CLOUD_ESS: case DOCKER_WOLFI: // nothing, "installing" docker image is running it @@ -358,7 +357,6 @@ public void stopElasticsearch() throws Exception { case DOCKER: case DOCKER_UBI: case DOCKER_IRON_BANK: - case DOCKER_CLOUD: case DOCKER_CLOUD_ESS: case DOCKER_WOLFI: // nothing, "installing" docker image is running it @@ -373,7 +371,7 @@ public void awaitElasticsearchStartup(Shell.Result result) throws Exception { switch (distribution.packaging) { case TAR, ZIP -> Archives.assertElasticsearchStarted(installation); case DEB, RPM -> Packages.assertElasticsearchStarted(sh, installation); - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart(); + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart(); default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java index 05cef4a0818ba..11b8324384631 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -33,8 +33,6 @@ public Distribution(Path path) { this.packaging = Packaging.DOCKER_UBI; } else if (filename.endsWith(".ironbank.tar")) { this.packaging = Packaging.DOCKER_IRON_BANK; - } else if (filename.endsWith(".cloud.tar")) { - this.packaging = Packaging.DOCKER_CLOUD; } else if (filename.endsWith(".cloud-ess.tar")) { this.packaging = Packaging.DOCKER_CLOUD_ESS; } else if (filename.endsWith(".wolfi.tar")) { @@ -63,7 +61,7 @@ public boolean isPackage() { */ public boolean isDocker() { return switch (packaging) { - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> true; + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> true; default -> false; }; } @@ -77,7 +75,6 @@ public enum Packaging { DOCKER(".docker.tar", Platforms.isDocker()), DOCKER_UBI(".ubi.tar", Platforms.isDocker()), DOCKER_IRON_BANK(".ironbank.tar", Platforms.isDocker()), - DOCKER_CLOUD(".cloud.tar", Platforms.isDocker()), DOCKER_CLOUD_ESS(".cloud-ess.tar", Platforms.isDocker()), DOCKER_WOLFI(".wolfi.tar", Platforms.isDocker()); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java index c38eaa58f0552..0cd2823080b9b 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java @@ -532,7 +532,7 @@ public 
static void verifyContainerInstallation(Installation es) {
             )
         );

-        if (es.distribution.packaging == Packaging.DOCKER_CLOUD || es.distribution.packaging == Packaging.DOCKER_CLOUD_ESS) {
+        if (es.distribution.packaging == Packaging.DOCKER_CLOUD_ESS) {
             verifyCloudContainerInstallation(es);
         }
     }
diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java
index e562e7591564e..e3eac23d3ecce 100644
--- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java
+++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java
@@ -165,7 +165,6 @@ public static String getImageName(Distribution distribution) {
             case DOCKER -> "";
             case DOCKER_UBI -> "-ubi";
             case DOCKER_IRON_BANK -> "-ironbank";
-            case DOCKER_CLOUD -> "-cloud";
             case DOCKER_CLOUD_ESS -> "-cloud-ess";
             case DOCKER_WOLFI -> "-wolfi";
             default -> throw new IllegalStateException("Unexpected distribution packaging type: " + distribution.packaging);
diff --git a/settings.gradle b/settings.gradle
index a95a46a3569d7..39453e8d0935a 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -63,8 +63,6 @@ List projects = [
   'distribution:archives:linux-aarch64-tar',
   'distribution:archives:linux-tar',
   'distribution:docker',
-  'distribution:docker:cloud-docker-export',
-  'distribution:docker:cloud-docker-aarch64-export',
   'distribution:docker:cloud-ess-docker-export',
   'distribution:docker:cloud-ess-docker-aarch64-export',
   'distribution:docker:docker-aarch64-export',

From d500daf2e16bb3b6fb4bdde49bbf9d93b7fec25b Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Thu, 24 Oct 2024 18:02:11 +0200
Subject: [PATCH 113/202] [DOCS][101] Add BYO vectors ingestion tutorial
 (#115112)

---
 docs/reference/images/semantic-options.svg    |  62 ++++++++
 .../search-your-data/ingest-vectors.asciidoc  | 141 ++++++++++++++++++
 .../search-your-data/semantic-search.asciidoc |   3 +
 3 files changed, 206 insertions(+)
 create mode 100644 docs/reference/images/semantic-options.svg
 create mode 100644 docs/reference/search/search-your-data/ingest-vectors.asciidoc

diff --git a/docs/reference/images/semantic-options.svg b/docs/reference/images/semantic-options.svg
new file mode 100644
index 0000000000000..3bedf5307357e
--- /dev/null
+++ b/docs/reference/images/semantic-options.svg
@@ -0,0 +1,62 @@
+[SVG markup not preserved in this excerpt; only the diagram's text survives. The image, titled "Elasticsearch semantic search workflows", contrasts three setup paths: semantic_text (recommended; complexity low: create inference endpoint, define index mapping), the Inference API (complexity medium: create inference endpoint, configure model settings, define index mapping, set up ingest pipeline), and Model Deployment (complexity high: select NLP model, deploy with Eland client, define index mapping, set up ingest pipeline).]
diff --git a/docs/reference/search/search-your-data/ingest-vectors.asciidoc b/docs/reference/search/search-your-data/ingest-vectors.asciidoc
new file mode 100644
index 0000000000000..f288293d2b03a
--- /dev/null
+++ b/docs/reference/search/search-your-data/ingest-vectors.asciidoc
@@ -0,0 +1,141 @@
+[[bring-your-own-vectors]]
+=== Bring your own dense vector embeddings to {es}
+++++
+Bring your own dense vectors
+++++
+
+This tutorial demonstrates how to index documents that already have dense vector embeddings into {es}.
+You'll also learn the syntax for searching these documents using a `knn` query.
+ +You'll find links at the end of this tutorial for more information about deploying a text embedding model in {es}, so you can generate embeddings for queries on the fly. + +[TIP] +==== +This is an advanced use case. +Refer to <> for an overview of your options for semantic search with {es}. +==== + +[discrete] +[[bring-your-own-vectors-create-index]] +=== Step 1: Create an index with `dense_vector` mapping + +Each document in our simple dataset will have: + +* A review: stored in a `review_text` field +* An embedding of that review: stored in a `review_vector` field +** The `review_vector` field is defined as a <> data type. + +[TIP] +==== +The `dense_vector` type automatically uses `int8_hnsw` quantization by default to reduce the memory footprint required when searching float vectors. +Learn more about balancing performance and accuracy in <>. +==== + +[source,console] +---- +PUT /amazon-reviews +{ + "mappings": { + "properties": { + "review_vector": { + "type": "dense_vector", + "dims": 8, <1> + "index": true, <2> + "similarity": "cosine" <3> + }, + "review_text": { + "type": "text" + } + } + } +} +---- +// TEST SETUP +<1> The `dims` parameter must match the length of the embedding vector. Here we're using a simple 8-dimensional embedding for readability. If not specified, `dims` will be dynamically calculated based on the first indexed document. +<2> The `index` parameter is set to `true` to enable the use of the `knn` query. +<3> The `similarity` parameter defines the similarity function used to compare the query vector to the document vectors. `cosine` is the default similarity function for `dense_vector` fields in {es}. + +[discrete] +[[bring-your-own-vectors-index-documents]] +=== Step 2: Index documents with embeddings + +[discrete] +==== Index a single document + +First, index a single document to understand the document structure. + +[source,console] +---- +PUT /amazon-reviews/_doc/1 +{ + "review_text": "This product is lifechanging! I'm telling all my friends about it.", + "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] <1> +} +---- +// TEST +<1> The size of the `review_vector` array is 8, matching the `dims` count specified in the mapping. + +[discrete] +==== Bulk index multiple documents + +In a production scenario, you'll want to index many documents at once using the <>. + +Here's an example of indexing multiple documents in a single `_bulk` request. + +[source,console] +---- +POST /_bulk +{ "index": { "_index": "amazon-reviews", "_id": "2" } } +{ "review_text": "This product is amazing! I love it.", "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] } +{ "index": { "_index": "amazon-reviews", "_id": "3" } } +{ "review_text": "This product is terrible. I hate it.", "review_vector": [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] } +{ "index": { "_index": "amazon-reviews", "_id": "4" } } +{ "review_text": "This product is great. I can do anything with it.", "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] } +{ "index": { "_index": "amazon-reviews", "_id": "5" } } +{ "review_text": "This product has ruined my life and the lives of my family and friends.", "review_vector": [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] } +---- +// TEST[continued] + +[discrete] +[[bring-your-own-vectors-search-documents]] +=== Step 3: Search documents with embeddings + +Now you can query these document vectors using a <>. +`knn` is a type of vector search, which finds the `k` most similar documents to a query vector. 
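Because the mapping in Step 1 declares `"similarity": "cosine"`, "most similar" here means highest cosine similarity. As a quick reference (standard notation, not something defined elsewhere in this tutorial), for a query vector latexmath:[q] and a document vector latexmath:[d]:

[latexmath]
++++
\cos(q, d) = \frac{q \cdot d}{\lVert q \rVert \, \lVert d \rVert}
++++

{es} rescales this into a non-negative relevance score; for the `cosine` similarity the document `_score` is `(1 + cos(q, d)) / 2`, so scores fall between 0 and 1.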
+Here we're simply using a raw vector for the query text, for demonstration purposes. + +[source,console] +---- +POST /amazon-reviews/_search +{ + "retriever": { + "knn": { + "field": "review_vector", + "query_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], <1> + "k": 2, <2> + "num_candidates": 5 <3> + } + } +} +---- +// TEST[skip:flakeyknnerror] +<1> In this simple example, we're sending a raw vector as the query text. In a real-world scenario, you'll need to generate vectors for queries using an embedding model. +<2> The `k` parameter specifies the number of results to return. +<3> The `num_candidates` parameter is optional. It limits the number of candidates returned by the search node. This can improve performance and reduce costs. + +[discrete] +[[bring-your-own-vectors-learn-more]] +=== Learn more + +In this simple example, we're sending a raw vector for the query text. +In a real-world scenario you won't know the query text ahead of time. +You'll need to generate query vectors, on the fly, using the same embedding model that generated the document vectors. + +For this you'll need to deploy a text embedding model in {es} and use the <>. Alternatively, you can generate vectors client-side and send them directly with the search request. + +Learn how to <> for semantic search. + +[TIP] +==== +If you're just getting started with vector search in {es}, refer to <>. +==== diff --git a/docs/reference/search/search-your-data/semantic-search.asciidoc b/docs/reference/search/search-your-data/semantic-search.asciidoc index 0ef8591e42b5d..e0fb8415fee18 100644 --- a/docs/reference/search/search-your-data/semantic-search.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search.asciidoc @@ -8,6 +8,8 @@ Using an NLP model enables you to extract text embeddings out of text. Embeddings are vectors that provide a numeric representation of a text. Pieces of content with similar meaning have similar representations. 
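"Similar representations" is measurable: embeddings of related texts point in nearby directions, and cosine similarity captures exactly that. A self-contained sketch -- the three vectors below are invented stand-ins for real model output -- makes the idea concrete:

[source,java]
----
public class CosineDemo {
    // Cosine similarity: dot product divided by the product of the vector lengths.
    static double cosine(double[] a, double[] b) {
        double dot = 0, normA = 0, normB = 0;
        for (int i = 0; i < a.length; i++) {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        return dot / (Math.sqrt(normA) * Math.sqrt(normB));
    }

    public static void main(String[] args) {
        double[] cat = { 0.9, 0.1, 0.0 };    // made-up embedding for "cat"
        double[] kitten = { 0.8, 0.2, 0.1 }; // made-up embedding for "kitten"
        double[] car = { 0.0, 0.2, 0.9 };    // made-up embedding for "car"
        System.out.println(cosine(cat, kitten)); // ~0.98: similar meaning, nearby vectors
        System.out.println(cosine(cat, car));    // ~0.02: unrelated meaning, distant vectors
    }
}
----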
+image::images/semantic-options.svg[Overview of semantic search workflows in {es}] + You have several options for using NLP models in the {stack}: * use the `semantic_text` workflow (recommended) @@ -109,3 +111,4 @@ include::semantic-search-inference.asciidoc[] include::semantic-search-elser.asciidoc[] include::cohere-es.asciidoc[] include::semantic-search-deploy-model.asciidoc[] +include::ingest-vectors.asciidoc[] From a270ee3f9c3e0dcfdd2874d8f64b9612098ddaf3 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 03:05:08 +1100 Subject: [PATCH 114/202] Mute org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT test {yaml=reference/esql/esql-across-clusters/line_197} #115575 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8c90f73f475e6..ab5d686a041c1 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -273,6 +273,9 @@ tests: - class: org.elasticsearch.test.apmintegration.MetricsApmIT method: testApmIntegration issue: https://github.com/elastic/elasticsearch/issues/115415 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/esql/esql-across-clusters/line_197} + issue: https://github.com/elastic/elasticsearch/issues/115575 # Examples: # From c64226c3503b458c3285064d95528932d324177d Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 18:19:14 +0200 Subject: [PATCH 115/202] Don't return or accept `node_version` in the Desired Nodes API (#114580) It was deprecated in #104209 (8.13) and shouldn't be set or returned in 9.0 The Desired Nodes API is an internal API, and users shouldn't depend on its backward compatibility. --- .../upgrades/DesiredNodesUpgradeIT.java | 13 +-- rest-api-spec/build.gradle | 2 + .../test/cluster.desired_nodes/10_basic.yml | 95 ------------------- .../cluster/metadata/DesiredNode.java | 77 +-------------- .../metadata/DesiredNodeWithStatus.java | 5 +- .../cluster/RestUpdateDesiredNodesAction.java | 12 --- 6 files changed, 13 insertions(+), 191 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index e0d1e7aafa637..17618d5439d48 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -11,7 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; -import org.elasticsearch.Build; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; @@ -82,8 +81,7 @@ private void assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent() throws Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), 1238.49922909, ByteSizeValue.ofGb(32), - ByteSizeValue.ofGb(128), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(128) ) ) .toList(); @@ -153,8 +151,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), processorsPrecision == ProcessorsPrecision.DOUBLE ? 
randomDoubleProcessorCount() : 0.5f, ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ) ) .toList(); @@ -167,8 +164,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ); }).toList(); } @@ -182,8 +178,7 @@ private void addClusterNodesToDesiredNodesWithIntegerProcessors(int version) thr Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), randomIntBetween(1, 24), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ) ) .toList(); diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 6cc2028bffa39..1a398f79085e7 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -61,4 +61,6 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") + task.skipTest("cluster.desired_nodes/10_basic/Test delete desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") + task.skipTest("cluster.desired_nodes/10_basic/Test update desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml index 1d1aa524ffb21..a45146a4e147a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml @@ -59,61 +59,6 @@ teardown: - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb" } } - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb" } } --- -"Test update desired nodes with node_version generates a warning": - - skip: - reason: "contains is a newly added assertion" - features: ["contains", "allowed_warnings"] - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - set: { nodes.$master.version: es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 1 - body: - nodes: - - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying 
node_version in desired nodes requests is deprecated." - - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - match: - $body: - history_id: "test" - version: 1 - nodes: - - { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 2 - body: - nodes: - - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - - { settings: { "node.name": "instance-000188" }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying node_version in desired nodes requests is deprecated." - - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - - match: { history_id: "test" } - - match: { version: 2 } - - length: { nodes: 2 } - - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } } - - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } } ---- "Test update move to a new history id": - skip: reason: "contains is a newly added assertion" @@ -199,46 +144,6 @@ teardown: _internal.get_desired_nodes: {} - match: { status: 404 } --- -"Test delete desired nodes with node_version generates a warning": - - skip: - features: allowed_warnings - - do: - cluster.state: {} - - - set: { master_node: master } - - - do: - nodes.info: {} - - set: { nodes.$master.version: es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 1 - body: - nodes: - - { settings: { "node.external_id": "instance-000187" }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying node_version in desired nodes requests is deprecated." 
- - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - match: - $body: - history_id: "test" - version: 1 - nodes: - - { settings: { node: { external_id: "instance-000187" } }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } - - - do: - _internal.delete_desired_nodes: {} - - - do: - catch: missing - _internal.get_desired_nodes: {} - - match: { status: 404 } ---- "Test update desired nodes is idempotent": - skip: reason: "contains is a newly added assertion" diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index fb8559b19d81d..fe72a59565cf6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -14,7 +14,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -22,7 +21,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.Processors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -38,7 +36,6 @@ import java.util.Set; import java.util.TreeSet; import java.util.function.Predicate; -import java.util.regex.Pattern; import static java.lang.String.format; import static org.elasticsearch.node.Node.NODE_EXTERNAL_ID_SETTING; @@ -58,8 +55,6 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl private static final ParseField PROCESSORS_RANGE_FIELD = new ParseField("processors_range"); private static final ParseField MEMORY_FIELD = new ParseField("memory"); private static final ParseField STORAGE_FIELD = new ParseField("storage"); - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field - private static final ParseField VERSION_FIELD = new ParseField("node_version"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "desired_node", @@ -69,8 +64,7 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl (Processors) args[1], (ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4], - (String) args[5] + (ByteSizeValue) args[4] ) ); @@ -104,12 +98,6 @@ static void configureParser(ConstructingObjectParser parser) { STORAGE_FIELD, ObjectParser.ValueType.STRING ); - parser.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> p.text(), - VERSION_FIELD, - ObjectParser.ValueType.STRING - ); } private final Settings settings; @@ -118,21 +106,9 @@ static void configureParser(ConstructingObjectParser parser) { private final ByteSizeValue memory; private final ByteSizeValue storage; - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated version field - private final String version; private final String externalId; private final Set roles; - @Deprecated - public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage, String version) { - 
this(settings, null, processorsRange, memory, storage, version); - } - - @Deprecated - public DesiredNode(Settings settings, double processors, ByteSizeValue memory, ByteSizeValue storage, String version) { - this(settings, Processors.of(processors), null, memory, storage, version); - } - public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { this(settings, null, processorsRange, memory, storage); } @@ -142,17 +118,6 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B } DesiredNode(Settings settings, Processors processors, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { - this(settings, processors, processorsRange, memory, storage, null); - } - - DesiredNode( - Settings settings, - Processors processors, - ProcessorsRange processorsRange, - ByteSizeValue memory, - ByteSizeValue storage, - @Deprecated String version - ) { assert settings != null; assert memory != null; assert storage != null; @@ -186,7 +151,6 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B this.processorsRange = processorsRange; this.memory = memory; this.storage = storage; - this.version = version; this.externalId = NODE_EXTERNAL_ID_SETTING.get(settings); this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(DiscoveryNode.getRolesFromSettings(settings))); } @@ -210,19 +174,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { } else { version = Version.readVersion(in).toString(); } - return new DesiredNode(settings, processors, processorsRange, memory, storage, version); - } - - private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*"); - - private static Version parseLegacyVersion(String version) { - if (version != null) { - var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version); - if (semanticVersionMatcher.matches()) { - return Version.fromString(semanticVersionMatcher.group(1)); - } - } - return null; + return new DesiredNode(settings, processors, processorsRange, memory, storage); } @Override @@ -239,15 +191,9 @@ public void writeTo(StreamOutput out) throws IOException { memory.writeTo(out); storage.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { - out.writeOptionalString(version); + out.writeOptionalString(null); } else { - Version parsedVersion = parseLegacyVersion(version); - if (version == null) { - // Some node is from before we made the version field not required. If so, fill in with the current node version. 
- Version.writeVersion(Version.CURRENT, out); - } else { - Version.writeVersion(parsedVersion, out); - } + Version.writeVersion(Version.CURRENT, out); } } @@ -275,14 +221,6 @@ public void toInnerXContent(XContentBuilder builder, Params params) throws IOExc } builder.field(MEMORY_FIELD.getPreferredName(), memory); builder.field(STORAGE_FIELD.getPreferredName(), storage); - addDeprecatedVersionField(builder); - } - - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field from response - private void addDeprecatedVersionField(XContentBuilder builder) throws IOException { - if (version != null) { - builder.field(VERSION_FIELD.getPreferredName(), version); - } } public boolean hasMasterRole() { @@ -366,7 +304,6 @@ private boolean equalsWithoutProcessorsSpecification(DesiredNode that) { return Objects.equals(settings, that.settings) && Objects.equals(memory, that.memory) && Objects.equals(storage, that.storage) - && Objects.equals(version, that.version) && Objects.equals(externalId, that.externalId) && Objects.equals(roles, that.roles); } @@ -379,7 +316,7 @@ public boolean equalsWithProcessorsCloseTo(DesiredNode that) { @Override public int hashCode() { - return Objects.hash(settings, processors, processorsRange, memory, storage, version, externalId, roles); + return Objects.hash(settings, processors, processorsRange, memory, storage, externalId, roles); } @Override @@ -408,10 +345,6 @@ public String toString() { + '}'; } - public boolean hasVersion() { - return Strings.isNullOrBlank(version) == false; - } - public record ProcessorsRange(Processors min, @Nullable Processors max) implements Writeable, ToXContentObject { private static final ParseField MIN_FIELD = new ParseField("min"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java index 7b89406be9aa0..606309adf205c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java @@ -44,13 +44,12 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status) (Processors) args[1], (DesiredNode.ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4], - (String) args[5] + (ByteSizeValue) args[4] ), // An unknown status is expected during upgrades to versions >= STATUS_TRACKING_SUPPORT_VERSION // the desired node status would be populated when a node in the newer version is elected as // master, the desired nodes status update happens in NodeJoinExecutor. - args[6] == null ? Status.PENDING : (Status) args[6] + args[5] == null ? 
Status.PENDING : (Status) args[5] ) ); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index ec8bb6285bdd4..b8e1fa0c836a3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -12,13 +12,11 @@ import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesAction; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -67,16 +65,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli ); } - if (clusterSupportsFeature.test(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED)) { - if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) { - deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE); - } - } else { - if (updateDesiredNodesRequest.getNodes().stream().anyMatch(n -> n.hasVersion() == false)) { - throw new XContentParseException("[node_version] field is required and must have a valid value"); - } - } - return restChannel -> client.execute( UpdateDesiredNodesAction.INSTANCE, updateDesiredNodesRequest, From ebec1a2fe2bc2b9fc40401074dbbb0dbcdc800bd Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:25:38 +0200 Subject: [PATCH 116/202] Improve Logsdb docs including default values (#115205) This PR adds detailed documentation for `logsdb` mode, covering several key aspects of its default behavior and configuration options. It includes: - default settings for index sorting (`index.sort.field`, `index.sort.order`, etc.). - usage of synthetic `_source` by default. - information about specialized codecs and how users can override them. - default behavior for `ignore_malformed` and `ignore_above` settings, including precedence rules. - explanation of how fields without `doc_values` are handled and what we do if they are missing. --- docs/reference/data-streams/logs.asciidoc | 180 +++++++++++++++++++++- 1 file changed, 172 insertions(+), 8 deletions(-) diff --git a/docs/reference/data-streams/logs.asciidoc b/docs/reference/data-streams/logs.asciidoc index e870289bcf7be..6bb98684544a3 100644 --- a/docs/reference/data-streams/logs.asciidoc +++ b/docs/reference/data-streams/logs.asciidoc @@ -8,14 +8,6 @@ A logs data stream is a data stream type that stores log data more efficiently. In benchmarks, log data stored in a logs data stream used ~2.5 times less disk space than a regular data stream. The exact impact will vary depending on your data set. -The following features are enabled in a logs data stream: - -* <>, which omits storing the `_source` field. When the document source is requested, it is synthesized from document fields upon retrieval. - -* Index sorting. 
This yields a lower storage footprint. By default indices are sorted by `host.name` and `@timestamp` fields at index time. - -* More space efficient compression for fields with <> enabled. - [discrete] [[how-to-use-logsds]] === Create a logs data stream @@ -50,3 +42,175 @@ DELETE _index_template/my-index-template ---- // TEST[continued] //// + +[[logsdb-default-settings]] + +[discrete] +[[logsdb-synthtic-source]] +=== Synthetic source + +By default, `logsdb` mode uses <>, which omits storing the original `_source` +field and synthesizes it from doc values or stored fields upon document retrieval. Synthetic source comes with a few +restrictions which you can read more about in the <> section dedicated to it. + +NOTE: When dealing with multi-value fields, the `index.mapping.synthetic_source_keep` setting controls how field values +are preserved for <> reconstruction. In `logsdb`, the default value is `arrays`, +which retains both duplicate values and the order of entries but not necessarily the exact structure when it comes to +array elements or objects. Preserving duplicates and ordering could be critical for some log fields. This could be the +case, for instance, for DNS A records, HTTP headers, or log entries that represent sequential or repeated events. + +For more details on this setting and ways to refine or bypass it, check out <>. + +[discrete] +[[logsdb-sort-settings]] +=== Index sort settings + +The following settings are applied by default when using the `logsdb` mode for index sorting: + +* `index.sort.field`: `["host.name", "@timestamp"]` + In `logsdb` mode, indices are sorted by `host.name` and `@timestamp` fields by default. For data streams, the + `@timestamp` field is automatically injected if it is not present. + +* `index.sort.order`: `["desc", "desc"]` + The default sort order for both fields is descending (`desc`), prioritizing the latest data. + +* `index.sort.mode`: `["min", "min"]` + The default sort mode is `min`, ensuring that indices are sorted by the minimum value of multi-value fields. + +* `index.sort.missing`: `["_first", "_first"]` + Missing values are sorted to appear first (`_first`) in `logsdb` index mode. + +`logsdb` index mode allows users to override the default sort settings. For instance, users can specify their own fields +and order for sorting by modifying the `index.sort.field` and `index.sort.order`. + +When using default sort settings, the `host.name` field is automatically injected into the mappings of the +index as a `keyword` field to ensure that sorting can be applied. This guarantees that logs are efficiently sorted and +retrieved based on the `host.name` and `@timestamp` fields. + +NOTE: If `subobjects` is set to `true` (which is the default), the `host.name` field will be mapped as an object field +named `host`, containing a `name` child field of type `keyword`. On the other hand, if `subobjects` is set to `false`, +a single `host.name` field will be mapped as a `keyword` field. + +Once an index is created, the sort settings are immutable and cannot be modified. To apply different sort settings, +a new index must be created with the desired configuration. For data streams, this can be achieved by means of an index +rollover after updating relevant (component) templates. + +If the default sort settings are not suitable for your use case, consider modifying them. Keep in mind that sort +settings can influence indexing throughput, query latency, and may affect compression efficiency due to the way data +is organized after sorting. 
For more details, refer to our documentation on
+<>.
+
+NOTE: For <>, the `@timestamp` field is automatically injected if not already present.
+However, if custom sort settings are applied, the `@timestamp` field is injected into the mappings, but it is not
+automatically added to the list of sort fields.
+
+[discrete]
+[[logsdb-specialized-codecs]]
+=== Specialized codecs
+
+`logsdb` index mode uses the `best_compression` <> by default, which applies {wikipedia}/Zstd[ZSTD]
+compression to stored fields. Users are allowed to override it and switch to the `default` codec for faster compression
+at the expense of a slightly larger storage footprint.
+
+`logsdb` index mode also adopts specialized codecs for numeric doc values that are crafted to optimize storage usage.
+Users can rely on these specialized codecs being applied by default when using `logsdb` index mode.
+
+Doc values encoding for numeric fields in `logsdb` follows a static sequence of codecs, applying each one in the
+following order: delta encoding, offset encoding, Greatest Common Divisor (GCD) encoding, and finally Frame Of Reference
+(FOR) encoding. The decision to apply each encoding is based on heuristics determined by the data distribution.
+For example, before applying delta encoding, the algorithm checks if the data is monotonically non-decreasing or
+non-increasing. If the data fits this pattern, delta encoding is applied; otherwise, the next encoding is considered.
+
+The encoding is specific to each Lucene segment and is also re-applied at segment merging time. The merged Lucene segment
+may use a different encoding compared to the original Lucene segments, based on the characteristics of the merged data.
+
+The following methods are applied sequentially:
+
+* **Delta encoding**:
+  a compression method that stores the difference between consecutive values instead of the actual values.
+
+* **Offset encoding**:
+  a compression method that stores the difference from a base value rather than between consecutive values.
+
+* **Greatest Common Divisor (GCD) encoding**:
+  a compression method that finds the greatest common divisor of a set of values and stores the differences
+  as multiples of the GCD.
+
+* **Frame Of Reference (FOR) encoding**:
+  a compression method that determines the smallest number of bits required to encode a block of values and uses
+  bit-packing to fit such values into larger 64-bit blocks.
+
+For keyword fields, **Run Length Encoding (RLE)** is applied to the ordinals, which represent positions in the Lucene
+segment-level keyword dictionary. This compression is used when multiple consecutive documents share the same keyword.
+
+[discrete]
+[[logsdb-ignored-settings]]
+=== `ignore_malformed`, `ignore_above`, `ignore_dynamic_beyond_limit`
+
+By default, `logsdb` index mode sets `ignore_malformed` to `true`. This setting allows documents with malformed fields
+to be indexed without causing indexing failures, ensuring that log data ingestion continues smoothly even when some
+fields contain invalid or improperly formatted data.
+
+Users can override this setting by setting `index.mapping.ignore_malformed` to `false`. However, this is not recommended
+as it might result in documents with malformed fields being rejected and not indexed at all.
+
+In `logsdb` index mode, the `index.mapping.ignore_above` setting is applied by default at the index level to ensure
+efficient storage and indexing of large keyword fields. The index-level default for `ignore_above` is set to 8191
+**characters**. 
If using UTF-8 encoding, this results in a limit of 32764 bytes, depending on character encoding. +The mapping-level `ignore_above` setting still takes precedence. If a specific field has an `ignore_above` value +defined in its mapping, that value will override the index-level `index.mapping.ignore_above` value. This default +behavior helps to optimize indexing performance by preventing excessively large string values from being indexed, while +still allowing users to customize the limit, overriding it at the mapping level or changing the index level default +setting. + +In `logsdb` index mode, the setting `index.mapping.total_fields.ignore_dynamic_beyond_limit` is set to `true` by +default. This allows dynamically mapped fields to be added on top of statically defined fields without causing document +rejection, even after the total number of fields exceeds the limit defined by `index.mapping.total_fields.limit`. The +`index.mapping.total_fields.limit` setting specifies the maximum number of fields an index can have (static, dynamic +and runtime). When the limit is reached, new dynamically mapped fields will be ignored instead of failing the document +indexing, ensuring continued log ingestion without errors. + +NOTE: When automatically injected, `host.name` and `@timestamp` contribute to the limit of mapped fields. When +`host.name` is mapped with `subobjects: true` it consists of two fields. When `host.name` is mapped with +`subobjects: false` it only consists of one field. + +[discrete] +[[logsdb-nodocvalue-fields]] +=== Fields without doc values + +When `logsdb` index mode uses synthetic `_source`, and `doc_values` are disabled for a field in the mapping, +Elasticsearch may set the `store` setting to `true` for that field as a last resort option to ensure that the field's +data is still available for reconstructing the document’s source when retrieving it via +<>. + +For example, this happens with text fields when `store` is `false` and there is no suitable multi-field available to +reconstruct the original value in <>. + +This automatic adjustment allows synthetic source to work correctly, even when doc values are not enabled for certain +fields. + +[discrete] +[[logsdb-settings-summary]] +=== LogsDB settings summary + +The following is a summary of key settings that apply when using `logsdb` index mode in Elasticsearch: + +* **`index.mode`**: `"logsdb"` + +* **`index.mapping.synthetic_source_keep`**: `"arrays"` + +* **`index.sort.field`**: `["host.name", "@timestamp"]` + +* **`index.sort.order`**: `["desc", "desc"]` + +* **`index.sort.mode`**: `["min", "min"]` + +* **`index.sort.missing`**: `["_first", "_first"]` + +* **`index.codec`**: `"best_compression"` + +* **`index.mapping.ignore_malformed`**: `true` + +* **`index.mapping.ignore_above`**: `8191` + +* **`index.mapping.total_fields.ignore_dynamic_beyond_limit`**: `true` From 160faa2dfc8c590dcb398487b79eb51eb84f8f44 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Oct 2024 09:29:34 -0700 Subject: [PATCH 117/202] Re-enable threadpool blocking in Kibana system index test (#112569) KibanaThreadPoolIT checks the Kibana system user can write (using the system read/write threadpools) even when the normal read/write threadpools are blocked. This commit re-enables a key part of the test which was disabled. 
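The blocking technique itself is simple: saturate a pool's workers and its bounded queue so that any further
submission is rejected. A rough JDK-only sketch of the idea (not the helper the test actually uses, which goes
through Elasticsearch's own thread pool plumbing):

----
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BlockedPoolDemo {
    public static void main(String[] args) throws Exception {
        // One worker thread and a one-slot queue: trivial to saturate deterministically.
        ThreadPoolExecutor pool =
            new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(1));
        CountDownLatch release = new CountDownLatch(1);
        Runnable blocker = () -> {
            try {
                release.await(); // hold the worker until we let it go
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
        pool.execute(blocker); // occupies the single worker
        pool.execute(blocker); // fills the single queue slot
        try {
            pool.execute(() -> {}); // no capacity left: rejected immediately
        } catch (RejectedExecutionException e) {
            System.out.println("rejected as expected: " + e);
        } finally {
            release.countDown();
            pool.shutdown();
        }
    }
}
----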
closes #107625 --- .../kibana/KibanaThreadPoolIT.java | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java index 61bd31fea3455..553e4696af316 100644 --- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java +++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java @@ -12,6 +12,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; @@ -37,6 +39,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.startsWith; /** @@ -150,15 +153,15 @@ private void assertThreadPoolsBlocked() { new Thread(() -> expectThrows(EsRejectedExecutionException.class, () -> getFuture.actionGet(SAFE_AWAIT_TIMEOUT))).start(); // intentionally commented out this test until https://github.com/elastic/elasticsearch/issues/97916 is fixed - // var e3 = expectThrows( - // SearchPhaseExecutionException.class, - // () -> client().prepareSearch(USER_INDEX) - // .setQuery(QueryBuilders.matchAllQuery()) - // // Request times out if max concurrent shard requests is set to 1 - // .setMaxConcurrentShardRequests(usually() ? SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) - // .get() - // ); - // assertThat(e3.getMessage(), containsString("all shards failed")); + var e3 = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch(USER_INDEX) + .setQuery(QueryBuilders.matchAllQuery()) + // Request times out if max concurrent shard requests is set to 1 + .setMaxConcurrentShardRequests(usually() ? 
SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) + .get() + ); + assertThat(e3.getMessage(), containsString("all shards failed")); } protected void runWithBlockedThreadPools(Runnable runnable) throws Exception { From 5c1a3ada8ae7a790dfd8460c76c6a341d9d42b7a Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 19:37:02 +0300 Subject: [PATCH 118/202] Propagate root subobjects setting to downsample indexes (#115358) * Propagate root subobjects setting to downsample indexes * exclude tests from rest compat * remove subobjects propagation --- x-pack/plugin/downsample/qa/rest/build.gradle | 14 + .../downsample/DownsampleWithBasicRestIT.java | 40 ++ .../test/downsample/10_basic.yml | 466 +++++++++--------- 3 files changed, 292 insertions(+), 228 deletions(-) create mode 100644 x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index ba5ac7b0c7317..5142632a36006 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -32,6 +32,20 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } +tasks.named("yamlRestCompatTestTransform").configure ({ task -> + task.skipTest("downsample/10_basic/Downsample index with empty dimension on routing path", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample histogram as label", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample date timestamp field using strict_date_optional_time_nanos format", + "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample a downsampled index", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample date_nanos timestamp field using custom format", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample using coarse grained timestamp", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample label with ignore_above", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample object field", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample empty and missing labels", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample index", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample index with empty dimension", "Skip until pr/115358 gets backported") +}) if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java new file mode 100644 index 0000000000000..8f75e76315844 --- /dev/null +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.downsample; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; + +public class DownsampleWithBasicRestIT extends ESClientYamlSuiteTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "false") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public DownsampleWithBasicRestIT(final ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + +} diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml index 0bcd35cc69038..fa3560bec516e 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml @@ -16,6 +16,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -106,6 +107,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -172,6 +174,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -237,6 +240,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -318,29 +322,29 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.multi-counter: 0 } - - match: { hits.hits.0._source.k8s.pod.scaled-counter: 0.00 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 100 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 102 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 607 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 6 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.min: 100.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.max: 101.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.sum: 201.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.value_count: 2 } - - match: { hits.hits.0._source.k8s.pod.network.tx.min: 1434521831 } - - match: { hits.hits.0._source.k8s.pod.network.tx.max: 1434577921 } - - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 2 } - - 
match: { hits.hits.0._source.k8s.pod.ip: "10.10.55.56" } - - match: { hits.hits.0._source.k8s.pod.created_at: "2021-04-28T19:43:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.number_of_containers: 1 } - - match: { hits.hits.0._source.k8s.pod.tags: ["backend", "test", "us-west2"] } - - match: { hits.hits.0._source.k8s.pod.values: [1, 1, 2] } - - is_false: hits.hits.0._source.k8s.pod.running + - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-counter: 0.00 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 100 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 102 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 607 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 6 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.min: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.max: 101.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.sum: 201.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.value_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 1434521831 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 1434577921 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.ip: "10.10.55.56" } + - match: { hits.hits.0._source.k8s\.pod\.created_at: "2021-04-28T19:43:00.000Z" } + - match: { hits.hits.0._source.k8s\.pod\.number_of_containers: 1 } + - match: { hits.hits.0._source.k8s\.pod\.tags: ["backend", "test", "us-west2"] } + - match: { hits.hits.0._source.k8s\.pod\.values: [1, 1, 2] } + - is_false: hits.hits.0._source.k8s\.pod\.running # Assert rollup index settings - do: @@ -362,21 +366,21 @@ setup: - match: { test-downsample.mappings.properties.@timestamp.type: date } - match: { test-downsample.mappings.properties.@timestamp.meta.fixed_interval: 1h } - match: { test-downsample.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.type: aggregate_metric_double } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.default_metric: max } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.time_series_metric: gauge } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.type: long } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.time_series_metric: counter } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.type: scaled_float } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.scaling_factor: 100 } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.time_series_metric: counter } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.type: aggregate_metric_double } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.default_metric: max } - - match: { 
test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.time_series_metric: gauge } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.type: aggregate_metric_double } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.default_metric: max } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.time_series_metric: gauge } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-counter.type: long } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.type: scaled_float } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.scaling_factor: 100 } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.type: aggregate_metric_double } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.default_metric: max } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.time_series_metric: gauge } + - match: { test-downsample.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } # Assert source index has not been deleted @@ -763,18 +767,18 @@ setup: - match: { test-downsample-2.mappings.properties.@timestamp.type: date } - match: { test-downsample-2.mappings.properties.@timestamp.meta.fixed_interval: 2h } - match: { test-downsample-2.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.type: aggregate_metric_double } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.default_metric: max } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.time_series_metric: gauge } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-counter.type: long } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-counter.time_series_metric: counter } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.type: aggregate_metric_double } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.default_metric: max } - - match: { 
test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.time_series_metric: gauge } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.type: aggregate_metric_double } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.default_metric: max } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.time_series_metric: gauge } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-counter.type: long } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-counter.time_series_metric: counter } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.type: aggregate_metric_double } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.default_metric: max } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.time_series_metric: gauge } - do: search: @@ -784,29 +788,29 @@ setup: - length: { hits.hits: 3 } - match: { hits.hits.0._source._doc_count: 4 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.multi-counter: 76 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 95.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 110.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 1209.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 12 } - - match: { hits.hits.0._source.k8s.pod.network.tx.min: 1434521831 } - - match: { hits.hits.0._source.k8s.pod.network.tx.max: 1434595272 } - - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 4 } - - match: { hits.hits.0._source.k8s.pod.ip: "10.10.55.120" } - - match: { hits.hits.0._source.k8s.pod.created_at: "2021-04-28T19:45:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.number_of_containers: 1 } - - match: { hits.hits.0._source.k8s.pod.tags: [ "backend", "test", "us-west1" ] } - - match: { hits.hits.0._source.k8s.pod.values: [ 1, 2, 3 ] } - - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 76 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 95.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 110.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 1209.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 12 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 1434521831 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 1434595272 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 4 } + - match: { hits.hits.0._source.k8s\.pod\.ip: "10.10.55.120" } + - match: { hits.hits.0._source.k8s\.pod\.created_at: "2021-04-28T19:45:00.000Z" } + - match: { hits.hits.0._source.k8s\.pod\.number_of_containers: 1 } + - match: { hits.hits.0._source.k8s\.pod\.tags: [ 
"backend", "test", "us-west1" ] } + - match: { hits.hits.0._source.k8s\.pod\.values: [ 1, 2, 3 ] } + + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.1._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2021-04-28T20:00:00.000Z } - match: { hits.hits.2._source._doc_count: 2 } @@ -890,16 +894,16 @@ setup: - match: { test-downsample-histogram.mappings.properties.@timestamp.type: date } - match: { test-downsample-histogram.mappings.properties.@timestamp.meta.fixed_interval: 1h } - match: { test-downsample-histogram.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.latency.type: histogram } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.latency.time_series_metric: null } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.empty-histogram.type: histogram } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.empty-histogram.time_series_metric: null } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.type: aggregate_metric_double } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.default_metric: max } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.time_series_metric: gauge } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.latency.type: histogram } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.latency.time_series_metric: null } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.empty-histogram.type: histogram } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.empty-histogram.time_series_metric: null } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.type: aggregate_metric_double } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.default_metric: max } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.time_series_metric: gauge } - do: search: @@ -910,64 +914,64 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - 
match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - length: { hits.hits.0._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.0: 2 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.1: 2 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.2: 8 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.3: 8 } - - length: { hits.hits.0._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.0._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.1: 10.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.2: 100.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.3: 1000.0 } + - length: { hits.hits.0._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.0: 2 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.1: 2 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.2: 8 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.3: 8 } + - length: { hits.hits.0._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.1: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.2: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.3: 1000.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2021-04-28T19:00:00.000Z } - - length: { hits.hits.1._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.0: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.1: 5 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.2: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.3: 13 } - - length: { hits.hits.1._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.1: 10.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.2: 100.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.3: 1000.0 } + - length: { hits.hits.1._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.0: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.1: 5 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.2: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.3: 13 } + - length: { hits.hits.1._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.1: 10.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.2: 100.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.3: 1000.0 } - match: { hits.hits.2._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } - - length: { hits.hits.2._source.k8s.pod.latency.counts: 4 } - - 
match: { hits.hits.2._source.k8s.pod.latency.counts.0: 8 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.1: 7 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.2: 10 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.3: 12 } - - length: { hits.hits.2._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.2._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.1: 2.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.2: 5.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.3: 10.0 } + - length: { hits.hits.2._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.0: 8 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.1: 7 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.2: 10 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.3: 12 } + - length: { hits.hits.2._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.1: 2.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.2: 5.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.3: 10.0 } - match: { hits.hits.3._source._doc_count: 2 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: 2021-04-28T19:00:00.000Z } - - length: { hits.hits.3._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.0: 7 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.1: 15 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.2: 10 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.3: 10 } - - length: { hits.hits.3._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.3._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.1: 2.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.2: 5.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.3: 10.0 } + - length: { hits.hits.3._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.0: 7 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.1: 15 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.2: 10 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.3: 10 } + - length: { hits.hits.3._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.1: 2.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.2: 5.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.3: 10.0 } --- "Downsample date_nanos timestamp field using custom format": @@ -988,6 +992,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-24T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date_nanos @@ -1048,19 +1053,19 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23T12:00:00.000000000Z } - - match: { hits.hits.0._source.k8s.pod.value.min: 8.0 
} - - match: { hits.hits.0._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 30.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 30.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-23T13:00:00.000000000Z } - - match: { hits.hits.1._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 9.0 } - do: indices.get_mapping: @@ -1090,6 +1095,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-24T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1150,19 +1156,19 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23T12:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.value.min: 8.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 30.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 30.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-23T13:00:00.000Z } - - match: { hits.hits.1._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 9.0 } - do: indices.get_mapping: @@ -1192,6 +1198,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-27T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1251,33 +1258,33 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 1 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23 } - - match: { hits.hits.0._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 
10.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-24 } - - match: { hits.hits.1._source.k8s.pod.value.min: 12.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 12.0 } - match: { hits.hits.2._source._doc_count: 1 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2023-02-25 } - - match: { hits.hits.2._source.k8s.pod.value.min: 8.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 8.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 8.0 } - match: { hits.hits.3._source._doc_count: 1 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: 2023-02-26 } - - match: { hits.hits.3._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.3._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.3._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.sum: 9.0 } --- "Downsample object field": @@ -1304,48 +1311,48 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.name: "dog" } - - match: { hits.hits.0._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 16.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 25.0 } - - match: { hits.hits.0._source.k8s.pod.agent.id: "second" } - - match: { hits.hits.0._source.k8s.pod.agent.version: "2.1.7" } + - match: { hits.hits.0._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 16.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 25.0 } + - match: { hits.hits.0._source.k8s\.pod\.agent\.id: "second" } + - match: { hits.hits.0._source.k8s\.pod\.agent\.version: "2.1.7" } - match: { hits.hits.1._source._doc_count: 2 } - - match: { hits.hits.1._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: "2021-04-28T19:00:00.000Z" } - - match: 
{ hits.hits.1._source.k8s.pod.name: "dog" } - - match: { hits.hits.1._source.k8s.pod.value.min: 17.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 25.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 42.0 } - - match: { hits.hits.1._source.k8s.pod.agent.id: "second" } - - match: { hits.hits.1._source.k8s.pod.agent.version: "2.1.7" } + - match: { hits.hits.1._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 17.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 25.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 42.0 } + - match: { hits.hits.1._source.k8s\.pod\.agent\.id: "second" } + - match: { hits.hits.1._source.k8s\.pod\.agent\.version: "2.1.7" } - match: { hits.hits.2._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.2._source.k8s.pod.name: "cat" } - - match: { hits.hits.2._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 20.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 30.0 } - - match: { hits.hits.2._source.k8s.pod.agent.id: "first" } - - match: { hits.hits.2._source.k8s.pod.agent.version: "2.0.4" } + - match: { hits.hits.2._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 20.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 30.0 } + - match: { hits.hits.2._source.k8s\.pod\.agent\.id: "first" } + - match: { hits.hits.2._source.k8s\.pod\.agent\.version: "2.0.4" } - match: { hits.hits.3._source._doc_count: 2 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: "2021-04-28T20:00:00.000Z" } - - match: { hits.hits.3._source.k8s.pod.name: "cat" } - - match: { hits.hits.3._source.k8s.pod.value.min: 12.0 } - - match: { hits.hits.3._source.k8s.pod.value.max: 15.0 } - - match: { hits.hits.3._source.k8s.pod.value.sum: 27.0 } - - match: { hits.hits.3._source.k8s.pod.agent.id: "first" } - - match: { hits.hits.3._source.k8s.pod.agent.version: "2.0.4" } + - match: { hits.hits.3._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.3._source.k8s\.pod\.value.min: 12.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.max: 15.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.sum: 27.0 } + - match: { hits.hits.3._source.k8s\.pod\.agent\.id: "first" } + - match: { hits.hits.3._source.k8s\.pod\.agent\.version: "2.0.4" } --- "Downsample empty and missing labels": @@ -1372,40 +1379,40 @@ setup: - length: { hits.hits: 3 } - match: { hits.hits.2._source._doc_count: 4 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.2._source.k8s.pod.name: "cat" } - - match: { hits.hits.2._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 100.0 } - - match: { 
hits.hits.2._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.2._source.k8s.pod.label: "abc" } - - match: { hits.hits.2._source.k8s.pod.unmapped: "abc" } + - match: { hits.hits.2._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.2._source.k8s\.pod\.label: "abc" } + - match: { hits.hits.2._source.k8s\.pod\.unmapped: "abc" } - match: { hits.hits.1._source._doc_count: 4 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e9597ab } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e9597ab } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.1._source.k8s.pod.name: "cat" } - - match: { hits.hits.1._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 100.0 } - - match: { hits.hits.1._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.1._source.k8s.pod.label: null } - - match: { hits.hits.1._source.k8s.pod.unmapped: null } + - match: { hits.hits.1._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.1._source.k8s\.pod\.label: null } + - match: { hits.hits.1._source.k8s\.pod\.unmapped: null } - match: { hits.hits.0._source._doc_count: 4 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.name: "dog" } - - match: { hits.hits.0._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 100.0 } - - match: { hits.hits.0._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.0._source.k8s.pod.label: "xyz" } - - match: { hits.hits.0._source.k8s.pod.unmapped: "xyz" } + - match: { hits.hits.0._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.0._source.k8s\.pod\.label: "xyz" } + - match: { hits.hits.0._source.k8s\.pod\.unmapped: "xyz" } --- @@ -1427,6 +1434,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1495,45 +1503,45 @@ setup: - match: { hits.hits.0._source._doc_count: 2 } - match: { hits.hits.0._source.metricset: pod } - - match: { hits.hits.0._source.k8s.pod.name: dog } - - match: { hits.hits.0._source.k8s.pod.value: 20 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - - match: { hits.hits.0._source.k8s.pod.label: foo } + - match: { hits.hits.0._source.k8s\.pod\.name: dog } + - match: { 
hits.hits.0._source.k8s\.pod\.value: 20 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.label: foo } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.1._source._doc_count: 2 } - match: { hits.hits.1._source.metricset: pod } - - match: { hits.hits.1._source.k8s.pod.name: fox } - - match: { hits.hits.1._source.k8s.pod.value: 20 } - - match: { hits.hits.1._source.k8s.pod.uid: 7393ef8e-489c-11ee-be56-0242ac120002 } - - match: { hits.hits.1._source.k8s.pod.label: bar } + - match: { hits.hits.1._source.k8s\.pod\.name: fox } + - match: { hits.hits.1._source.k8s\.pod\.value: 20 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 7393ef8e-489c-11ee-be56-0242ac120002 } + - match: { hits.hits.1._source.k8s\.pod\.label: bar } - match: { hits.hits.1._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.2._source._doc_count: 2 } - match: { hits.hits.2._source.metricset: pod } - - match: { hits.hits.2._source.k8s.pod.name: cat } - - match: { hits.hits.2._source.k8s.pod.value: 20 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.name: cat } + - match: { hits.hits.2._source.k8s\.pod\.value: 20 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } # NOTE: when downsampling a label field we propagate the last (most-recent timestamp-wise) non-null value, # ignoring/skipping null values. Here the last document has a value that hits ignore_above ("foofoo") and, # as a result, we propagate the value of the previous document ("foo") - - match: { hits.hits.2._source.k8s.pod.label: foo } + - match: { hits.hits.2._source.k8s\.pod\.label: foo } - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.3._source._doc_count: 2 } - match: { hits.hits.3._source.metricset: pod } - - match: { hits.hits.3._source.k8s.pod.name: cow } - - match: { hits.hits.3._source.k8s.pod.value: 20 } - - match: { hits.hits.3._source.k8s.pod.uid: a81ef23a-489c-11ee-be56-0242ac120005 } - - match: { hits.hits.3._source.k8s.pod.label: null } + - match: { hits.hits.3._source.k8s\.pod\.name: cow } + - match: { hits.hits.3._source.k8s\.pod\.value: 20 } + - match: { hits.hits.3._source.k8s\.pod\.uid: a81ef23a-489c-11ee-be56-0242ac120005 } + - match: { hits.hits.3._source.k8s\.pod\.label: null } - match: { hits.hits.3._source.@timestamp: 2021-04-28T18:00:00.000Z } - do: indices.get_mapping: index: test-downsample-label-ignore-above - - match: { test-downsample-label-ignore-above.mappings.properties.k8s.properties.pod.properties.label.type: keyword } - - match: { test-downsample-label-ignore-above.mappings.properties.k8s.properties.pod.properties.label.ignore_above: 3 } + - match: { test-downsample-label-ignore-above.mappings.properties.k8s\.pod\.label.type: keyword } + - match: { test-downsample-label-ignore-above.mappings.properties.k8s\.pod\.label.ignore_above: 3 } --- "Downsample index with empty dimension": @@ -1555,6 +1563,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1612,11 +1621,11 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.name: cat } - - match: { hits.hits.0._source.k8s.pod.empty: null } + - match: { hits.hits.0._source.k8s\.pod\.name: cat } + - match: { 
hits.hits.0._source.k8s\.pod\.empty: null } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.name: cat } - - match: { hits.hits.1._source.k8s.pod.empty: "" } + - match: { hits.hits.1._source.k8s\.pod\.name: cat } + - match: { hits.hits.1._source.k8s\.pod\.empty: "" } --- "Downsample index with empty dimension on routing path": @@ -1638,6 +1647,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1695,8 +1705,8 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.name: cat } - - match: { hits.hits.0._source.k8s.pod.empty: null } + - match: { hits.hits.0._source.k8s\.pod\.name: cat } + - match: { hits.hits.0._source.k8s\.pod\.empty: null } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.name: cat } - - match: { hits.hits.1._source.k8s.pod.empty: "" } + - match: { hits.hits.1._source.k8s\.pod\.name: cat } + - match: { hits.hits.1._source.k8s\.pod\.empty: "" } From 97ed0a93bb75d0f920c976527f4f5fc0b6065beb Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Thu, 24 Oct 2024 13:26:15 -0400 Subject: [PATCH 119/202] Make a minor change to trigger release note process (#113975) * changelog entry --- docs/changelog/113975.yaml | 19 +++++++++++++++++++ docs/reference/mapping/params/format.asciidoc | 4 ++-- 2 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/113975.yaml diff --git a/docs/changelog/113975.yaml b/docs/changelog/113975.yaml new file mode 100644 index 0000000000000..632ba038271bb --- /dev/null +++ b/docs/changelog/113975.yaml @@ -0,0 +1,19 @@ +pr: 113975 +summary: JDK locale database change +area: Mapping +type: breaking +issues: [] +breaking: + title: JDK locale database change + area: Mapping + details: | + {es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes the locale database that is used by Elasticsearch from the COMPAT database to the CLDR database. This change can cause significant differences to the textual date formats accepted by Elasticsearch, and to calculated week-dates. + + If you run {es} 8.16 on JDK version 22 or below, it will use the COMPAT locale database to match the behavior of 8.15. However, starting with {es} 9.0, {es} will use the CLDR database regardless of JDK version it is run on. + impact: | + This affects you if you use custom date formats using textual or week-date field specifiers. If you use date fields or calculated week-dates that change between the COMPAT and CLDR databases, then this change will cause Elasticsearch to reject previously valid date fields as invalid data. You might need to modify your ingest or output integration code to account for the differences between these two JDK versions. + + Starting in version 8.15.2, Elasticsearch will log deprecation warnings if you are using date format specifiers that might change on upgrading to JDK 23. These warnings are visible in Kibana. + + For detailed guidance, refer to <> and the https://ela.st/jdk-23-locales[Elastic blog]. + notable: true diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 943e8fb879ff3..6c82b04eb5fe5 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -34,13 +34,13 @@ down to the nearest day. 
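The paragraphs below note that custom formatters using the `Y`, `W`, or `w` specifiers follow the JDK locale definition of weekyears rather than the ISO one. As a minimal standalone sketch of why that matters — the class name and sample date are invented for illustration, and this program is not part of the patch itself — the following Java program prints the same date's week-date under both the locale-based and the ISO definitions:

[source,java]
----
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.temporal.WeekFields;
import java.util.Locale;

// Hypothetical demo, not part of this patch.
public class WeekDateLocaleDemo {
    public static void main(String[] args) {
        LocalDate date = LocalDate.of(2024, 12, 30); // a Monday near a year boundary
        // 'Y' (week-based year) and 'w' (week of week-based year) resolve through
        // the JDK's locale database, so the printed week-date can differ between
        // the COMPAT and CLDR databases (e.g. when run with
        // -Djava.locale.providers=COMPAT on JDK 22 or earlier).
        DateTimeFormatter localeWeekDate = DateTimeFormatter.ofPattern("YYYY-'W'ww", Locale.ROOT);
        System.out.println("locale-based week-date: " + localeWeekDate.format(date));
        // The ISO definition used by the built-in week-date formats is fixed
        // and locale-independent.
        System.out.println("ISO week-date: " + date.get(WeekFields.ISO.weekBasedYear())
            + "-W" + date.get(WeekFields.ISO.weekOfWeekBasedYear()));
    }
}
----

Differences of exactly this kind are what the deprecation warnings added in 8.15.2 are meant to surface before an upgrade to JDK 23.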
Completely customizable date formats are supported. The syntax for these is explained in https://docs.oracle.com/en/java/javase/21/docs/api/java.base/java/time/format/DateTimeFormatter.html[DateTimeFormatter docs]. -Note that whilst the built-in formats for week dates use the ISO definition of weekyears, +Note that while the built-in formats for week dates use the ISO definition of weekyears, custom formatters using the `Y`, `W`, or `w` field specifiers use the JDK locale definition of weekyears. This can result in different values between the built-in formats and custom formats for week dates. [[built-in-date-formats]] -==== Built In Formats +==== Built-in formats Most of the below formats have a `strict` companion format, which means that year, month and day parts of the month must use respectively 4, 2 and 2 digits From e951984831cc499f5f13efee0d6283ee8957f295 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Oct 2024 11:40:59 -0700 Subject: [PATCH 120/202] Reenable CacheFileTests (#115582) The test issue was fixed by #110807 closes #110801 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ab5d686a041c1..827a604cd6a19 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -23,9 +23,6 @@ tests: - class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests method: testPopulationOfCacheWhenLoadingPrivilegesForAllApplications issue: https://github.com/elastic/elasticsearch/issues/110789 -- class: org.elasticsearch.xpack.searchablesnapshots.cache.common.CacheFileTests - method: testCacheFileCreatedAsSparseFile - issue: https://github.com/elastic/elasticsearch/issues/110801 - class: org.elasticsearch.nativeaccess.VectorSystemPropertyTests method: testSystemPropertyDisabled issue: https://github.com/elastic/elasticsearch/issues/110949 From ad9c5a0a0640f62f763f63682f7e321c4d68ab41 Mon Sep 17 00:00:00 2001 From: Pawan Kartik Date: Thu, 24 Oct 2024 20:15:17 +0100 Subject: [PATCH 121/202] Correctly update search status for a nonexistent local index (#115138) * fix: correctly update search status for a nonexistent local index * Check for cluster existence before updation * Remove unnecessary `println` * Address review comment: add an explanatory code comment * Further clarify code comment --- .../search/ccs/CrossClusterSearchIT.java | 64 +++++++++++++++++++ .../action/search/TransportSearchAction.java | 23 +++++++ 2 files changed, 87 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 5233a0cd564ef..5984e1acc89af 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -755,6 +755,70 @@ public void testNegativeRemoteIndexNameThrows() { assertNotNull(ee.getCause()); } + public void testClusterDetailsWhenLocalClusterHasNoMatchingIndex() throws Exception { + Map testClusterInfo = setupTwoClusters(); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); + + SearchRequest searchRequest = new SearchRequest("nomatch*", REMOTE_CLUSTER + ":" + remoteIndex); + if (randomBoolean()) { + searchRequest = searchRequest.scroll(TimeValue.timeValueMinutes(1)); + } + + searchRequest.allowPartialSearchResults(false); + if 
(randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + + boolean minimizeRoundtrips = false; + searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + + boolean dfs = randomBoolean(); + if (dfs) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + + searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should BE successful", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo("nomatch*")); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), equalTo(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + }); + } + private static void assertOneFailedShard(Cluster cluster, int totalShards) { assertNotNull(cluster); assertThat(cluster.getStatus(), equalTo(Cluster.Status.PARTIAL)); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 1645a378446a4..302c3e243a1f6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1247,6 +1247,29 @@ private void executeSearch( indicesAndAliases, concreteLocalIndices ); + + // localShardIterators is empty since there are no matching indices. In such cases, + // we update the local cluster's status from RUNNING to SUCCESSFUL right away. 
Before + // we attempt to do that, we must ensure that the local cluster was specified in the user's + // search request. This is done by trying to fetch the local cluster via getCluster() and + // checking for a non-null return value. If the local cluster was never specified, its status + // update can be skipped. + if (localShardIterators.isEmpty() + && clusters != SearchResponse.Clusters.EMPTY + && clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) != null) { + clusters.swapCluster( + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + (alias, v) -> new SearchResponse.Cluster.Builder(v).setStatus(SearchResponse.Cluster.Status.SUCCESSFUL) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .setFailures(Collections.emptyList()) + .setTook(TimeValue.timeValueMillis(0)) + .setTimedOut(false) + .build() + ); + } } final GroupShardsIterator shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); From 79cfcec065311165f7d491d164e99fed6c5cbeb9 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 21:26:02 +0200 Subject: [PATCH 122/202] Clarify the null check for retention leases (#114979) `MetadataStateFormat.FORMAT.loadLatestState` can actually return null when the state directory hasn't been initialized yet, so we have to keep the null check when loading retention leases during the initialization of the engine. See #39359 --- .../java/org/elasticsearch/gateway/MetadataStateFormat.java | 2 ++ .../org/elasticsearch/index/seqno/ReplicationTracker.java | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java index 30b8d72b83f4c..3e68ec5243f5f 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -485,6 +486,7 @@ public Tuple loadLatestStateWithGeneration(Logger logger, NamedXContent * @param dataLocations the data-locations to try. * @return the latest state or null if no state was found. */ + @Nullable public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegistry, Path... 
dataLocations) throws IOException { return loadLatestStateWithGeneration(logger, namedXContentRegistry, dataLocations).v1(); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index e67e878fd3827..f1e3ac270d959 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersions; @@ -471,9 +470,9 @@ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { return emptyIfNull(retentionLeases); } - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_INDEXING) private static RetentionLeases emptyIfNull(RetentionLeases retentionLeases) { - // we expect never to see a null in 8.x, so adjust this to throw an exception from v9 onwards. + // `MetadataStateFormat.FORMAT.loadLatestState` can actually return null when the state directory + // on a node hasn't been initialized yet return retentionLeases == null ? RetentionLeases.EMPTY : retentionLeases; } From b4edc3ddab0ea910582c0dd0091ed5b147048280 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 21:26:23 +0200 Subject: [PATCH 123/202] Remove loading on-disk cluster metadata from the manifest file (#114698) Since metadata storage was moved to Lucene in #50907 (7.16.0), we shouldn't encounter any on-disk global metadata files, so we can remove support for loading them. 
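To make the removed branch concrete, here is a minimal self-contained sketch — all types and names are invented stand-ins, not the real Elasticsearch classes — of the fallback this patch deletes: previously, an empty Lucene-backed on-disk state fell back to the manifest-based loader, whereas afterwards the (possibly empty) Lucene state is authoritative.

[source,java]
----
// Toy model of the on-disk load path; the types only approximate the real code.
public class LegacyFallbackSketch {
    record OnDiskState(long currentTerm, long lastAcceptedVersion, boolean empty) {}
    record LegacyState(long currentTerm, long clusterStateVersion) {}

    // Pre-#114698: an empty Lucene state consulted the manifest-based loader.
    static OnDiskState loadWithFallback(OnDiskState luceneState, LegacyState legacy) {
        if (luceneState.empty() && legacy != null) {
            return new OnDiskState(legacy.currentTerm(), legacy.clusterStateVersion(), false);
        }
        return luceneState;
    }

    // Post-#114698: since 7.16 wrote all metadata to Lucene, the fallback is gone.
    static OnDiskState load(OnDiskState luceneState) {
        return luceneState;
    }

    public static void main(String[] args) {
        OnDiskState empty = new OnDiskState(0, 0, true);
        System.out.println(loadWithFallback(empty, new LegacyState(3, 42)));
        System.out.println(load(empty));
    }
}
----

The actual deletion is visible in the `GatewayMetaState#createOnDiskPersistedState` hunk below.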
--- .../gateway/GatewayIndexStateIT.java | 60 -------- .../gateway/GatewayMetaState.java | 13 -- .../gateway/MetaStateService.java | 119 +--------------- .../java/org/elasticsearch/node/Node.java | 1 - .../gateway/MetaStateServiceTests.java | 132 ------------------ .../gateway/MockGatewayMetaState.java | 8 -- 6 files changed, 1 insertion(+), 332 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 00bd350fe2b84..cdd5a52e048bd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.coordination.CoordinationMetadata; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -27,14 +26,9 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.IOUtils; -import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndexClosedException; @@ -46,13 +40,8 @@ import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; -import java.nio.file.Path; import java.util.List; -import java.util.Map; import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -60,7 +49,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; @@ -545,52 +533,4 @@ public void testArchiveBrokenClusterSettings() throws Exception { assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); } - public void testHalfDeletedIndexImport() throws Exception { - // It's possible for a 6.x node to add a tombstone for an index but not actually delete the index metadata from disk since that - // deletion is slightly deferred and may race against the node being shut down; if you upgrade to 7.x when in this state then the - // node won't start. 
- - final String nodeName = internalCluster().startNode(); - createIndex("test", 1, 0); - ensureGreen("test"); - - final Metadata metadata = internalCluster().getInstance(ClusterService.class).state().metadata(); - final Path[] paths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths(); - final String nodeId = clusterAdmin().prepareNodesInfo(nodeName).clear().get().getNodes().get(0).getNode().getId(); - - writeBrokenMeta(nodeEnvironment -> { - for (final Path path : paths) { - IOUtils.rm(path.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)); - } - MetaStateWriterUtils.writeGlobalState( - nodeEnvironment, - "test", - Metadata.builder(metadata) - // we remove the manifest file, resetting the term and making this look like an upgrade from 6.x, so must also reset the - // term in the coordination metadata - .coordinationMetadata(CoordinationMetadata.builder(metadata.coordinationMetadata()).term(0L).build()) - // add a tombstone but do not delete the index metadata from disk - .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metadata.index("test").getIndex()).build()) - .build() - ); - NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, BuildVersion.current(), metadata.oldestIndexVersion()), paths); - }); - - ensureGreen(); - - assertBusy(() -> assertThat(internalCluster().getInstance(NodeEnvironment.class).availableIndexFolders(), empty())); - } - - private void writeBrokenMeta(CheckedConsumer writer) throws Exception { - Map nodeEnvironments = Stream.of(internalCluster().getNodeNames()) - .collect(Collectors.toMap(Function.identity(), nodeName -> internalCluster().getInstance(NodeEnvironment.class, nodeName))); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - final NodeEnvironment nodeEnvironment = nodeEnvironments.get(nodeName); - writer.accept(nodeEnvironment); - return super.onNodeStopped(nodeName); - } - }); - } } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index c863a5bac973a..a7baca59e1857 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -33,8 +32,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; @@ -185,16 +182,6 @@ private PersistedState createOnDiskPersistedState( long lastAcceptedVersion = onDiskState.lastAcceptedVersion; long currentTerm = onDiskState.currentTerm; - if (onDiskState.empty()) { - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // legacy metadata loader is not needed anymore from v9 onwards - final Tuple legacyState = 
metaStateService.loadFullState(); - if (legacyState.v1().isEmpty() == false) { - metadata = legacyState.v2(); - lastAcceptedVersion = legacyState.v1().clusterStateVersion(); - currentTerm = legacyState.v1().currentTerm(); - } - } - PersistedState persistedState = null; boolean success = false; try { diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 4260ef51a3976..5f07deff31eea 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -12,22 +12,17 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.function.Predicate; /** @@ -45,118 +40,6 @@ public MetaStateService(NodeEnvironment nodeEnv, NamedXContentRegistry namedXCon this.namedXContentRegistry = namedXContentRegistry; } - /** - * Loads the full state, which includes both the global state and all the indices meta data.
- * When loading, manifest file is consulted (represented by {@link Manifest} class), to load proper generations.
- * If there is no manifest file on disk, this method fallbacks to BWC mode, where latest generation of global and indices - * metadata is loaded. Please note that currently there is no way to distinguish between manifest file being removed and manifest - * file was not yet created. It means that this method always fallbacks to BWC mode, if there is no manifest file. - * - * @return tuple of {@link Manifest} and {@link Metadata} with global metadata and indices metadata. If there is no state on disk, - * meta state with globalGeneration -1 and empty meta data is returned. - * @throws IOException if some IOException when loading files occurs or there is no metadata referenced by manifest file. - */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - public Tuple loadFullState() throws IOException { - final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); - if (manifest == null) { - return loadFullStateBWC(); - } - - final Metadata.Builder metadataBuilder; - if (manifest.isGlobalGenerationMissing()) { - metadataBuilder = Metadata.builder(); - } else { - final Metadata globalMetadata = Metadata.FORMAT.loadGeneration( - logger, - namedXContentRegistry, - manifest.globalGeneration(), - nodeEnv.nodeDataPaths() - ); - if (globalMetadata != null) { - metadataBuilder = Metadata.builder(globalMetadata); - } else { - throw new IOException("failed to find global metadata [generation: " + manifest.globalGeneration() + "]"); - } - } - - for (Map.Entry entry : manifest.indexGenerations().entrySet()) { - final Index index = entry.getKey(); - final long generation = entry.getValue(); - final String indexFolderName = index.getUUID(); - final IndexMetadata indexMetadata = IndexMetadata.FORMAT.loadGeneration( - logger, - namedXContentRegistry, - generation, - nodeEnv.resolveIndexFolder(indexFolderName) - ); - if (indexMetadata != null) { - metadataBuilder.put(indexMetadata, false); - } else { - throw new IOException( - "failed to find metadata for existing index " - + index.getName() - + " [location: " - + indexFolderName - + ", generation: " - + generation - + "]" - ); - } - } - - return new Tuple<>(manifest, metadataBuilder.build()); - } - - /** - * "Manifest-less" BWC version of loading metadata from disk. 
See also {@link #loadFullState()} - */ - private Tuple loadFullStateBWC() throws IOException { - Map indices = new HashMap<>(); - Metadata.Builder metadataBuilder; - - Tuple metadataAndGeneration = Metadata.FORMAT.loadLatestStateWithGeneration( - logger, - namedXContentRegistry, - nodeEnv.nodeDataPaths() - ); - Metadata globalMetadata = metadataAndGeneration.v1(); - long globalStateGeneration = metadataAndGeneration.v2(); - - final IndexGraveyard indexGraveyard; - if (globalMetadata != null) { - metadataBuilder = Metadata.builder(globalMetadata); - indexGraveyard = globalMetadata.custom(IndexGraveyard.TYPE); - } else { - metadataBuilder = Metadata.builder(); - indexGraveyard = IndexGraveyard.builder().build(); - } - - for (String indexFolderName : nodeEnv.availableIndexFolders()) { - Tuple indexMetadataAndGeneration = IndexMetadata.FORMAT.loadLatestStateWithGeneration( - logger, - namedXContentRegistry, - nodeEnv.resolveIndexFolder(indexFolderName) - ); - IndexMetadata indexMetadata = indexMetadataAndGeneration.v1(); - long generation = indexMetadataAndGeneration.v2(); - if (indexMetadata != null) { - if (indexGraveyard.containsIndex(indexMetadata.getIndex())) { - logger.debug("[{}] found metadata for deleted index [{}]", indexFolderName, indexMetadata.getIndex()); - // this index folder is cleared up when state is recovered - } else { - indices.put(indexMetadata.getIndex(), generation); - metadataBuilder.put(indexMetadata, false); - } - } else { - logger.debug("[{}] failed to find metadata for existing index location", indexFolderName); - } - } - - Manifest manifest = Manifest.unknownCurrentTermAndVersion(globalStateGeneration, indices); - return new Tuple<>(manifest, metadataBuilder.build()); - } - /** * Loads the index state for the provided index name, returning null if doesn't exists. */ @@ -193,7 +76,7 @@ List loadIndicesStates(Predicate excludeIndexPathIdsPredi } /** - * Loads the global state, *without* index state, see {@link #loadFullState()} for that. 
+ * Loads the global state, *without* index state */ Metadata loadGlobalState() throws IOException { return Metadata.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index e30f76fdd9414..ec4a534fc883b 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -325,7 +325,6 @@ public Node start() throws NodeValidationException { // TODO: Do not expect that the legacy metadata file is always present https://github.com/elastic/elasticsearch/issues/95211 if (Assertions.ENABLED && DiscoveryNode.isStateless(settings()) == false) { try { - assert injector.getInstance(MetaStateService.class).loadFullState().v1().isEmpty(); final NodeMetadata nodeMetadata = NodeMetadata.FORMAT.loadLatestState( logger, NamedXContentRegistry.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java index 40c4e064216f1..1bbab8bf782bd 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java @@ -9,21 +9,15 @@ package org.elasticsearch.gateway; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; -import java.util.HashMap; - import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.nullValue; public class MetaStateServiceTests extends ESTestCase { @@ -75,130 +69,4 @@ public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception { assertThat(metaStateService.loadGlobalState().persistentSettings(), equalTo(metadata.persistentSettings())); assertThat(metaStateService.loadGlobalState().hasIndex("test1"), equalTo(false)); } - - public void testLoadFullStateBWC() throws Exception { - IndexMetadata indexMetadata = indexMetadata("test1"); - Metadata metadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value1").build()) - .put(indexMetadata, true) - .build(); - - long globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "test_write", metadata); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "test_write", indexMetadata); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - Manifest manifest = manifestAndMetadata.v1(); - assertThat(manifest.globalGeneration(), equalTo(globalGeneration)); - assertThat(manifest.indexGenerations(), hasKey(indexMetadata.getIndex())); - assertThat(manifest.indexGenerations().get(indexMetadata.getIndex()), equalTo(indexGeneration)); - - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(metadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(indexMetadata)); - } - - public void testLoadEmptyStateNoManifest() throws IOException { - Tuple 
manifestAndMetadata = metaStateService.loadFullState(); - - Manifest manifest = manifestAndMetadata.v1(); - assertTrue(manifest.isEmpty()); - - Metadata metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } - - public void testLoadEmptyStateWithManifest() throws IOException { - Manifest manifest = Manifest.empty(); - MetaStateWriterUtils.writeManifestAndCleanup(env, "test", manifest); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertTrue(manifestAndMetadata.v1().isEmpty()); - Metadata metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } - - public void testLoadFullStateMissingGlobalMetadata() throws IOException { - IndexMetadata index = indexMetadata("test1"); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "test", index); - Manifest manifest = new Manifest( - randomNonNegativeLong(), - randomNonNegativeLong(), - Manifest.empty().globalGeneration(), - new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - } - ); - assertTrue(manifest.isGlobalGenerationMissing()); - MetaStateWriterUtils.writeManifestAndCleanup(env, "test", manifest); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), equalTo(manifest)); - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(loadedMetadata, Metadata.EMPTY_METADATA)); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - } - - public void testLoadFullStateAndUpdateAndClean() throws IOException { - IndexMetadata index = indexMetadata("test1"); - Metadata metadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value1").build()) - .put(index, true) - .build(); - - long globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "first global state write", metadata); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "first index state write", index); - - Manifest manifest = new Manifest(randomNonNegativeLong(), randomNonNegativeLong(), globalGeneration, new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - }); - MetaStateWriterUtils.writeManifestAndCleanup(env, "first manifest write", manifest); - - Metadata newMetadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value2").build()) - .put(index, true) - .build(); - globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "second global state write", newMetadata); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), equalTo(manifest)); - - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(metadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - - manifest = new Manifest(randomNonNegativeLong(), randomNonNegativeLong(), globalGeneration, new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - }); - - MetaStateWriterUtils.writeManifestAndCleanup(env, "second manifest write", manifest); - Metadata.FORMAT.cleanupOldFiles(globalGeneration, env.nodeDataPaths()); - IndexMetadata.FORMAT.cleanupOldFiles(indexGeneration, env.indexPaths(index.getIndex())); - - manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), 
equalTo(manifest)); - - loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(newMetadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - - if (randomBoolean()) { - metaStateService.unreferenceAll(); - } else { - metaStateService.deleteAll(); - } - manifestAndMetadata = metaStateService.loadFullState(); - assertTrue(manifestAndMetadata.v1().isEmpty()); - metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java index d03396f9b53b3..64b468226e509 100644 --- a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -19,14 +18,12 @@ import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.plugins.MetadataUpgrader; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; -import java.io.IOException; import java.util.List; import static org.mockito.Mockito.mock; @@ -70,11 +67,6 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); final MetaStateService metaStateService = mock(MetaStateService.class); - try { - when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), Metadata.builder().build())); - } catch (IOException e) { - throw new AssertionError(e); - } start( settings, transportService, From e789039dfa8fee60dc2615c3876295ff7c6f3b01 Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Thu, 24 Oct 2024 13:58:49 -0600 Subject: [PATCH 124/202] Fixing remote ENRICH by pushing the Enrich inside FragmentExec (#114665) * Fixing remote ENRICH by pushing the Enrich inside FragmentExec * Improve handling of more complex cases such as several enriches --- docs/changelog/114665.yaml | 6 ++ .../esql/action/CrossClustersEnrichIT.java | 102 ++++++++++++++++-- .../xpack/esql/analysis/Verifier.java | 7 -- .../xpack/esql/planner/Mapper.java | 42 ++++++++ .../optimizer/PhysicalPlanOptimizerTests.java | 63 +++++++++-- 5 files changed, 195 insertions(+), 25 deletions(-) create mode 100644 docs/changelog/114665.yaml diff --git a/docs/changelog/114665.yaml b/docs/changelog/114665.yaml new file mode 100644 index 0000000000000..b90bb799bd896 --- /dev/null +++ b/docs/changelog/114665.yaml @@ -0,0 +1,6 @@ +pr: 114665 +summary: Fixing remote ENRICH by pushing the Enrich inside `FragmentExec` +area: ES|QL +type: bug +issues: + - 105095 diff --git 
a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java index 7d8bb738098d3..e8e9f45694e9c 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -47,6 +47,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -469,27 +470,112 @@ public void testEnrichRemoteWithVendor() { } } + public void testEnrichRemoteWithVendorNoSort() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + for (Enrich.Mode hostMode : List.of(Enrich.Mode.ANY, Enrich.Mode.REMOTE)) { + var query = String.format(Locale.ROOT, """ + FROM *:events,events + | LIMIT 100 + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + """, enrichHosts(hostMode), enrichVendors(Enrich.Mode.REMOTE)); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + var values = getValuesList(resp); + values.sort(Comparator.comparing(o -> (String) o.get(1), Comparator.nullsLast(Comparator.naturalOrder()))); + assertThat( + values, + equalTo( + List.of( + List.of(6L, "Apple"), + List.of(7L, "Microsoft"), + List.of(1L, "Redhat"), + List.of(2L, "Samsung"), + List.of(1L, "Sony"), + List.of(2L, "Suse"), + Arrays.asList(3L, (String) null) + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } + } + } + public void testTopNThenEnrichRemote() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + String query = String.format(Locale.ROOT, """ FROM *:events,events | eval ip= TO_STR(host) - | SORT ip + | SORT timestamp, user, ip | LIMIT 5 - | %s + | %s | KEEP host, timestamp, user, os """, enrichHosts(Enrich.Mode.REMOTE)); - var error = expectThrows(VerificationException.class, () -> runQuery(query, randomBoolean()).close()); - assertThat(error.getMessage(), containsString("ENRICH with remote policy can't be executed after LIMIT")); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + assertThat( + getValuesList(resp), + equalTo( + List.of( + List.of("192.168.1.2", 1L, "andres", "Windows"), + List.of("192.168.1.3", 1L, "matthew", "MacOS"), + Arrays.asList("192.168.1.25", 1L, "park", (String) null), + List.of("192.168.1.5", 2L, "akio", "Android"), + List.of("192.168.1.6", 2L, "sergio", "iOS") + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } } public void testLimitThenEnrichRemote() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = 
includeCCSMetadata.v2(); + String query = String.format(Locale.ROOT, """ FROM *:events,events - | LIMIT 10 + | LIMIT 25 | eval ip= TO_STR(host) - | %s + | %s | KEEP host, timestamp, user, os """, enrichHosts(Enrich.Mode.REMOTE)); - var error = expectThrows(VerificationException.class, () -> runQuery(query, randomBoolean()).close()); - assertThat(error.getMessage(), containsString("ENRICH with remote policy can't be executed after LIMIT")); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + var values = getValuesList(resp); + values.sort( + Comparator.comparingLong((List o) -> (Long) o.get(1)) + .thenComparing(o -> (String) o.get(0)) + .thenComparing(o -> (String) o.get(2)) + ); + assertThat( + values.subList(0, 5), + equalTo( + List.of( + List.of("192.168.1.2", 1L, "andres", "Windows"), + Arrays.asList("192.168.1.25", 1L, "park", (String) null), + List.of("192.168.1.3", 1L, "matthew", "MacOS"), + List.of("192.168.1.5", 2L, "akio", "Android"), + List.of("192.168.1.5", 2L, "simon", "Android") + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } } public void testAggThenEnrichRemote() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index e2717cd9af0d1..fbaf43467a2e7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -609,22 +609,15 @@ private static void checkForSortableDataTypes(LogicalPlan p, Set localF */ private static void checkRemoteEnrich(LogicalPlan plan, Set failures) { boolean[] agg = { false }; - boolean[] limit = { false }; boolean[] enrichCoord = { false }; plan.forEachUp(UnaryPlan.class, u -> { - if (u instanceof Limit) { - limit[0] = true; // TODO: Make Limit then enrich_remote work - } if (u instanceof Aggregate) { agg[0] = true; } else if (u instanceof Enrich enrich && enrich.mode() == Enrich.Mode.COORDINATOR) { enrichCoord[0] = true; } if (u instanceof Enrich enrich && enrich.mode() == Enrich.Mode.REMOTE) { - if (limit[0]) { - failures.add(fail(enrich, "ENRICH with remote policy can't be executed after LIMIT")); - } if (agg[0]) { failures.add(fail(enrich, "ENRICH with remote policy can't be executed after STATS")); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index e571be54692c4..152c492a34433 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -52,8 +52,10 @@ import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.ShowExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; +import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; /** *
<p>This class is part of the planner</p>
@@ -104,6 +106,46 @@ public PhysicalPlan map(LogicalPlan p) { // // Unary Plan // + if (localMode == false && p instanceof Enrich enrich && enrich.mode() == Enrich.Mode.REMOTE) { + // When we have remote enrich, we want to put it under FragmentExec, so it would be executed remotely. + // We're only going to do it on the coordinator node. + // The way we're going to do it is as follows: + // 1. Locate FragmentExec in the tree. If we have no FragmentExec, we won't do anything. + // 2. Put this Enrich under it, removing everything that was below it previously. + // 3. Above FragmentExec, we should deal with pipeline breakers, since pipeline ops already are supposed to go under + // FragmentExec. + // 4. Aggregates can't appear here since the plan should have errored out if we have aggregate inside remote Enrich. + // 5. So we should be keeping: LimitExec, ExchangeExec, OrderExec, TopNExec (actually OrderExec probably can't happen anyway). + + var child = map(enrich.child()); + AtomicBoolean hasFragment = new AtomicBoolean(false); + + var childTransformed = child.transformUp((f) -> { + // Once we reached FragmentExec, we stuff our Enrich under it + if (f instanceof FragmentExec) { + hasFragment.set(true); + return new FragmentExec(p); + } + if (f instanceof EnrichExec enrichExec) { + // It can only be ANY because COORDINATOR would have errored out earlier, and REMOTE should be under FragmentExec + assert enrichExec.mode() == Enrich.Mode.ANY : "enrich must be in ANY mode here"; + return enrichExec.child(); + } + if (f instanceof UnaryExec unaryExec) { + if (f instanceof LimitExec || f instanceof ExchangeExec || f instanceof OrderExec || f instanceof TopNExec) { + return f; + } else { + return unaryExec.child(); + } + } + // Currently, it's either UnaryExec or LeafExec. Leaf will either resolve to FragmentExec or we'll ignore it. 
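+                // Net effect of this rewrite, as an illustrative sketch (plan shapes taken from
+                // testEnrichBeforeLimit in PhysicalPlanOptimizerTests below, not new behavior): mapping
+                //   Enrich[REMOTE](Eval(Limit(EsRelation)))
+                // whose mapped child is LimitExec(ExchangeExec(FragmentExec(...))) produces
+                //   LimitExec(ExchangeExec(FragmentExec(Enrich[REMOTE](Eval(Limit(EsRelation))))))
+                // i.e. the enrich and everything below it are executed remotely inside the fragment,
+                // while the pipeline breakers stay above the exchange on the coordinator.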
+ return f; + }); + + if (hasFragment.get()) { + return childTransformed; + } + } if (p instanceof UnaryPlan ua) { var child = map(ua.child()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 964039268e30d..961c70acada7b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -172,7 +172,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -// @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") +// @TestLogging(value = "org.elasticsearch.xpack.esql:DEBUG", reason = "debug") public class PhysicalPlanOptimizerTests extends ESTestCase { private static final String PARAM_FORMATTING = "%1$s"; @@ -5851,14 +5851,14 @@ public void testEnrichBeforeLimit() { | EVAL employee_id = to_str(emp_no) | ENRICH _remote:departments | LIMIT 10"""); - var enrich = as(plan, EnrichExec.class); - assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); - assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); - var eval = as(enrich.child(), EvalExec.class); - var finalLimit = as(eval.child(), LimitExec.class); + var finalLimit = as(plan, LimitExec.class); var exchange = as(finalLimit.child(), ExchangeExec.class); var fragment = as(exchange.child(), FragmentExec.class); - var partialLimit = as(fragment.fragment(), Limit.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialLimit = as(evalFragment.child(), Limit.class); as(partialLimit.child(), EsRelation.class); } } @@ -5901,13 +5901,21 @@ public void testLimitThenEnrich() { } public void testLimitThenEnrichRemote() { - var error = expectThrows(VerificationException.class, () -> physicalPlan(""" + var plan = physicalPlan(""" FROM test | LIMIT 10 | EVAL employee_id = to_str(emp_no) | ENRICH _remote:departments - """)); - assertThat(error.getMessage(), containsString("line 4:3: ENRICH with remote policy can't be executed after LIMIT")); + """); + var finalLimit = as(plan, LimitExec.class); + var exchange = as(finalLimit.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialLimit = as(evalFragment.child(), Limit.class); + as(partialLimit.child(), EsRelation.class); } public void testEnrichBeforeTopN() { @@ -5961,6 +5969,23 @@ public void testEnrichBeforeTopN() { var eval = as(enrich.child(), Eval.class); as(eval.child(), EsRelation.class); } + { + var plan = physicalPlan(""" + FROM test + | EVAL employee_id = to_str(emp_no) + | ENRICH _remote:departments + | SORT department + | LIMIT 10"""); + var topN = as(plan, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var partialTopN = as(fragment.fragment(), 
TopN.class); + var enrich = as(partialTopN.child(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var eval = as(enrich.child(), Eval.class); + as(eval.child(), EsRelation.class); + } } public void testEnrichAfterTopN() { @@ -6000,6 +6025,24 @@ public void testEnrichAfterTopN() { var partialTopN = as(fragment.fragment(), TopN.class); as(partialTopN.child(), EsRelation.class); } + { + var plan = physicalPlan(""" + FROM test + | SORT emp_no + | LIMIT 10 + | EVAL employee_id = to_str(emp_no) + | ENRICH _remote:departments + """); + var topN = as(plan, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialTopN = as(evalFragment.child(), TopN.class); + as(partialTopN.child(), EsRelation.class); + } } public void testManyEnrich() { From cade0021736d69f66db4bc73c022258833c3ff38 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 24 Oct 2024 15:34:27 -0500 Subject: [PATCH 125/202] Fixing ingest simulate yaml rest test when global legacy template is present (#115586) Sometimes the test framework adds a global legacy template. When this happens, a test that is using another legacy template to create an index emits a warning since the index matches two legacy templates. This PR allows that warning. --- .../resources/rest-api-spec/test/ingest/80_ingest_simulate.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 7ed5ad3154151..2d3fa6b568381 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -1537,6 +1537,8 @@ setup: - not_exists: docs.0.doc.error - do: + allowed_warnings: + - "index [foo-1] matches multiple legacy templates [global, my-legacy-template], composable templates will only match a single template" indices.create: index: foo-1 - match: { acknowledged: true } From d1c7e9886f483f2865b7780ce0ba44689fae622e Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 24 Oct 2024 21:43:22 +0100 Subject: [PATCH 126/202] Update BlobCacheBufferedIndexInput::readVLong to correctly handle negative long values (#115594) --- docs/changelog/115594.yaml | 6 ++++++ .../blobcache/common/BlobCacheBufferedIndexInput.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115594.yaml diff --git a/docs/changelog/115594.yaml b/docs/changelog/115594.yaml new file mode 100644 index 0000000000000..91a6089dfb3ce --- /dev/null +++ b/docs/changelog/115594.yaml @@ -0,0 +1,6 @@ +pr: 115594 +summary: Update `BlobCacheBufferedIndexInput::readVLong` to correctly handle negative + long values +area: Search +type: bug +issues: [] diff --git 
a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java index 16645e7523c36..7e7e954d1fa72 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java @@ -175,7 +175,7 @@ public final int readVInt() throws IOException { @Override public final long readVLong() throws IOException { - if (9 <= buffer.remaining()) { + if (10 <= buffer.remaining()) { return ByteBufferStreamInput.readVLong(buffer); } else { return super.readVLong(); From f444c86f857db0f82f528d217bf0da6f5b9308c5 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 24 Oct 2024 13:47:20 -0700 Subject: [PATCH 127/202] Add lookup index mode (#115143) This change introduces a new index mode, lookup, for indices intended for lookup operations in ES|QL. Lookup indices must have a single shard and be replicated to all data nodes by default. Aside from these requirements, they function as standard indices. Documentation will be added later when the lookup operator in ES|QL is implemented. --- .../test/indices.create/10_basic.yml | 67 ++++++ .../index/LookupIndexModeIT.java | 219 ++++++++++++++++++ .../org/elasticsearch/TransportVersions.java | 1 + .../metadata/MetadataCreateIndexService.java | 16 +- .../org/elasticsearch/index/IndexMode.java | 115 ++++++++- .../monitor/metrics/IndicesMetrics.java | 2 +- .../elasticsearch/node/NodeConstruction.java | 10 +- .../indices/CreateIndexCapabilities.java | 7 +- .../index/mapper/MapperServiceTestCase.java | 2 +- .../index/engine/FollowingEngineTests.java | 3 + 10 files changed, 436 insertions(+), 6 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml index 8242b7cdd29e7..31d127b80c844 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -149,3 +149,70 @@ indices.exists_alias: name: logs_2022-12-31 - is_true: '' + +--- +"Create lookup index": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + indices.create: + index: "test_lookup" + body: + settings: + index.mode: lookup + + - do: + indices.get_settings: + index: test_lookup + + - match: { test_lookup.settings.index.number_of_shards: "1"} + - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"} + +--- +"Create lookup index with one shard": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + indices.create: + index: "test_lookup" + body: + settings: + index: + mode: lookup + number_of_shards: 1 + + - do: + indices.get_settings: + index: test_lookup + + - match: { test_lookup.settings.index.number_of_shards: "1"} + - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"} + +--- 
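+# Negative case: lookup indices are pinned to a single shard (enforced by the IndexMode
+# validation added later in this patch), so requesting index.number_of_shards: 2 must fail
+# with the illegal_argument_exception asserted below.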
+"Create lookup index with two shards": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + catch: /illegal_argument_exception/ + indices.create: + index: test_lookup + body: + settings: + index.mode: lookup + index.number_of_shards: 2 + diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java new file mode 100644 index 0000000000000..f294d4a2e7943 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class LookupIndexModeIT extends ESIntegTestCase { + + @Override + protected int numberOfShards() { + return 1; + } + + public void testBasic() { + internalCluster().ensureAtLeastNumDataNodes(1); + Settings.Builder lookupSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + lookupSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + CreateIndexRequest createRequest = new CreateIndexRequest("hosts"); + createRequest.settings(lookupSettings); + createRequest.simpleMapping("ip", "type=ip", "os", "type=keyword"); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest)); + Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all")); + Map allHosts = Map.of( + "192.168.1.2", + "Windows", + "192.168.1.3", + "MacOS", + "192.168.1.4", + "Linux", + "192.168.1.5", + "Android", + "192.168.1.6", + "iOS", + "192.168.1.7", + "Windows", + "192.168.1.8", + "MacOS", + "192.168.1.9", + "Linux", 
+ "192.168.1.10", + "Linux", + "192.168.1.11", + "Windows" + ); + for (Map.Entry e : allHosts.entrySet()) { + client().prepareIndex("hosts").setSource("ip", e.getKey(), "os", e.getValue()).get(); + } + refresh("hosts"); + assertAcked(client().admin().indices().prepareCreate("events").setSettings(Settings.builder().put("index.mode", "logsdb")).get()); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + String ip = randomFrom(allHosts.keySet()); + String message = randomFrom("login", "logout", "shutdown", "restart"); + client().prepareIndex("events").setSource("@timestamp", "2024-01-01", "ip", ip, "message", message).get(); + } + refresh("events"); + // _search + { + SearchResponse resp = prepareSearch("events", "hosts").setQuery(new MatchQueryBuilder("_index_mode", "lookup")) + .setSize(10000) + .get(); + for (SearchHit hit : resp.getHits()) { + assertThat(hit.getIndex(), equalTo("hosts")); + } + assertHitCount(resp, allHosts.size()); + resp.decRef(); + } + // field_caps + { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); + request.indices("events", "hosts"); + request.fields("*"); + request.setMergeResults(false); + request.indexFilter(new MatchQueryBuilder("_index_mode", "lookup")); + var resp = client().fieldCaps(request).actionGet(); + assertThat(resp.getIndexResponses(), hasSize(1)); + FieldCapabilitiesIndexResponse indexResponse = resp.getIndexResponses().getFirst(); + assertThat(indexResponse.getIndexMode(), equalTo(IndexMode.LOOKUP)); + assertThat(indexResponse.getIndexName(), equalTo("hosts")); + } + } + + public void testRejectMoreThanOneShard() { + int numberOfShards = between(2, 5); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> { + client().admin() + .indices() + .prepareCreate("hosts") + .setSettings(Settings.builder().put("index.mode", "lookup").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)) + .setMapping("ip", "type=ip", "os", "type=keyword") + .get(); + }); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided " + numberOfShards) + ); + } + + public void testResizeLookupIndex() { + Settings.Builder createSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + createSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + CreateIndexRequest createIndexRequest = new CreateIndexRequest("lookup-1").settings(createSettings); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createIndexRequest)); + client().admin().indices().prepareAddBlock(IndexMetadata.APIBlock.WRITE, "lookup-1").get(); + + ResizeRequest clone = new ResizeRequest("lookup-2", "lookup-1"); + clone.setResizeType(ResizeType.CLONE); + assertAcked(client().admin().indices().execute(ResizeAction.INSTANCE, clone).actionGet()); + Settings settings = client().admin().indices().prepareGetSettings("lookup-2").get().getIndexToSettings().get("lookup-2"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.number_of_shards"), equalTo("1")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all")); + + ResizeRequest split = new ResizeRequest("lookup-3", "lookup-1"); + split.setResizeType(ResizeType.SPLIT); + split.getTargetIndexRequest().settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> 
client().admin().indices().execute(ResizeAction.INSTANCE, split).actionGet() + ); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided 3") + ); + } + + public void testResizeRegularIndexToLookup() { + String dataNode = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin() + .indices() + .prepareCreate("regular-1") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put("index.routing.allocation.require._name", dataNode) + ) + .setMapping("ip", "type=ip", "os", "type=keyword") + .get() + ); + client().admin().indices().prepareAddBlock(IndexMetadata.APIBlock.WRITE, "regular-1").get(); + client().admin() + .indices() + .prepareUpdateSettings("regular-1") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .get(); + + ResizeRequest clone = new ResizeRequest("lookup-3", "regular-1"); + clone.setResizeType(ResizeType.CLONE); + clone.getTargetIndexRequest().settings(Settings.builder().put("index.mode", "lookup")); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().execute(ResizeAction.INSTANCE, clone).actionGet() + ); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided 2") + ); + + ResizeRequest shrink = new ResizeRequest("lookup-4", "regular-1"); + shrink.setResizeType(ResizeType.SHRINK); + shrink.getTargetIndexRequest() + .settings(Settings.builder().put("index.mode", "lookup").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)); + + error = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().execute(ResizeAction.INSTANCE, shrink).actionGet() + ); + assertThat(error.getMessage(), equalTo("can't change index.mode of index [regular-1] from [standard] to [lookup]")); + } + + public void testDoNotOverrideAutoExpandReplicas() { + internalCluster().ensureAtLeastNumDataNodes(1); + Settings.Builder createSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + createSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + createSettings.put("index.auto_expand_replicas", "3-5"); + CreateIndexRequest createRequest = new CreateIndexRequest("hosts"); + createRequest.settings(createSettings); + createRequest.simpleMapping("ip", "type=ip", "os", "type=keyword"); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest)); + Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("3-5")); + } +} diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 777ff083f33f8..25bb792d827a9 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -181,6 +181,7 @@ static TransportVersion def(int id) { public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ = def(8_776_00_0); public static final TransportVersion SIMULATE_MAPPING_ADDITION = def(8_777_00_0); public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); + public static final TransportVersion INDEX_MODE_LOOKUP = def(8_779_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 69e3b7b70ff82..ed029db54bf06 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -308,7 +308,12 @@ private void onlyCreateIndex( final CreateIndexClusterStateUpdateRequest request, final ActionListener listener ) { - normalizeRequestSetting(request); + try { + normalizeRequestSetting(request); + } catch (Exception e) { + listener.onFailure(e); + return; + } var delegate = new AllocationActionListener<>(listener, threadPool.getThreadContext()); submitUnbatchedTask( @@ -1599,6 +1604,15 @@ static IndexMetadata validateResize( // of if the source shards are divisible by the number of target shards IndexMetadata.getRoutingFactor(sourceMetadata.getNumberOfShards(), INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } + if (targetIndexSettings.hasValue(IndexSettings.MODE.getKey())) { + IndexMode oldMode = Objects.requireNonNullElse(sourceMetadata.getIndexMode(), IndexMode.STANDARD); + IndexMode newMode = IndexSettings.MODE.get(targetIndexSettings); + if (newMode != oldMode) { + throw new IllegalArgumentException( + "can't change index.mode of index [" + sourceIndex + "] from [" + oldMode + "] to [" + newMode + "]" + ); + } + } return sourceMetadata; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 75ec67f26dd3a..e6339344b6e5f 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -9,7 +9,9 @@ package org.elasticsearch.index; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.compress.CompressedXContent; @@ -37,8 +39,10 @@ import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; import java.io.IOException; +import java.time.Instant; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.BooleanSupplier; @@ -308,6 +312,78 @@ public SourceFieldMapper.Mode defaultSourceMode() { public String getDefaultCodec() { return CodecService.BEST_COMPRESSION_CODEC; } + }, + LOOKUP("lookup") { + @Override + void validateWithOtherSettings(Map, Object> settings) { + final Integer providedNumberOfShards = (Integer) settings.get(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING); + if (providedNumberOfShards != null && providedNumberOfShards != 1) { + throw new IllegalArgumentException( + "index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided " + providedNumberOfShards + ); + } + } + + @Override + public void validateMapping(MappingLookup lookup) {}; + + @Override + public void validateAlias(@Nullable String indexRouting, @Nullable String searchRouting) {} + + @Override + public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup mappingLookup) { + + } + + @Override + public CompressedXContent getDefaultMapping(final IndexSettings indexSettings) { + return null; + } + + 
@Override + public TimestampBounds getTimestampBound(IndexMetadata indexMetadata) { + return null; + } + + @Override + public MetadataFieldMapper timeSeriesIdFieldMapper() { + // non time-series indices must not have a TimeSeriesIdFieldMapper + return null; + } + + @Override + public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { + // non time-series indices must not have a TimeSeriesRoutingIdFieldMapper + return null; + } + + @Override + public IdFieldMapper idFieldMapperWithoutFieldData() { + return ProvidedIdFieldMapper.NO_FIELD_DATA; + } + + @Override + public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { + return new ProvidedIdFieldMapper(fieldDataEnabled); + } + + @Override + public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { + return DocumentDimensions.Noop.INSTANCE; + } + + @Override + public boolean shouldValidateTimestamp() { + return false; + } + + @Override + public void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) {} + + @Override + public SourceFieldMapper.Mode defaultSourceMode() { + return SourceFieldMapper.Mode.STORED; + } }; private static final String HOST_NAME = "host.name"; @@ -370,6 +446,7 @@ private static CompressedXContent createDefaultMapping(boolean includeHostName) static final List> VALIDATE_WITH_SETTINGS = List.copyOf( Stream.concat( Stream.of( + IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING, IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING, IndexMetadata.INDEX_ROUTING_PATH, IndexSettings.TIME_SERIES_START_TIME, @@ -476,11 +553,12 @@ public static IndexMode fromString(String value) { case "standard" -> IndexMode.STANDARD; case "time_series" -> IndexMode.TIME_SERIES; case "logsdb" -> IndexMode.LOGSDB; + case "lookup" -> IndexMode.LOOKUP; default -> throw new IllegalArgumentException( "[" + value + "] is an invalid index mode, valid modes are: [" - + Arrays.stream(IndexMode.values()).map(IndexMode::toString).collect(Collectors.joining()) + + Arrays.stream(IndexMode.values()).map(IndexMode::toString).collect(Collectors.joining(",")) + "]" ); }; @@ -492,6 +570,7 @@ public static IndexMode readFrom(StreamInput in) throws IOException { case 0 -> STANDARD; case 1 -> TIME_SERIES; case 2 -> LOGSDB; + case 3 -> LOOKUP; default -> throw new IllegalStateException("unexpected index mode [" + mode + "]"); }; } @@ -501,6 +580,7 @@ public static void writeTo(IndexMode indexMode, StreamOutput out) throws IOExcep case STANDARD -> 0; case TIME_SERIES -> 1; case LOGSDB -> 2; + case LOOKUP -> out.getTransportVersion().onOrAfter(TransportVersions.INDEX_MODE_LOOKUP) ? 3 : 0; }; out.writeByte((byte) code); } @@ -509,4 +589,37 @@ public static void writeTo(IndexMode indexMode, StreamOutput out) throws IOExcep public String toString() { return getName(); } + + /** + * A built-in index setting provider that supplies additional index settings based on the index mode. + * Currently, only the lookup index mode provides non-empty additional settings. 
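+     * For a lookup index this means forcing {@code index.number_of_shards: 1} and defaulting
+     * {@code index.auto_expand_replicas} to {@code 0-all} (an explicit value in the create
+     * request still wins), as implemented in {@code getAdditionalIndexSettings} below.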
+ */ + public static final class IndexModeSettingsProvider implements IndexSettingProvider { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + IndexMode indexMode = templateIndexMode; + if (indexMode == null) { + String modeName = indexTemplateAndCreateRequestSettings.get(IndexSettings.MODE.getKey()); + if (modeName != null) { + indexMode = IndexMode.valueOf(modeName.toUpperCase(Locale.ROOT)); + } + } + if (indexMode == LOOKUP) { + return Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all") + .build(); + } else { + return Settings.EMPTY; + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java index 11df8710fad6c..ba67bc03e1441 100644 --- a/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java +++ b/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java @@ -55,7 +55,7 @@ public IndicesMetrics(MeterRegistry meterRegistry, IndicesService indicesService } private static List registerAsyncMetrics(MeterRegistry registry, IndicesStatsCache cache) { - final int TOTAL_METRICS = 36; + final int TOTAL_METRICS = 48; List metrics = new ArrayList<>(TOTAL_METRICS); for (IndexMode indexMode : IndexMode.values()) { String name = indexMode.getName(); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 7e3991c1df1f4..784e02059823b 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -80,6 +80,7 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; @@ -108,6 +109,7 @@ import org.elasticsearch.health.node.tracker.RepositoriesHealthTracker; import org.elasticsearch.health.stats.HealthApiStats; import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexingPressure; @@ -820,7 +822,10 @@ private void construct( final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); IndexSettingProviders indexSettingProviders = new IndexSettingProviders( - pluginsService.flatMap(p -> p.getAdditionalIndexSettingProviders(parameters)).collect(Collectors.toSet()) + Sets.union( + builtinIndexSettingProviders(), + pluginsService.flatMap(p -> p.getAdditionalIndexSettingProviders(parameters)).collect(Collectors.toSet()) + ) ); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService); @@ -1656,4 +1661,7 @@ private Module loadPersistentTasksService( }; } + private Set builtinIndexSettingProviders() { + return Set.of(new IndexMode.IndexModeSettingsProvider()); + } } diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java index 899486399af6b..900a352d42f30 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java @@ -21,5 +21,10 @@ public class CreateIndexCapabilities { */ private static final String LOGSDB_INDEX_MODE_CAPABILITY = "logsdb_index_mode"; - public static Set CAPABILITIES = Set.of(LOGSDB_INDEX_MODE_CAPABILITY); + /** + * Support lookup index mode + */ + private static final String LOOKUP_INDEX_MODE_CAPABILITY = "lookup_index_mode"; + + public static Set CAPABILITIES = Set.of(LOGSDB_INDEX_MODE_CAPABILITY, LOOKUP_INDEX_MODE_CAPABILITY); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index da04f30ff8023..3960aa5a91cc5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -142,7 +142,7 @@ protected static String randomIndexOptions() { protected final DocumentMapper createDocumentMapper(XContentBuilder mappings, IndexMode indexMode) throws IOException { return switch (indexMode) { - case STANDARD -> createDocumentMapper(mappings); + case STANDARD, LOOKUP -> createDocumentMapper(mappings); case TIME_SERIES -> createTimeSeriesModeDocumentMapper(mappings); case LOGSDB -> createLogsModeDocumentMapper(mappings); }; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 478a0d08d6612..150eddf039cec 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -700,6 +700,9 @@ public void testProcessOnceOnPrimary() throws Exception { case LOGSDB: settingsBuilder.put("index.mode", IndexMode.LOGSDB.getName()); break; + case LOOKUP: + settingsBuilder.put("index.mode", IndexMode.LOOKUP.getName()); + break; default: throw new UnsupportedOperationException("Unknown index mode [" + indexMode + "]"); } From 057062bcae2b935294d3b9e91cdffdecd2a34208 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:09:46 +1100 Subject: [PATCH 128/202] Mute org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} #115600 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 827a604cd6a19..4af02859d88d4 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -273,6 +273,9 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-across-clusters/line_197} issue: https://github.com/elastic/elasticsearch/issues/115575 +- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT + method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} + issue: 
https://github.com/elastic/elasticsearch/issues/115600 # Examples: # From d5265bef572eaa87cc07b861ad00c74f8a955fbf Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 24 Oct 2024 23:17:06 +0200 Subject: [PATCH 129/202] Replace IndexNameExpressionResolver.ExpressionList with imperative logic (#115487) The approach taken by `ExpressionList` becomes very expensive for large numbers of indices/datastreams. It implies that large lists of concrete names (as they are passed down from the transport layer via e.g. security) are copied at least twice during iteration. Removing the intermediary list and inlining the logic brings down the latency of searches targetting many shards/indices at once and allows for subsequent optimizations. The removed tests appear redundant as they tested an implementation detail of the IndexNameExpressionResolver which itself is well covered by its own tests. --- .../metadata/IndexNameExpressionResolver.java | 186 +++++------ .../cluster/metadata/ExpressionListTests.java | 309 ------------------ 2 files changed, 85 insertions(+), 410 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 2229166a2d779..39499253c8790 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -48,7 +48,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -253,7 +252,7 @@ protected static Collection resolveExpressions(Context context, String.. } else { return ExplicitResourceNameFilter.filterUnavailable( context, - DateMathExpressionResolver.resolve(context, List.of(expressions)) + DateMathExpressionResolver.resolve(context, Arrays.asList(expressions)) ); } } else { @@ -264,7 +263,10 @@ protected static Collection resolveExpressions(Context context, String.. 
} else { return WildcardExpressionResolver.resolve( context, - ExplicitResourceNameFilter.filterUnavailable(context, DateMathExpressionResolver.resolve(context, List.of(expressions))) + ExplicitResourceNameFilter.filterUnavailable( + context, + DateMathExpressionResolver.resolve(context, Arrays.asList(expressions)) + ) ); } } @@ -1294,34 +1296,51 @@ private static boolean shouldIncludeIfAlias(IndexAbstraction ia, IndexNameExpres * */ public static Collection resolve(Context context, List expressions) { - ExpressionList expressionList = new ExpressionList(context, expressions); // fast exit if there are no wildcards to evaluate - if (expressionList.hasWildcard() == false) { + if (context.getOptions().expandWildcardExpressions() == false) { + return expressions; + } + int firstWildcardIndex = 0; + for (; firstWildcardIndex < expressions.size(); firstWildcardIndex++) { + String expression = expressions.get(firstWildcardIndex); + if (isWildcard(expression)) { + break; + } + } + if (firstWildcardIndex == expressions.size()) { return expressions; } Set result = new HashSet<>(); - for (ExpressionList.Expression expression : expressionList) { - if (expression.isWildcard()) { - Stream matchingResources = matchResourcesToWildcard(context, expression.get()); + for (int i = 0; i < firstWildcardIndex; i++) { + result.add(expressions.get(i)); + } + AtomicBoolean emptyWildcardExpansion = context.getOptions().allowNoIndices() ? null : new AtomicBoolean(); + for (int i = firstWildcardIndex; i < expressions.size(); i++) { + String expression = expressions.get(i); + boolean isExclusion = i > firstWildcardIndex && expression.charAt(0) == '-'; + if (i == firstWildcardIndex || isWildcard(expression)) { + Stream matchingResources = matchResourcesToWildcard( + context, + isExclusion ? expression.substring(1) : expression + ); Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); - AtomicBoolean emptyWildcardExpansion = new AtomicBoolean(false); - if (context.getOptions().allowNoIndices() == false) { + if (emptyWildcardExpansion != null) { emptyWildcardExpansion.set(true); matchingOpenClosedNames = matchingOpenClosedNames.peek(x -> emptyWildcardExpansion.set(false)); } - if (expression.isExclusion()) { - matchingOpenClosedNames.forEachOrdered(result::remove); + if (isExclusion) { + matchingOpenClosedNames.forEach(result::remove); } else { - matchingOpenClosedNames.forEachOrdered(result::add); + matchingOpenClosedNames.forEach(result::add); } - if (emptyWildcardExpansion.get()) { - throw notFoundException(expression.get()); + if (emptyWildcardExpansion != null && emptyWildcardExpansion.get()) { + throw notFoundException(expression); } } else { - if (expression.isExclusion()) { - result.remove(expression.get()); + if (isExclusion) { + result.remove(expression.substring(1)); } else { - result.add(expression.get()); + result.add(expression); } } } @@ -1507,27 +1526,35 @@ private DateMathExpressionResolver() { // utility class } + /** + * Resolves date math expressions. If this is a noop the given {@code expressions} list is returned without copying. + * As a result callers of this method should not mutate the returned list. Mutating it may come with unexpected side effects. 
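+         * For example, a date-math expression such as {@code <logstash-{now/d}>} resolves to a
+         * concrete name like {@code logstash-2024.10.24}; the date shown is illustrative, the
+         * actual value is derived from {@code context.getStartTime()}.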
+ */ public static List resolve(Context context, List expressions) { - List result = new ArrayList<>(expressions.size()); - for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { - result.add(resolveExpression(expression, context::getStartTime)); + boolean wildcardSeen = false; + final boolean expandWildcards = context.getOptions().expandWildcardExpressions(); + String[] result = null; + for (int i = 0, n = expressions.size(); i < n; i++) { + String expression = expressions.get(i); + // accepts date-math exclusions that are of the form "-<...{}>",f i.e. the "-" is outside the "<>" date-math template + boolean isExclusion = wildcardSeen && expression.startsWith("-"); + wildcardSeen = wildcardSeen || (expandWildcards && isWildcard(expression)); + String toResolve = isExclusion ? expression.substring(1) : expression; + String resolved = resolveExpression(toResolve, context::getStartTime); + if (toResolve != resolved) { + if (result == null) { + result = expressions.toArray(Strings.EMPTY_ARRAY); + } + result[i] = isExclusion ? "-" + resolved : resolved; + } } - return result; + return result == null ? expressions : Arrays.asList(result); } static String resolveExpression(String expression) { return resolveExpression(expression, System::currentTimeMillis); } - static String resolveExpression(ExpressionList.Expression expression, LongSupplier getTime) { - if (expression.isExclusion()) { - // accepts date-math exclusions that are of the form "-<...{}>", i.e. the "-" is outside the "<>" date-math template - return "-" + resolveExpression(expression.get(), getTime); - } else { - return resolveExpression(expression.get(), getTime); - } - } - static String resolveExpression(String expression, LongSupplier getTime) { if (expression.startsWith(EXPRESSION_LEFT_BOUND) == false || expression.endsWith(EXPRESSION_RIGHT_BOUND) == false) { return expression; @@ -1689,14 +1716,35 @@ private ExplicitResourceNameFilter() { */ public static List filterUnavailable(Context context, List expressions) { ensureRemoteIndicesRequireIgnoreUnavailable(context.getOptions(), expressions); - List result = new ArrayList<>(expressions.size()); - for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { - validateAliasOrIndex(expression); - if (expression.isWildcard() || expression.isExclusion() || ensureAliasOrIndexExists(context, expression.get())) { - result.add(expression.expression()); + final boolean expandWildcards = context.getOptions().expandWildcardExpressions(); + boolean wildcardSeen = false; + List result = null; + for (int i = 0; i < expressions.size(); i++) { + String expression = expressions.get(i); + if (Strings.isEmpty(expression)) { + throw notFoundException(expression); + } + // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API + // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, + // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown + // if the expression can't be found. 
+ if (expression.charAt(0) == '_') { + throw new InvalidIndexNameException(expression, "must not start with '_'."); + } + final boolean isWildcard = expandWildcards && isWildcard(expression); + if (isWildcard || (wildcardSeen && expression.charAt(0) == '-') || ensureAliasOrIndexExists(context, expression)) { + if (result != null) { + result.add(expression); + } + } else { + if (result == null) { + result = new ArrayList<>(expressions.size() - 1); + result.addAll(expressions.subList(0, i)); + } } + wildcardSeen |= isWildcard; } - return result; + return result == null ? expressions : result; } /** @@ -1736,19 +1784,6 @@ private static boolean ensureAliasOrIndexExists(Context context, String name) { return true; } - private static void validateAliasOrIndex(ExpressionList.Expression expression) { - if (Strings.isEmpty(expression.expression())) { - throw notFoundException(expression.expression()); - } - // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API - // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, - // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown - // if the expression can't be found. - if (expression.expression().charAt(0) == '_') { - throw new InvalidIndexNameException(expression.expression(), "must not start with '_'."); - } - } - private static void ensureRemoteIndicesRequireIgnoreUnavailable(IndicesOptions options, List indexExpressions) { if (options.ignoreUnavailable()) { return; @@ -1773,57 +1808,6 @@ private static void failOnRemoteIndicesNotIgnoringUnavailable(List index } } - /** - * Used to iterate expression lists and work out which expression item is a wildcard or an exclusion. - */ - public static final class ExpressionList implements Iterable { - private final List expressionsList; - private final boolean hasWildcard; - - public record Expression(String expression, boolean isWildcard, boolean isExclusion) { - public String get() { - if (isExclusion()) { - // drop the leading "-" if exclusion because it is easier for callers to handle it like this - return expression().substring(1); - } else { - return expression(); - } - } - } - - /** - * Creates the expression iterable that can be used to easily check which expression item is a wildcard or an exclusion (or both). - * The {@param context} is used to check if wildcards ought to be considered or not. 
- */ - public ExpressionList(Context context, List expressionStrings) { - List expressionsList = new ArrayList<>(expressionStrings.size()); - boolean wildcardSeen = false; - for (String expressionString : expressionStrings) { - boolean isExclusion = expressionString.startsWith("-") && wildcardSeen; - if (context.getOptions().expandWildcardExpressions() && isWildcard(expressionString)) { - wildcardSeen = true; - expressionsList.add(new Expression(expressionString, true, isExclusion)); - } else { - expressionsList.add(new Expression(expressionString, false, isExclusion)); - } - } - this.expressionsList = expressionsList; - this.hasWildcard = wildcardSeen; - } - - /** - * Returns {@code true} if the expression contains any wildcard and the options allow wildcard expansion - */ - public boolean hasWildcard() { - return this.hasWildcard; - } - - @Override - public Iterator iterator() { - return expressionsList.iterator(); - } - } - /** * This is a context for the DateMathExpressionResolver which does not require {@code IndicesOptions} or {@code ClusterState} * since it uses only the start time to resolve expressions. diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java deleted file mode 100644 index 1ca59ff402bd8..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList.Expression; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.test.ESTestCase; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class ExpressionListTests extends ESTestCase { - - public void testEmpty() { - ExpressionList expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), List.of()); - assertThat(expressionList.iterator().hasNext(), is(false)); - assertThat(expressionList.hasWildcard(), is(false)); - expressionList = new ExpressionList(getContextWithOptions(getNoExpandWildcardsIndicesOptions()), List.of()); - assertThat(expressionList.iterator().hasNext(), is(false)); - assertThat(expressionList.hasWildcard(), is(false)); - } - - public void testExplicitSingleNameExpression() { - for (IndicesOptions indicesOptions : List.of(getExpandWildcardsIndicesOptions(), getNoExpandWildcardsIndicesOptions())) { - for (String expressionString : List.of("non_wildcard", "-non_exclusion")) { - ExpressionList expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); - assertThat(expressionList.hasWildcard(), is(false)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); - } - Iterator expressionIterator = expressionList.iterator(); - assertThat(expressionIterator.hasNext(), is(true)); - if (randomBoolean()) { - expressionIterator = expressionList.iterator(); - } - Expression expression = expressionIterator.next(); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.get(), is(expressionString)); - assertThat(expressionIterator.hasNext(), is(false)); - } - } - } - - public void testWildcardSingleExpression() { - for (String wildcardTest : List.of("*", "a*", "*b", "a*b", "a-*b", "a*-b", "-*", "-a*", "-*b", "**", "*-*")) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - List.of(wildcardTest) - ); - assertThat(expressionList.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), List.of(wildcardTest)); - } - Iterator expressionIterator = expressionList.iterator(); - assertThat(expressionIterator.hasNext(), is(true)); - if (randomBoolean()) { - expressionIterator = expressionList.iterator(); - } - Expression expression = expressionIterator.next(); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.isWildcard(), is(true)); - assertThat(expression.get(), is(wildcardTest)); - assertThat(expressionIterator.hasNext(), is(false)); - } - } - - public void testWildcardLongerExpression() { - List onlyExplicits = randomList(7, () -> randomAlphaOfLengthBetween(0, 5)); - String wildcard = randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**"); - List expressionList = new ArrayList<>(onlyExplicits.size() + 1); - 
expressionList.addAll(randomSubsetOf(onlyExplicits)); - int wildcardPos = expressionList.size(); - expressionList.add(wildcard); - for (String item : onlyExplicits) { - if (expressionList.contains(item) == false) { - expressionList.add(item); - } - } - ExpressionList expressionIterable = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), expressionList); - assertThat(expressionIterable.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionIterable = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), expressionList); - } - int i = 0; - for (Expression expression : expressionIterable) { - assertThat(expression.isExclusion(), is(false)); - if (i != wildcardPos) { - assertThat(expression.isWildcard(), is(false)); - } else { - assertThat(expression.isWildcard(), is(true)); - } - assertThat(expression.get(), is(expressionList.get(i++))); - } - } - - public void testWildcardsNoExclusionExpressions() { - for (List wildcardExpression : List.of( - List.of("*"), - List.of("a", "*"), - List.of("-b", "*c"), - List.of("-", "a", "c*"), - List.of("*", "a*", "*b"), - List.of("-*", "a", "b*") - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - wildcardExpression - ); - assertThat(expressionList.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), wildcardExpression); - } - int i = 0; - for (Expression expression : expressionList) { - assertThat(expression.isExclusion(), is(false)); - if (wildcardExpression.get(i).contains("*")) { - assertThat(expression.isWildcard(), is(true)); - } else { - assertThat(expression.isWildcard(), is(false)); - } - assertThat(expression.get(), is(wildcardExpression.get(i++))); - } - } - } - - public void testWildcardExpressionNoExpandOptions() { - for (List wildcardExpression : List.of( - List.of("*"), - List.of("a", "*"), - List.of("-b", "*c"), - List.of("*d", "-"), - List.of("*", "-*"), - List.of("-", "a", "c*"), - List.of("*", "a*", "*b") - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getNoExpandWildcardsIndicesOptions()), - wildcardExpression - ); - assertThat(expressionList.hasWildcard(), is(false)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getNoExpandWildcardsIndicesOptions()), wildcardExpression); - } - int i = 0; - for (Expression expression : expressionList) { - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(wildcardExpression.get(i++))); - } - } - } - - public void testSingleExclusionExpression() { - String wildcard = randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**", "*-*"); - int wildcardPos = randomIntBetween(0, 3); - String exclusion = randomFrom("-*", "-", "-c*", "-ab", "--"); - int exclusionPos = randomIntBetween(wildcardPos + 1, 7); - List exclusionExpression = new ArrayList<>(); - for (int i = 0; i < wildcardPos; i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - exclusionExpression.add(wildcard); - for (int i = wildcardPos + 1; i < exclusionPos; i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - exclusionExpression.add(exclusion); - for (int i = 0; i < randomIntBetween(0, 3); i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - ExpressionList expressionList = new 
ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), exclusionExpression); - if (randomBoolean()) { - assertThat(expressionList.hasWildcard(), is(true)); - } - int i = 0; - for (Expression expression : expressionList) { - if (i == wildcardPos) { - assertThat(expression.isWildcard(), is(true)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++))); - } else if (i == exclusionPos) { - assertThat(expression.isExclusion(), is(true)); - assertThat(expression.isWildcard(), is(exclusionExpression.get(i).contains("*"))); - assertThat(expression.get(), is(exclusionExpression.get(i++).substring(1))); - } else { - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++))); - } - } - } - - public void testExclusionsExpression() { - for (Tuple, List> exclusionExpression : List.of( - new Tuple<>(List.of("-a", "*", "-a"), List.of(false, false, true)), - new Tuple<>(List.of("-b*", "c", "-a"), List.of(false, false, true)), - new Tuple<>(List.of("*d", "-", "*b"), List.of(false, true, false)), - new Tuple<>(List.of("-", "--", "-*", "", "-*"), List.of(false, false, false, false, true)), - new Tuple<>(List.of("*-", "-*", "a", "-b"), List.of(false, true, false, true)), - new Tuple<>(List.of("a", "-b", "-*", "-b", "*", "-b"), List.of(false, false, false, true, false, true)), - new Tuple<>(List.of("-a", "*d", "-a", "-*b", "-b", "--"), List.of(false, false, true, true, true, true)) - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - exclusionExpression.v1() - ); - if (randomBoolean()) { - assertThat(expressionList.hasWildcard(), is(true)); - } - int i = 0; - for (Expression expression : expressionList) { - boolean isExclusion = exclusionExpression.v2().get(i); - assertThat(expression.isExclusion(), is(isExclusion)); - assertThat(expression.isWildcard(), is(exclusionExpression.v1().get(i).contains("*"))); - if (isExclusion) { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++).substring(1))); - } else { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++))); - } - } - } - } - - private IndicesOptions getExpandWildcardsToOpenOnlyIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - true, - false, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsToCloseOnlyIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - false, - true, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsToOpenCloseIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - true, - true, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsIndicesOptions() { - return ESTestCase.>randomFrom( - this::getExpandWildcardsToOpenOnlyIndicesOptions, - this::getExpandWildcardsToCloseOnlyIndicesOptions, - this::getExpandWildcardsToOpenCloseIndicesOptions - ).get(); - } - - private IndicesOptions getNoExpandWildcardsIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - false, - false, - randomBoolean(), - randomBoolean(), - 
randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private Context getContextWithOptions(IndicesOptions indicesOptions) { - Context context = mock(Context.class); - when(context.getOptions()).thenReturn(indicesOptions); - return context; - } -} From b2ab9df1a9ff71442ad8d695ec15fcf8b72e133d Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 22:33:56 +0100 Subject: [PATCH 130/202] [ML] Fix timeout attaching to missing deployment (#115517) Fixes a timeout in the Inference API where if connecting to an existing deployment and that deployment does not exist the listener was not called. --- .../xpack/inference/CreateFromDeploymentIT.java | 8 ++++++++ .../ElasticsearchInternalService.java | 14 +++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java index 0bfb6e9e43b03..273b16d295a3d 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java @@ -109,6 +109,14 @@ public void testModelIdDoesNotMatch() throws IOException { ); } + public void testDeploymentDoesNotExist() { + var deploymentId = "missing_deployment"; + + var inferenceId = "inference_on_missing_deployment"; + var e = expectThrows(ResponseException.class, () -> putModel(inferenceId, endpointConfig(deploymentId), TaskType.SPARSE_EMBEDDING)); + assertThat(e.getMessage(), containsString("Cannot find deployment [missing_deployment]")); + } + public void testNumAllocationsIsUpdated() throws IOException { var modelId = "update_num_allocations"; var deploymentId = modelId; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index a0235f74ce511..fec690199d97d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -34,7 +34,6 @@ import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; import org.elasticsearch.xpack.core.ml.action.GetDeploymentStatsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; -import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction; import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentStats; @@ -913,7 +912,7 @@ private void validateAgainstDeployment( listener.onFailure( new ElasticsearchStatusException( "Deployment [{}] uses model [{}] which does not match the model [{}] in the request.", - RestStatus.BAD_REQUEST, // TODO better message + RestStatus.BAD_REQUEST, deploymentId, response.get().getModelId(), modelId @@ -933,21 +932,22 @@ private void validateAgainstDeployment( 
                checkTaskTypeForMlNodeModel(response.get().getModelId(), taskType, l.delegateFailureAndWrap((l2, compatibleTaskType) -> {
                    l2.onResponse(updatedSettings);
                }));
+           } else {
+               listener.onFailure(new ElasticsearchStatusException("Cannot find deployment [{}]", RestStatus.NOT_FOUND, deploymentId));
            }
        }));
    }

    private void getDeployment(String deploymentId, ActionListener<Optional<AssignmentStats>> listener) {
        client.execute(
-           GetTrainedModelsStatsAction.INSTANCE,
-           new GetTrainedModelsStatsAction.Request(deploymentId),
+           GetDeploymentStatsAction.INSTANCE,
+           new GetDeploymentStatsAction.Request(deploymentId),
            listener.delegateFailureAndWrap((l, response) -> {
                l.onResponse(
-                   response.getResources()
+                   response.getStats()
                        .results()
                        .stream()
-                       .filter(s -> s.getDeploymentStats() != null && s.getDeploymentStats().getDeploymentId().equals(deploymentId))
-                       .map(GetTrainedModelsStatsAction.Response.TrainedModelStats::getDeploymentStats)
+                       .filter(s -> s.getDeploymentId() != null && s.getDeploymentId().equals(deploymentId))
                        .findFirst()
                );
            })

From c556a293c384b92a9ef71ec37bd49fb143300236 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Fri, 25 Oct 2024 08:50:44 +1100
Subject: [PATCH 131/202] Mute org.elasticsearch.test.rest.ClientYamlTestSuiteIT
 test {yaml=indices.create/10_basic/Create lookup index} #115605

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index 4af02859d88d4..084bf27d6a11b 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -276,6 +276,9 @@ tests:
 - class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT
   method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search}
   issue: https://github.com/elastic/elasticsearch/issues/115600
+- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT
+  method: test {yaml=indices.create/10_basic/Create lookup index}
+  issue: https://github.com/elastic/elasticsearch/issues/115605

 # Examples:
 #

From 5714b989fabcf944fb719f31200661789e0824f2 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Thu, 24 Oct 2024 16:58:41 -0700
Subject: [PATCH 132/202] Do not run lookup index YAML with two shards (#115608)

We can randomly inject a global template that defaults to 2 shards instead of 1.
This causes the lookup index YAML tests to fail.
To avoid this, the change requires specifying the default_shards setting for these tests --- .../resources/rest-api-spec/test/indices.create/10_basic.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml index 31d127b80c844..d0e1759073e1b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -153,7 +153,7 @@ --- "Create lookup index": - requires: - test_runner_features: [ capabilities ] + test_runner_features: [ capabilities, default_shards ] capabilities: - method: PUT path: /{index} @@ -176,7 +176,7 @@ --- "Create lookup index with one shard": - requires: - test_runner_features: [ capabilities ] + test_runner_features: [ capabilities, default_shards ] capabilities: - method: PUT path: /{index} From bbd887a66a1330188047825799dc8368dbd56ba8 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Fri, 25 Oct 2024 07:45:40 +0200 Subject: [PATCH 133/202] Identify system threads using a Thread subclass (#113562) --- .../common/util/concurrent/EsExecutors.java | 35 ++++++++++--- .../DefaultBuiltInExecutorBuilders.java | 12 +++-- .../threadpool/ExecutorBuilder.java | 7 ++- .../threadpool/FixedExecutorBuilder.java | 49 +++++++++++++++++-- .../threadpool/ScalingExecutorBuilder.java | 4 +- .../util/concurrent/EsExecutorsTests.java | 8 ++- 6 files changed, 98 insertions(+), 17 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index b10db7d4d1dd3..9120576815bac 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -326,16 +326,25 @@ public static String executorName(Thread thread) { } public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) { - return daemonThreadFactory(threadName(settings, namePrefix)); + return createDaemonThreadFactory(threadName(settings, namePrefix), false); } public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix) { + return daemonThreadFactory(nodeName, namePrefix, false); + } + + public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix, boolean isSystemThread) { assert nodeName != null && false == nodeName.isEmpty(); - return daemonThreadFactory(threadName(nodeName, namePrefix)); + return createDaemonThreadFactory(threadName(nodeName, namePrefix), isSystemThread); } - public static ThreadFactory daemonThreadFactory(String namePrefix) { - return new EsThreadFactory(namePrefix); + public static ThreadFactory daemonThreadFactory(String name) { + assert name != null && name.isEmpty() == false; + return createDaemonThreadFactory(name, false); + } + + private static ThreadFactory createDaemonThreadFactory(String namePrefix, boolean isSystemThread) { + return new EsThreadFactory(namePrefix, isSystemThread); } static class EsThreadFactory implements ThreadFactory { @@ -343,22 +352,36 @@ static class EsThreadFactory implements ThreadFactory { final ThreadGroup group; final AtomicInteger threadNumber = new AtomicInteger(1); final String namePrefix; + final boolean 
isSystem; - EsThreadFactory(String namePrefix) { + EsThreadFactory(String namePrefix, boolean isSystem) { this.namePrefix = namePrefix; SecurityManager s = System.getSecurityManager(); group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup(); + this.isSystem = isSystem; } @Override public Thread newThread(Runnable r) { return AccessController.doPrivileged((PrivilegedAction) () -> { - Thread t = new Thread(group, r, namePrefix + "[T#" + threadNumber.getAndIncrement() + "]", 0); + Thread t = new EsThread(group, r, namePrefix + "[T#" + threadNumber.getAndIncrement() + "]", 0, isSystem); t.setDaemon(true); return t; }); } + } + public static class EsThread extends Thread { + private final boolean isSystem; + + EsThread(ThreadGroup group, Runnable target, String name, long stackSize, boolean isSystem) { + super(group, target, name, stackSize); + this.isSystem = isSystem; + } + + public boolean isSystem() { + return isSystem; + } } /** diff --git a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java index c3a24d012c013..a97d22a976631 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java +++ b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java @@ -170,7 +170,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_READ, halfProcMaxAt5, 2000, - EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK, + true ) ); result.put( @@ -180,7 +181,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_WRITE, halfProcMaxAt5, 1000, - new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA) + new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA), + true ) ); result.put( @@ -190,7 +192,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_CRITICAL_READ, halfProcMaxAt5, 2000, - EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK, + true ) ); result.put( @@ -200,7 +203,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_CRITICAL_WRITE, halfProcMaxAt5, 1500, - new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA) + new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA), + true ) ); return unmodifiableMap(result); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java index 2337d51d07571..c259feb1c978e 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java @@ -24,9 +24,11 @@ public abstract class ExecutorBuilder { private final String name; + private final boolean isSystemThread; - public ExecutorBuilder(String name) { + public ExecutorBuilder(String name, boolean isSystemThread) { this.name = name; + this.isSystemThread = isSystemThread; } protected String name() { @@ -90,4 +92,7 @@ abstract static class ExecutorSettings { } + public boolean isSystemThread() { + return isSystemThread; + } } diff --git a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java index 07db563da39a1..9c723f241f1d0 100644 --- 
a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -51,7 +51,28 @@ public final class FixedExecutorBuilder extends ExecutorBuilder( sizeKey, @@ -102,7 +145,7 @@ FixedExecutorSettings getSettings(Settings settings) { ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) { int size = settings.size; int queueSize = settings.queueSize; - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings.nodeName, name(), isSystemThread()); final ExecutorService executor = EsExecutors.newFixed( settings.nodeName + "/" + name(), size, diff --git a/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java index a31f940cdb2dc..1017d41a77444 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java @@ -104,7 +104,7 @@ public ScalingExecutorBuilder( final String prefix, final EsExecutors.TaskTrackingConfig trackingConfig ) { - super(name); + super(name, false); this.coreSetting = Setting.intSetting(settingsKey(prefix, "core"), core, Setting.Property.NodeScope); this.maxSetting = Setting.intSetting(settingsKey(prefix, "max"), max, Setting.Property.NodeScope); this.keepAliveSetting = Setting.timeSetting(settingsKey(prefix, "keep_alive"), keepAlive, Setting.Property.NodeScope); @@ -131,7 +131,7 @@ ThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings, final Th int core = settings.core; int max = settings.max; final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.SCALING, core, max, keepAlive, null); - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings.nodeName, name()); ExecutorService executor; executor = EsExecutors.newScaling( settings.nodeName + "/" + name(), diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index bdfec9dfaa630..2867c9e007937 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -635,15 +635,19 @@ public void testParseExecutorName() throws InterruptedException { final var executorName = randomAlphaOfLength(10); final String nodeName = rarely() ? 
null : randomIdentifier(); final ThreadFactory threadFactory; + final boolean isSystem; if (nodeName == null) { + isSystem = false; threadFactory = EsExecutors.daemonThreadFactory(Settings.EMPTY, executorName); } else if (randomBoolean()) { + isSystem = false; threadFactory = EsExecutors.daemonThreadFactory( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(), executorName ); } else { - threadFactory = EsExecutors.daemonThreadFactory(nodeName, executorName); + isSystem = randomBoolean(); + threadFactory = EsExecutors.daemonThreadFactory(nodeName, executorName, isSystem); } final var thread = threadFactory.newThread(() -> {}); @@ -652,6 +656,8 @@ public void testParseExecutorName() throws InterruptedException { assertThat(EsExecutors.executorName(thread), equalTo(executorName)); assertThat(EsExecutors.executorName("TEST-" + thread.getName()), is(nullValue())); assertThat(EsExecutors.executorName("LuceneTestCase" + thread.getName()), is(nullValue())); + assertThat(EsExecutors.executorName("LuceneTestCase" + thread.getName()), is(nullValue())); + assertThat(((EsExecutors.EsThread) thread).isSystem(), equalTo(isSystem)); } finally { thread.join(); } From 7f573c6c28fb42e89d8bb76d6764dc681c239e06 Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:50:05 +0200 Subject: [PATCH 134/202] Only aggregations require at least one shard request (#115314) * unskipping shards only when aggs * Update docs/changelog/115314.yaml * fixed more tests * null check for searchRequest.source() --- docs/changelog/115314.yaml | 5 +++ .../datastreams/TSDBIndexingIT.java | 2 +- .../org/elasticsearch/search/CCSDuelIT.java | 4 ++- .../test/multi_cluster/70_skip_shards.yml | 12 +++---- .../multi_cluster/90_index_name_query.yml | 4 +-- .../search/ccs/CrossClusterSearchIT.java | 4 +-- .../search/profile/query/QueryProfilerIT.java | 6 +++- .../search/stats/FieldUsageStatsIT.java | 12 ++++--- .../action/search/TransportSearchAction.java | 4 ++- .../search/CrossClusterAsyncSearchIT.java | 32 +++++++++++++------ .../mapper/SearchIdleTests.java | 10 ++---- .../rrf/RRFRankCoordinatorCanMatchIT.java | 5 +-- .../rank/rrf/RRFRankShardCanMatchIT.java | 5 +-- ...pshotsCanMatchOnCoordinatorIntegTests.java | 12 +++---- .../checkpoint/TransformCCSCanMatchIT.java | 6 ++-- .../oldrepos/OldRepositoryAccessIT.java | 3 +- 16 files changed, 70 insertions(+), 56 deletions(-) create mode 100644 docs/changelog/115314.yaml diff --git a/docs/changelog/115314.yaml b/docs/changelog/115314.yaml new file mode 100644 index 0000000000000..76ac12d58fcf3 --- /dev/null +++ b/docs/changelog/115314.yaml @@ -0,0 +1,5 @@ +pr: 115314 +summary: Only aggregations require at least one shard request +area: Search +type: enhancement +issues: [] diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 29ec326548f2b..aad68660d2e4d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -412,7 +412,7 @@ public void testSkippingShards() throws Exception { assertResponse(client().search(searchRequest), searchResponse -> { ElasticsearchAssertions.assertNoSearchHits(searchResponse); assertThat(searchResponse.getTotalShards(), equalTo(2)); - 
assertThat(searchResponse.getSkippedShards(), equalTo(1)); + assertThat(searchResponse.getSkippedShards(), equalTo(2)); assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); }); } diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 5dde1d664402f..79cdc1047aec9 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -580,13 +581,14 @@ public void testSortByField() throws Exception { public void testSortByFieldOneClusterHasNoResults() throws Exception { assumeMultiClusterSetup(); - // set to a value greater than the number of shards to avoid differences due to the skipping of shards + // setting aggs to avoid differences due to the skipping of shards when matching none SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); boolean onlyRemote = randomBoolean(); sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME)); sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); + sourceBuilder.aggregation(AggregationBuilders.max("max").field("creationDate")); CheckedConsumer responseChecker = response -> { assertHits(response); int size = response.evaluateArraySize("hits.hits"); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml index 92ae11c712b25..f392ae6d09413 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml @@ -166,8 +166,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a terms query @@ -183,8 +182,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a prefix query @@ -200,8 +198,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a wildcard query @@ -217,7 
+214,6 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml index a60a1b0d812ee..be2ce033b123c 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml @@ -81,7 +81,7 @@ teardown: - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } - do: @@ -98,5 +98,5 @@ teardown: - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 5984e1acc89af..63eece88a53fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -214,7 +214,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // with DFS_QUERY_THEN_FETCH, the local shards are never skipped assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -224,7 +224,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (clusters.isCcsMinimizeRoundtrips()) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index e6cd89c09b979..0c1012c520dac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -68,7 +68,11 @@ public void testProfileQuery() throws Exception { prepareSearch().setQuery(q).setTrackTotalHits(true).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { assertNotNull("Profile response element should not be null", response.getProfileResults()); - assertThat("Profile response should 
not be an empty array", response.getProfileResults().size(), not(0)); + if (response.getSkippedShards() == response.getSuccessfulShards()) { + assertEquals(0, response.getProfileResults().size()); + } else { + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + } for (Map.Entry shard : response.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java index 140afd6b269b3..3d5120226ebed 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java @@ -158,11 +158,15 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio assertTrue(stats.hasField("date_field")); assertEquals(Set.of(UsageContext.POINTS), stats.get("date_field").keySet()); - // can_match does not enter search stats - // there is a special case though where we have no hit but we need to get at least one search response in order - // to produce a valid search result with all the aggs etc., so we hit one of the two shards + + long expectedShards = 2L * numShards; + if (numShards == 1) { + // with 1 shard and setPreFilterShardSize(1) we don't perform can_match phase but instead directly query the shard + expectedShards += 1; + } + assertEquals( - (2 * numShards) + 1, + expectedShards, indicesAdmin().prepareStats("test") .clear() .setSearch(true) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 302c3e243a1f6..8f718972c2eaa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1458,6 +1458,8 @@ public SearchPhase newSearchPhase( SearchResponse.Clusters clusters ) { if (preFilter) { + // only for aggs we need to contact shards even if there are no matches + boolean requireAtLeastOneMatch = searchRequest.source() != null && searchRequest.source().aggregations() != null; return new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -1469,7 +1471,7 @@ public SearchPhase newSearchPhase( shardIterators, timeProvider, task, - true, + requireAtLeastOneMatch, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), listener.delegateFailureAndWrap( (l, iters) -> newSearchPhase( diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 9d83f88a043e2..3cd8778069d0c 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -274,6 +274,8 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except boolean dfs = randomBoolean(); if (dfs) { 
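+                // with DFS, the query phase must run on every shard even when nothing matches, so the paused
+                // search below stays partial; with plain query_then_fetch every shard can be skipped in can_match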
request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } else { + request.getSearchRequest().searchType(SearchType.QUERY_THEN_FETCH); } RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp").from(100).to(2000); request.getSearchRequest().source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10)); @@ -288,20 +290,30 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertTrue(response.isRunning()); SearchResponse.Clusters clusters = response.getSearchResponse().getClusters(); assertThat(clusters.getTotal(), equalTo(2)); - assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); - + if (dfs) { + assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); + } else { + assertFalse( + "search cluster results should not be marked as partial as all shards are skipped", + clusters.hasPartialResults() + ); + } SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + if (dfs) { + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + } else { + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + } SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); } finally { response.decRef(); } - - SearchListenerPlugin.waitSearchStarted(); + if (dfs) { + SearchListenerPlugin.waitSearchStarted(); + } SearchListenerPlugin.allowQueryPhase(); waitForSearchTasksToFinish(); @@ -331,7 +343,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // no skipped shards locally when DFS_QUERY_THEN_FETCH is used assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -341,7 +353,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (minimizeRoundtrips) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } @@ -377,7 +389,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // no skipped shards locally when DFS_QUERY_THEN_FETCH is used assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -387,7 +399,7 @@ public 
void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (minimizeRoundtrips) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java index 2da4e2802bdbe..9eb792428537b 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java @@ -42,7 +42,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; public class SearchIdleTests extends ESSingleNodeTestCase { @@ -133,8 +132,7 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept // WHEN assertResponse(search("test*", "constant_keyword", randomAlphaOfLength(5), 5), searchResponse -> { assertEquals(RestStatus.OK, searchResponse.status()); - // NOTE: we need an empty result from at least one shard - assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards()); + assertEquals(idleIndexShardsCount + activeIndexShardsCount, searchResponse.getSkippedShards()); assertEquals(0, searchResponse.getFailedShards()); assertEquals(0, searchResponse.getHits().getHits().length); }); @@ -144,12 +142,8 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept assertIdleShardsRefreshStats(beforeStatsResponse, afterStatsResponse); - // If no shards match the can match phase then at least one shard gets queries for an empty response. - // However, this affects the search idle stats. 
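+        // with every shard skippable in can_match, no shard is queried at all anymore, so all shards remain search-idle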
List active = Arrays.stream(afterStatsResponse.getShards()).filter(s -> s.isSearchIdle() == false).toList(); - assertThat(active, hasSize(1)); - assertThat(active.get(0).getShardRouting().getIndexName(), equalTo("test1")); - assertThat(active.get(0).getShardRouting().id(), equalTo(0)); + assertThat(active, hasSize(0)); } public void testSearchIdleConstantKeywordMatchOneIndex() throws InterruptedException { diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java index 445aeaa375e11..467668f008b04 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexSettings; @@ -206,10 +207,10 @@ public void testCanMatchCoordinator() throws Exception { ) .setSize(5), response -> { - assertNull(response.getHits().getTotalHits()); + assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits()); assertEquals(0, response.getHits().getHits().length); assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + assertEquals(5, response.getSkippedShards()); } ); diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java index 084ccc88bee33..09fe8d1b7ad6e 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.rank.rrf; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.search.SearchType; @@ -199,10 +200,10 @@ public void testCanMatchShard() throws IOException { ) .setSize(5), response -> { - assertNull(response.getHits().getTotalHits()); + assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits()); assertEquals(0, response.getHits().getHits().length); assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + assertEquals(5, response.getSkippedShards()); } ); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index ed42d86bc8c49..259d38b1fe8ee 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -384,11 +384,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } else { assertResponse(client().search(request), newSearchResponse -> { - // When all shards are skipped, at least one of them should be queried in order to - // provide a proper search response. - assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getFailedShards(), equalTo(1)); + assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getFailedShards(), equalTo(0)); assertThat(newSearchResponse.getTotalShards(), equalTo(indexOutsideSearchRangeShardCount)); }); @@ -748,9 +746,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() // All the regular index searches succeeded assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards)); assertThat(newSearchResponse.getFailedShards(), equalTo(0)); - // We have to query at least one node to construct a valid response, and we pick - // a shard that's available in order to construct the search response - assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards - 1)); + assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards)); assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards)); assertThat(newSearchResponse.getHits().getTotalHits().value(), equalTo(0L)); }); diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java index a7f7b5bd3edda..208da4177fd4c 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java @@ -197,15 +197,13 @@ public void testSearchAction_RangeQueryThatMatchesNoShards() throws ExecutionExc QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents true, 0, - // All but 2 shards are skipped. TBH I don't know why this 2 shards are not skipped - oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 2 + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards ); testSearchAction( QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents false, 0, - // All but 1 shards are skipped. 
TBH I don't know why this 1 shard is not skipped - oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 1 + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards ); } diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index f502683e42eb2..30ec6630b9618 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -484,8 +484,7 @@ private void assertDocs( logger.info(searchResponse); assertEquals(0, searchResponse.getHits().getTotalHits().value()); assertEquals(numberOfShards, searchResponse.getSuccessfulShards()); - // When all shards are skipped, at least one of them is queried in order to provide a proper search response. - assertEquals(numberOfShards - 1, searchResponse.getSkippedShards()); + assertEquals(numberOfShards, searchResponse.getSkippedShards()); } finally { searchResponse.decRef(); } From a0c1df0d0c4ecdb39d05186f96c4ae976fde4f3e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 08:51:00 +0200 Subject: [PATCH 135/202] Speedup Query Phase Merging (#113355) Reducing contention and context switching in merging for the query phase by avoiding respinning the merge task repeatedly, removing things that don't need synchronization from the synchronized blocks, and merging repeated loops over the same query result arrays. --- .../search/QueryPhaseResultConsumer.java | 395 +++++++++--------- .../action/search/SearchPhaseController.java | 45 +- 2 files changed, 218 insertions(+), 222 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index 89411ac302b10..6c654d9235ec2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; @@ -121,26 +120,50 @@ public void consumeResult(SearchPhaseResult result, Runnable next) { public SearchPhaseController.ReducedQueryPhase reduce() throws Exception { if (pendingMerges.hasPendingMerges()) { throw new AssertionError("partial reduce in-flight"); - } else if (pendingMerges.hasFailure()) { - throw pendingMerges.getFailure(); + } + Exception failure = pendingMerges.failure.get(); + if (failure != null) { + throw failure; } // ensure consistent ordering pendingMerges.sortBuffer(); - final TopDocsStats topDocsStats = pendingMerges.consumeTopDocsStats(); - final List topDocsList = pendingMerges.consumeTopDocs(); + final TopDocsStats topDocsStats = pendingMerges.topDocsStats; + final int resultSize = pendingMerges.buffer.size() + (pendingMerges.mergeResult == null ? 0 : 1); + final List topDocsList = hasTopDocs ? new ArrayList<>(resultSize) : null; + final List> aggsList = hasAggs ?
new ArrayList<>(resultSize) : null; + synchronized (pendingMerges) { + if (pendingMerges.mergeResult != null) { + if (topDocsList != null) { + topDocsList.add(pendingMerges.mergeResult.reducedTopDocs); + } + if (aggsList != null) { + aggsList.add(DelayableWriteable.referencing(pendingMerges.mergeResult.reducedAggs)); + } + } + for (QuerySearchResult result : pendingMerges.buffer) { + topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); + if (topDocsList != null) { + TopDocsAndMaxScore topDocs = result.consumeTopDocs(); + setShardIndex(topDocs.topDocs, result.getShardIndex()); + topDocsList.add(topDocs.topDocs); + } + if (aggsList != null) { + aggsList.add(result.getAggs()); + } + } + } SearchPhaseController.ReducedQueryPhase reducePhase; long breakerSize = pendingMerges.circuitBreakerBytes; try { - final List> aggsList = pendingMerges.getAggs(); - if (hasAggs) { + if (aggsList != null) { // Add an estimate of the final reduce size breakerSize = pendingMerges.addEstimateAndMaybeBreak(PendingMerges.estimateRamBytesUsedForReduce(breakerSize)); } reducePhase = SearchPhaseController.reducedQueryPhase( results.asList(), aggsList, - topDocsList, + topDocsList == null ? Collections.emptyList() : topDocsList, topDocsStats, pendingMerges.numReducePhases, false, @@ -183,65 +206,59 @@ private MergeResult partialReduce( // ensure consistent ordering Arrays.sort(toConsume, RESULT_COMPARATOR); - for (QuerySearchResult result : toConsume) { - topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); - } - + final List processedShards = new ArrayList<>(emptyResults); final TopDocs newTopDocs; + final InternalAggregations newAggs; + final List> aggsList; + final int resultSetSize = toConsume.length + (lastMerge != null ? 
1 : 0); + if (hasAggs) { + aggsList = new ArrayList<>(resultSetSize); + if (lastMerge != null) { + aggsList.add(DelayableWriteable.referencing(lastMerge.reducedAggs)); + } + } else { + aggsList = null; + } + List topDocsList; if (hasTopDocs) { - List topDocsList = new ArrayList<>(); + topDocsList = new ArrayList<>(resultSetSize); if (lastMerge != null) { topDocsList.add(lastMerge.reducedTopDocs); } - for (QuerySearchResult result : toConsume) { - TopDocsAndMaxScore topDocs = result.consumeTopDocs(); - setShardIndex(topDocs.topDocs, result.getShardIndex()); - topDocsList.add(topDocs.topDocs); - } - newTopDocs = mergeTopDocs( - topDocsList, - // we have to merge here in the same way we collect on a shard - topNSize, - 0 - ); } else { - newTopDocs = null; + topDocsList = null; } - - final InternalAggregations newAggs; - if (hasAggs) { - try { - final List> aggsList = new ArrayList<>(); - if (lastMerge != null) { - aggsList.add(DelayableWriteable.referencing(lastMerge.reducedAggs)); - } - for (QuerySearchResult result : toConsume) { + try { + for (QuerySearchResult result : toConsume) { + topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); + SearchShardTarget target = result.getSearchShardTarget(); + processedShards.add(new SearchShard(target.getClusterAlias(), target.getShardId())); + if (aggsList != null) { aggsList.add(result.getAggs()); } - newAggs = InternalAggregations.topLevelReduceDelayable(aggsList, aggReduceContextBuilder.forPartialReduction()); - } finally { - for (QuerySearchResult result : toConsume) { - result.releaseAggs(); + if (topDocsList != null) { + TopDocsAndMaxScore topDocs = result.consumeTopDocs(); + setShardIndex(topDocs.topDocs, result.getShardIndex()); + topDocsList.add(topDocs.topDocs); } } - } else { - newAggs = null; + // we have to merge here in the same way we collect on a shard + newTopDocs = topDocsList == null ? null : mergeTopDocs(topDocsList, topNSize, 0); + newAggs = aggsList == null + ? null + : InternalAggregations.topLevelReduceDelayable(aggsList, aggReduceContextBuilder.forPartialReduction()); + } finally { + releaseAggs(toConsume); } - List processedShards = new ArrayList<>(emptyResults); if (lastMerge != null) { processedShards.addAll(lastMerge.processedShards); } - for (QuerySearchResult result : toConsume) { - SearchShardTarget target = result.getSearchShardTarget(); - processedShards.add(new SearchShard(target.getClusterAlias(), target.getShardId())); - } if (progressListener != SearchProgressListener.NOOP) { progressListener.notifyPartialReduce(processedShards, topDocsStats.getTotalHits(), newAggs, numReducePhases); } // we leave the results un-serialized because serializing is slow but we compute the serialized // size as an estimate of the memory used by the newly reduced aggregations. - long serializedSize = hasAggs ? DelayableWriteable.getSerializedSize(newAggs) : 0; - return new MergeResult(processedShards, newTopDocs, newAggs, hasAggs ? serializedSize : 0); + return new MergeResult(processedShards, newTopDocs, newAggs, newAggs != null ? 
DelayableWriteable.getSerializedSize(newAggs) : 0); } public int getNumReducePhases() { @@ -274,11 +291,7 @@ private class PendingMerges implements Releasable { @Override public synchronized void close() { - if (hasFailure()) { - assert circuitBreakerBytes == 0; - } else { - assert circuitBreakerBytes >= 0; - } + assert assertFailureAndBreakerConsistent(); releaseBuffer(); circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); @@ -290,8 +303,14 @@ public synchronized void close() { } } - synchronized Exception getFailure() { - return failure.get(); + private boolean assertFailureAndBreakerConsistent() { + boolean hasFailure = failure.get() != null; + if (hasFailure) { + assert circuitBreakerBytes == 0; + } else { + assert circuitBreakerBytes >= 0; + } + return true; } boolean hasFailure() { @@ -342,56 +361,71 @@ static long estimateRamBytesUsedForReduce(long size) { } public void consume(QuerySearchResult result, Runnable next) { - boolean executeNextImmediately = true; - synchronized (this) { - if (hasFailure() || result.isNull()) { - result.consumeAll(); - if (result.isNull()) { - SearchShardTarget target = result.getSearchShardTarget(); - emptyResults.add(new SearchShard(target.getClusterAlias(), target.getShardId())); - } - } else { - if (hasAggs) { - long aggsSize = ramBytesUsedQueryResult(result); - try { - addEstimateAndMaybeBreak(aggsSize); - } catch (Exception exc) { - result.releaseAggs(); - releaseBuffer(); - onMergeFailure(exc); - next.run(); - return; + if (hasFailure()) { + result.consumeAll(); + next.run(); + } else if (result.isNull()) { + result.consumeAll(); + SearchShardTarget target = result.getSearchShardTarget(); + SearchShard searchShard = new SearchShard(target.getClusterAlias(), target.getShardId()); + synchronized (this) { + emptyResults.add(searchShard); + } + next.run(); + } else { + final long aggsSize = ramBytesUsedQueryResult(result); + boolean executeNextImmediately = true; + boolean hasFailure = false; + synchronized (this) { + if (hasFailure()) { + hasFailure = true; + } else { + if (hasAggs) { + try { + addEstimateAndMaybeBreak(aggsSize); + } catch (Exception exc) { + releaseBuffer(); + onMergeFailure(exc); + hasFailure = true; + } + } + if (hasFailure == false) { + aggsCurrentBufferSize += aggsSize; + // add one if a partial merge is pending + int size = buffer.size() + (hasPartialReduce ? 1 : 0); + if (size >= batchReduceSize) { + hasPartialReduce = true; + executeNextImmediately = false; + QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new); + MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next); + aggsCurrentBufferSize = 0; + buffer.clear(); + emptyResults.clear(); + queue.add(task); + tryExecuteNext(); + } + buffer.add(result); } - aggsCurrentBufferSize += aggsSize; - } - // add one if a partial merge is pending - int size = buffer.size() + (hasPartialReduce ? 
1 : 0); - if (size >= batchReduceSize) { - hasPartialReduce = true; - executeNextImmediately = false; - QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new); - MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next); - aggsCurrentBufferSize = 0; - buffer.clear(); - emptyResults.clear(); - queue.add(task); - tryExecuteNext(); } - buffer.add(result); } - } - if (executeNextImmediately) { - next.run(); + if (hasFailure) { + result.consumeAll(); + } + if (executeNextImmediately) { + next.run(); + } } } private void releaseBuffer() { - buffer.forEach(QuerySearchResult::releaseAggs); + for (QuerySearchResult querySearchResult : buffer) { + querySearchResult.releaseAggs(); + } buffer.clear(); } private synchronized void onMergeFailure(Exception exc) { - if (hasFailure()) { + if (failure.compareAndSet(null, exc) == false) { assert circuitBreakerBytes == 0; return; } @@ -401,79 +435,89 @@ private synchronized void onMergeFailure(Exception exc) { circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); circuitBreakerBytes = 0; } - failure.compareAndSet(null, exc); - final List toCancels = new ArrayList<>(); - toCancels.add(() -> onPartialMergeFailure.accept(exc)); + onPartialMergeFailure.accept(exc); final MergeTask task = runningTask.getAndSet(null); if (task != null) { - toCancels.add(task::cancel); + task.cancel(); } MergeTask mergeTask; while ((mergeTask = queue.pollFirst()) != null) { - toCancels.add(mergeTask::cancel); + mergeTask.cancel(); } mergeResult = null; - Releasables.close(toCancels); - } - - private void onAfterMerge(MergeTask task, MergeResult newResult, long estimatedSize) { - synchronized (this) { - if (hasFailure()) { - return; - } - runningTask.compareAndSet(task, null); - mergeResult = newResult; - if (hasAggs) { - // Update the circuit breaker to remove the size of the source aggregations - // and replace the estimation with the serialized size of the newly reduced result. - long newSize = mergeResult.estimatedSize - estimatedSize; - addWithoutBreaking(newSize); - logger.trace( - "aggs partial reduction [{}->{}] max [{}]", - estimatedSize, - mergeResult.estimatedSize, - maxAggsCurrentBufferSize - ); - } - task.consumeListener(); - } } private void tryExecuteNext() { final MergeTask task; synchronized (this) { - if (queue.isEmpty() || hasFailure() || runningTask.get() != null) { + if (hasFailure() || runningTask.get() != null) { return; } task = queue.poll(); - runningTask.compareAndSet(null, task); + runningTask.set(task); + } + if (task == null) { + return; } executor.execute(new AbstractRunnable() { @Override protected void doRun() { - final MergeResult thisMergeResult = mergeResult; - long estimatedTotalSize = (thisMergeResult != null ? 
thisMergeResult.estimatedSize : 0) + task.aggsBufferSize; - final MergeResult newMerge; - final QuerySearchResult[] toConsume = task.consumeBuffer(); - if (toConsume == null) { - return; - } - try { - long estimatedMergeSize = estimateRamBytesUsedForReduce(estimatedTotalSize); - addEstimateAndMaybeBreak(estimatedMergeSize); - estimatedTotalSize += estimatedMergeSize; - ++numReducePhases; - newMerge = partialReduce(toConsume, task.emptyResults, topDocsStats, thisMergeResult, numReducePhases); - } catch (Exception t) { - for (QuerySearchResult result : toConsume) { - result.releaseAggs(); + MergeTask mergeTask = task; + QuerySearchResult[] toConsume = mergeTask.consumeBuffer(); + while (mergeTask != null) { + final MergeResult thisMergeResult = mergeResult; + long estimatedTotalSize = (thisMergeResult != null ? thisMergeResult.estimatedSize : 0) + mergeTask.aggsBufferSize; + final MergeResult newMerge; + try { + long estimatedMergeSize = estimateRamBytesUsedForReduce(estimatedTotalSize); + addEstimateAndMaybeBreak(estimatedMergeSize); + estimatedTotalSize += estimatedMergeSize; + ++numReducePhases; + newMerge = partialReduce(toConsume, mergeTask.emptyResults, topDocsStats, thisMergeResult, numReducePhases); + } catch (Exception t) { + QueryPhaseResultConsumer.releaseAggs(toConsume); + onMergeFailure(t); + return; + } + synchronized (QueryPhaseResultConsumer.this) { + if (hasFailure()) { + return; + } + mergeResult = newMerge; + if (hasAggs) { + // Update the circuit breaker to remove the size of the source aggregations + // and replace the estimation with the serialized size of the newly reduced result. + long newSize = mergeResult.estimatedSize - estimatedTotalSize; + addWithoutBreaking(newSize); + if (logger.isTraceEnabled()) { + logger.trace( + "aggs partial reduction [{}->{}] max [{}]", + estimatedTotalSize, + mergeResult.estimatedSize, + maxAggsCurrentBufferSize + ); + } + } + } + Runnable r = mergeTask.consumeListener(); + synchronized (QueryPhaseResultConsumer.this) { + while (true) { + mergeTask = queue.poll(); + runningTask.set(mergeTask); + if (mergeTask == null) { + break; + } + toConsume = mergeTask.consumeBuffer(); + if (toConsume != null) { + break; + } + } + } + if (r != null) { + r.run(); } - onMergeFailure(t); - return; } - onAfterMerge(task, newMerge, estimatedTotalSize); - tryExecuteNext(); } @Override @@ -483,43 +527,6 @@ public void onFailure(Exception exc) { }); } - public synchronized TopDocsStats consumeTopDocsStats() { - for (QuerySearchResult result : buffer) { - topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); - } - return topDocsStats; - } - - public synchronized List consumeTopDocs() { - if (hasTopDocs == false) { - return Collections.emptyList(); - } - List topDocsList = new ArrayList<>(); - if (mergeResult != null) { - topDocsList.add(mergeResult.reducedTopDocs); - } - for (QuerySearchResult result : buffer) { - TopDocsAndMaxScore topDocs = result.consumeTopDocs(); - setShardIndex(topDocs.topDocs, result.getShardIndex()); - topDocsList.add(topDocs.topDocs); - } - return topDocsList; - } - - public synchronized List> getAggs() { - if (hasAggs == false) { - return Collections.emptyList(); - } - List> aggsList = new ArrayList<>(); - if (mergeResult != null) { - aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs)); - } - for (QuerySearchResult result : buffer) { - aggsList.add(result.getAggs()); - } - return aggsList; - } - public synchronized void releaseAggs() { if (hasAggs) { for (QuerySearchResult result : 
buffer) { @@ -529,6 +536,12 @@ public synchronized void releaseAggs() { } } + private static void releaseAggs(QuerySearchResult... toConsume) { + for (QuerySearchResult result : toConsume) { + result.releaseAggs(); + } + } + private record MergeResult( List processedShards, TopDocs reducedTopDocs, @@ -555,21 +568,21 @@ public synchronized QuerySearchResult[] consumeBuffer() { return toRet; } - public void consumeListener() { - if (next != null) { - next.run(); - next = null; - } + public synchronized Runnable consumeListener() { + Runnable n = next; + next = null; + return n; } - public synchronized void cancel() { + public void cancel() { QuerySearchResult[] buffer = consumeBuffer(); if (buffer != null) { - for (QuerySearchResult result : buffer) { - result.releaseAggs(); - } + releaseAggs(buffer); + } + Runnable next = consumeListener(); + if (next != null) { + next.run(); } - consumeListener(); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index ca9c4ab44c423..b118c2560925e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; @@ -190,7 +191,7 @@ public static List mergeKnnResults(SearchRequest request, List topDocs, + final List topDocs, int from, int size, List reducedCompletionSuggestions @@ -233,22 +234,22 @@ static SortedTopDocs sortDocs( return new SortedTopDocs(scoreDocs, isSortedByField, sortFields, groupField, groupValues, numSuggestDocs); } - static TopDocs mergeTopDocs(Collection results, int topN, int from) { + static TopDocs mergeTopDocs(List results, int topN, int from) { if (results.isEmpty()) { return null; } - final TopDocs topDocs = results.stream().findFirst().get(); + final TopDocs topDocs = results.getFirst(); final TopDocs mergedTopDocs; final int numShards = results.size(); if (numShards == 1 && from == 0) { // only one shard and no pagination we can just return the topDocs as we got them. 
return topDocs; } else if (topDocs instanceof TopFieldGroups firstTopDocs) { final Sort sort = new Sort(firstTopDocs.fields); - final TopFieldGroups[] shardTopDocs = results.toArray(new TopFieldGroups[numShards]); + final TopFieldGroups[] shardTopDocs = results.toArray(new TopFieldGroups[0]); mergedTopDocs = TopFieldGroups.merge(sort, from, topN, shardTopDocs, false); } else if (topDocs instanceof TopFieldDocs firstTopDocs) { final Sort sort = checkSameSortTypes(results, firstTopDocs.fields); - final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[numShards]); + final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[0]); mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs); } else { final TopDocs[] shardTopDocs = results.toArray(new TopDocs[numShards]); @@ -524,17 +525,7 @@ public AggregationReduceContext forFinalReduction() { topDocs.add(td.topDocs); } } - return reducedQueryPhase( - queryResults, - Collections.emptyList(), - topDocs, - topDocsStats, - 0, - true, - aggReduceContextBuilder, - null, - true - ); + return reducedQueryPhase(queryResults, null, topDocs, topDocsStats, 0, true, aggReduceContextBuilder, null, true); } /** @@ -548,7 +539,7 @@ public AggregationReduceContext forFinalReduction() { */ static ReducedQueryPhase reducedQueryPhase( Collection queryResults, - List> bufferedAggs, + @Nullable List> bufferedAggs, List bufferedTopDocs, TopDocsStats topDocsStats, int numReducePhases, @@ -642,7 +633,12 @@ static ReducedQueryPhase reducedQueryPhase( reducedSuggest = new Suggest(Suggest.reduce(groupedSuggestions)); reducedCompletionSuggestions = reducedSuggest.filter(CompletionSuggestion.class); } - final InternalAggregations aggregations = reduceAggs(aggReduceContextBuilder, performFinalReduce, bufferedAggs); + final InternalAggregations aggregations = bufferedAggs == null + ? null + : InternalAggregations.topLevelReduceDelayable( + bufferedAggs, + performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction() + ); final SearchProfileResultsBuilder profileBuilder = profileShardResults.isEmpty() ? null : new SearchProfileResultsBuilder(profileShardResults); @@ -681,19 +677,6 @@ static ReducedQueryPhase reducedQueryPhase( ); } - private static InternalAggregations reduceAggs( - AggregationReduceContext.Builder aggReduceContextBuilder, - boolean performFinalReduce, - List> toReduce - ) { - return toReduce.isEmpty() - ? null - : InternalAggregations.topLevelReduceDelayable( - toReduce, - performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction() - ); - } - /** * Checks that query results from all shards have consistent unsigned_long format. * Sort queries on a field that has long type in one index, and unsigned_long in another index From a02f68217a5bfb226fbcd3b26cfc2b125806be94 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 08:53:27 +0200 Subject: [PATCH 136/202] Lazy initialize HttpRouteStatsTracker in MethodHandlers (#114107) We use about 1M for the route stats tracker instances per ES instance. Making this lazy init should come at a trivial overhead and in fact makes the computation of the node stats cheaper by saving spurious sums on 0-valued long adders.
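The pattern is easiest to see stripped of the surrounding plumbing. Below is a minimal sketch of the racy-initialization idiom the patch adopts; the `LazyStatsHolder` and `Tracker` names are illustrative, not the real classes:

```java
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

final class LazyStatsHolder {
    // Written only through TRACKER; volatile so a plain read observes a fully constructed instance.
    @SuppressWarnings("unused")
    private volatile Tracker tracker;

    private static final VarHandle TRACKER;

    static {
        try {
            TRACKER = MethodHandles.lookup().findVarHandle(LazyStatsHolder.class, "tracker", Tracker.class);
        } catch (NoSuchFieldException | IllegalAccessException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    Tracker tracker() {
        var existing = (Tracker) TRACKER.getAcquire(this);
        if (existing == null) {
            var fresh = new Tracker();
            // compareAndExchange returns the witnessed value: null if we won the race,
            // otherwise the instance another thread installed first. Either way exactly
            // one Tracker is ever published; a losing thread's allocation is discarded.
            existing = (Tracker) TRACKER.compareAndExchange(this, null, fresh);
            if (existing == null) {
                existing = fresh;
            }
        }
        return existing;
    }

    static final class Tracker {
        // counters (LongAdder fields and the like) elided for brevity
    }
}
```

Losing the race only costs one discarded allocation; every caller converges on the single published instance, and the steady-state cost is a single acquire-mode read.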
--- .../elasticsearch/http/HttpRouteStats.java | 2 + .../elasticsearch/rest/MethodHandlers.java | 42 ++++++++++++++----- .../elasticsearch/rest/RestController.java | 25 +++++------ 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java b/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java index 5be1ae9312c46..a15b929fd3c1b 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java +++ b/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java @@ -49,6 +49,8 @@ public record HttpRouteStats( long[] responseTimeHistogram ) implements Writeable, ToXContentObject { + public static final HttpRouteStats EMPTY = new HttpRouteStats(0, 0, new long[0], 0, 0, new long[0], new long[0]); + public HttpRouteStats(StreamInput in) throws IOException { this(in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLongArray()); } diff --git a/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java b/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java index a947ddce2b9f3..2f53f48f9ae5b 100644 --- a/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java @@ -13,6 +13,8 @@ import org.elasticsearch.http.HttpRouteStats; import org.elasticsearch.http.HttpRouteStatsTracker; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.util.EnumMap; import java.util.Map; import java.util.Set; @@ -25,7 +27,18 @@ final class MethodHandlers { private final String path; private final Map> methodHandlers; - private final HttpRouteStatsTracker statsTracker = new HttpRouteStatsTracker(); + @SuppressWarnings("unused") // only accessed via #STATS_TRACKER_HANDLE, lazy initialized because instances consume non-trivial heap + private volatile HttpRouteStatsTracker statsTracker; + + private static final VarHandle STATS_TRACKER_HANDLE; + + static { + try { + STATS_TRACKER_HANDLE = MethodHandles.lookup().findVarHandle(MethodHandlers.class, "statsTracker", HttpRouteStatsTracker.class); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new ExceptionInInitializerError(e); + } + } MethodHandlers(String path) { this.path = path; @@ -73,19 +86,26 @@ Set getValidMethods() { return methodHandlers.keySet(); } - public void addRequestStats(int contentLength) { - statsTracker.addRequestStats(contentLength); - } - - public void addResponseStats(long contentLength) { - statsTracker.addResponseStats(contentLength); + public HttpRouteStats getStats() { + var tracker = existingStatsTracker(); + if (tracker == null) { + return HttpRouteStats.EMPTY; + } + return tracker.getStats(); } - public void addResponseTime(long timeMillis) { - statsTracker.addResponseTime(timeMillis); + public HttpRouteStatsTracker statsTracker() { + var tracker = existingStatsTracker(); + if (tracker == null) { + var newTracker = new HttpRouteStatsTracker(); + if ((tracker = (HttpRouteStatsTracker) STATS_TRACKER_HANDLE.compareAndExchange(this, null, newTracker)) == null) { + tracker = newTracker; + } + } + return tracker; } - public HttpRouteStats getStats() { - return statsTracker.getStats(); + private HttpRouteStatsTracker existingStatsTracker() { + return (HttpRouteStatsTracker) STATS_TRACKER_HANDLE.getAcquire(this); } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 
c2064fdd931de..7446ec5bb6717 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -36,6 +36,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.http.HttpHeadersValidationException; import org.elasticsearch.http.HttpRouteStats; +import org.elasticsearch.http.HttpRouteStatsTracker; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; @@ -879,7 +880,7 @@ public void sendResponse(RestResponse response) { private static final class ResourceHandlingHttpChannel extends DelegatingRestChannel { private final CircuitBreakerService circuitBreakerService; private final int contentLength; - private final MethodHandlers methodHandlers; + private final HttpRouteStatsTracker statsTracker; private final long startTime; private final AtomicBoolean closed = new AtomicBoolean(); @@ -892,7 +893,7 @@ private static final class ResourceHandlingHttpChannel extends DelegatingRestCha super(delegate); this.circuitBreakerService = circuitBreakerService; this.contentLength = contentLength; - this.methodHandlers = methodHandlers; + this.statsTracker = methodHandlers.statsTracker(); this.startTime = rawRelativeTimeInMillis(); } @@ -901,12 +902,12 @@ public void sendResponse(RestResponse response) { boolean success = false; try { close(); - methodHandlers.addRequestStats(contentLength); - methodHandlers.addResponseTime(rawRelativeTimeInMillis() - startTime); + statsTracker.addRequestStats(contentLength); + statsTracker.addResponseTime(rawRelativeTimeInMillis() - startTime); if (response.isChunked() == false) { - methodHandlers.addResponseStats(response.content().length()); + statsTracker.addResponseStats(response.content().length()); } else { - final var responseLengthRecorder = new ResponseLengthRecorder(methodHandlers); + final var responseLengthRecorder = new ResponseLengthRecorder(statsTracker); final var headers = response.getHeaders(); response = RestResponse.chunked( response.status(), @@ -941,11 +942,11 @@ private void close() { } } - private static class ResponseLengthRecorder extends AtomicReference implements Releasable { + private static class ResponseLengthRecorder extends AtomicReference implements Releasable { private long responseLength; - private ResponseLengthRecorder(MethodHandlers methodHandlers) { - super(methodHandlers); + private ResponseLengthRecorder(HttpRouteStatsTracker routeStatsTracker) { + super(routeStatsTracker); } @Override @@ -953,11 +954,11 @@ public void close() { // closed just before sending the last chunk, and also when the whole RestResponse is closed since the client might abort the // connection before we send the last chunk, in which case we won't have recorded the response in the // stats yet; thus we need run-once semantics here: - final var methodHandlers = getAndSet(null); - if (methodHandlers != null) { + final var routeStatsTracker = getAndSet(null); + if (routeStatsTracker != null) { // if we started sending chunks then we're closed on the transport worker, no need for sync assert responseLength == 0L || Transports.assertTransportThread(); - methodHandlers.addResponseStats(responseLength); + routeStatsTracker.addResponseStats(responseLength); } } From ca4009e29823ae3eaaad26b75d8bb47ade5e218c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 25 Oct 2024 09:13:18 +0200 Subject: [PATCH 137/202] [DOCS] Adds 
stream inference API docs (#115333) Co-authored-by: Pat Whelan --- .../inference/inference-apis.asciidoc | 2 + .../inference/stream-inference.asciidoc | 122 ++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 docs/reference/inference/stream-inference.asciidoc diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index ddcff1abc7dce..1206cb02ba89a 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -19,6 +19,7 @@ the following APIs to manage {infer} models and perform {infer}: * <> * <> * <> +* <> * <> [[inference-landscape]] @@ -56,6 +57,7 @@ include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] include::put-inference.asciidoc[] +include::stream-inference.asciidoc[] include::update-inference.asciidoc[] include::service-alibabacloud-ai-search.asciidoc[] include::service-amazon-bedrock.asciidoc[] diff --git a/docs/reference/inference/stream-inference.asciidoc b/docs/reference/inference/stream-inference.asciidoc new file mode 100644 index 0000000000000..e66acd630cb3e --- /dev/null +++ b/docs/reference/inference/stream-inference.asciidoc @@ -0,0 +1,122 @@ +[role="xpack"] +[[stream-inference-api]] +=== Stream inference API + +Streams a chat completion response. + +IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. + + +[discrete] +[[stream-inference-api-request]] +==== {api-request-title} + +`POST /_inference//_stream` + +`POST /_inference///_stream` + + +[discrete] +[[stream-inference-api-prereqs]] +==== {api-prereq-title} + +* Requires the `monitor_inference` <> +(the built-in `inference_admin` and `inference_user` roles grant this privilege) +* You must use a client that supports streaming. + + +[discrete] +[[stream-inference-api-desc]] +==== {api-description-title} + +The stream {infer} API enables real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. +It only works with the `completion` task type. + + +[discrete] +[[stream-inference-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +The unique identifier of the {infer} endpoint. + + +``:: +(Optional, string) +The type of {infer} task that the model performs. + + +[discrete] +[[stream-inference-api-request-body]] +==== {api-request-body-title} + +`input`:: +(Required, string or array of strings) +The text on which you want to perform the {infer} task. +`input` can be a single string or an array. ++ +-- +[NOTE] +==== +Inference endpoints for the `completion` task type currently only support a +single string as input. +==== +-- + + +[discrete] +[[stream-inference-api-example]] +==== {api-examples-title} + +The following example performs a completion on the example question with streaming. + + +[source,console] +------------------------------------------------------------ +POST _inference/completion/openai-completion/_stream +{ + "input": "What is Elastic?" 
+} +------------------------------------------------------------ +// TEST[skip:TBD] + + +The API returns the following response: + + +[source,txt] +------------------------------------------------------------ +event: message +data: { + "completion":[{ + "delta":"Elastic" + }] +} + +event: message +data: { + "completion":[{ + "delta":" is" + }, + { + "delta":" a" + } + ] +} + +event: message +data: { + "completion":[{ + "delta":" software" + }, + { + "delta":" company" + }] +} + +(...) +------------------------------------------------------------ +// NOTCONSOLE From 6688fe225584cfa8d12ebb5e56662918a593f690 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:26:12 +0300 Subject: [PATCH 138/202] Remove excluded tests from rest compat (#115617) --- x-pack/plugin/downsample/qa/rest/build.gradle | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index 5142632a36006..ba5ac7b0c7317 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -32,20 +32,6 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } -tasks.named("yamlRestCompatTestTransform").configure ({ task -> - task.skipTest("downsample/10_basic/Downsample index with empty dimension on routing path", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample histogram as label", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample date timestamp field using strict_date_optional_time_nanos format", - "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample a downsampled index", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample date_nanos timestamp field using custom format", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample using coarse grained timestamp", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample label with ignore_above", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample object field", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample empty and missing labels", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample index", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample index with empty dimension", "Skip until pr/115358 gets backported") -}) if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } From e7897bdeff7f4ec76e8a0801c86f5dea11cacabd Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Fri, 25 Oct 2024 09:57:12 +0200 Subject: [PATCH 139/202] Return `_ignored_source` as a top level array field (#115328) This PR introduces a fix for the `fields` and `stored_fields` APIs and the way `_ignored_source` field is handled: 1. **Return `_ignored_source` as a top-level array metadata field**: - The `_ignored_source` field is now returned as a top-level array in the metadata as done with other metadata fields. 2. 
**Return `_ignored_source` as an array of values**: - Even when there is only a single ignored field, `_ignored_source` will now be returned as an array of values. This is done to be consistent with how the `_ignored` field is returned. Without this fix, we would return the `_ignored_source` field twice, as a top-level field and as part of the `fields` array. Also, without this fix, we would only return a single value instead of all ignored field values. --- .../mapper/IgnoredSourceFieldMapper.java | 3 + .../index/mapper/MapperFeatures.java | 1 + .../org/elasticsearch/search/SearchHit.java | 3 +- .../fetch/subphase/FetchFieldsPhase.java | 25 ++- .../index/get/DocumentFieldTests.java | 5 +- .../search/SearchResponseUtils.java | 3 +- .../rest-api-spec/test/20_ignored_source.yml | 158 +++++++++++++++++- 7 files changed, 182 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index 70d73fc2ffb9a..7e2bebfd403cb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -58,6 +58,9 @@ public class IgnoredSourceFieldMapper extends MetadataFieldMapper { static final NodeFeature TRACK_IGNORED_SOURCE = new NodeFeature("mapper.track_ignored_source"); static final NodeFeature DONT_EXPAND_DOTS_IN_IGNORED_SOURCE = new NodeFeature("mapper.ignored_source.dont_expand_dots"); + static final NodeFeature IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD = new NodeFeature( + "mapper.ignored_source_as_top_level_metadata_array_field" + ); static final NodeFeature ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS = new NodeFeature( "mapper.ignored_source.always_store_object_arrays_in_nested" ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 026c7c98d7aeb..a5f173afffba2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -63,6 +63,7 @@ public Set getTestFeatures() { RangeFieldMapper.DATE_RANGE_INDEXING_FIX, IgnoredSourceFieldMapper.DONT_EXPAND_DOTS_IN_IGNORED_SOURCE, SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION, + IgnoredSourceFieldMapper.IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD, IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS ); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 1611c95d99df4..98f7c92d9997a 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.SimpleRefCounted; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -847,7 +848,7 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t } // _ignored is the only multi-valued meta field // TODO: can we avoid having an exception here? 
- if (field.getName().equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(field.getName()) || IgnoredSourceFieldMapper.NAME.equals(field.getName())) { builder.field(field.getName(), field.getValues()); } else { builder.field(field.getName(), field.getValue()); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java index 03bfbd40d97be..e0cb5a668b4ab 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java @@ -57,13 +57,28 @@ public FetchSubPhaseProcessor getProcessor(FetchContext fetchContext) { return null; } + // NOTE: FieldFetcher for non-metadata fields, as well as `_id` and `_source`. + // We need to retain `_id` and `_source` here to correctly populate the `StoredFieldSpecs` created by the + // `FieldFetcher` constructor. final SearchExecutionContext searchExecutionContext = fetchContext.getSearchExecutionContext(); - final FieldFetcher fieldFetcher = fetchFieldsContext == null ? null - : fetchFieldsContext.fields() == null ? null - : fetchFieldsContext.fields().isEmpty() ? null - : FieldFetcher.create(searchExecutionContext, fetchFieldsContext.fields()); + final FieldFetcher fieldFetcher = (fetchFieldsContext == null + || fetchFieldsContext.fields() == null + || fetchFieldsContext.fields().isEmpty()) + ? null + : FieldFetcher.create( + searchExecutionContext, + fetchFieldsContext.fields() + .stream() + .filter( + fieldAndFormat -> (searchExecutionContext.isMetadataField(fieldAndFormat.field) == false + || searchExecutionContext.getFieldType(fieldAndFormat.field).isStored() == false + || IdFieldMapper.NAME.equals(fieldAndFormat.field) + || SourceFieldMapper.NAME.equals(fieldAndFormat.field)) + ) + .toList() + ); - // NOTE: Collect stored metadata fields requested via `fields` (in FetchFieldsContext`) like for instance the _ignored source field + // NOTE: Collect stored metadata fields requested via `fields` (in FetchFieldsContext) like for instance the _ignored source field final Set fetchContextMetadataFields = new HashSet<>(); if (fetchFieldsContext != null && fetchFieldsContext.fields() != null && fetchFieldsContext.fields().isEmpty() == false) { for (final FieldAndFormat fieldAndFormat : fetchFieldsContext.fields()) { diff --git a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java index 76426e9df83d8..8a27c3545a110 100644 --- a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java +++ b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; @@ -122,7 +123,7 @@ public static Tuple randomDocumentField( if (isMetafield) { String metaField = randomValueOtherThanMany(excludeMetaFieldFilter, () -> randomFrom(IndicesModule.getBuiltInMetadataFields())); DocumentField documentField; - if (metaField.equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(metaField) || 
IgnoredSourceFieldMapper.NAME.equals(metaField)) { int numValues = randomIntBetween(1, 3); List ignoredFields = new ArrayList<>(numValues); for (int i = 0; i < numValues; i++) { @@ -130,7 +131,7 @@ public static Tuple randomDocumentField( } documentField = new DocumentField(metaField, ignoredFields); } else { - // meta fields are single value only, besides _ignored + // meta fields are single value only, besides _ignored and _ignored_source documentField = new DocumentField(metaField, Collections.singletonList(randomAlphaOfLengthBetween(3, 10))); } return Tuple.tuple(documentField, documentField); diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index df1ea6b756405..b0edbb829df2a 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; @@ -83,7 +84,7 @@ public enum SearchResponseUtils { SearchHit.METADATA_FIELDS, v -> new HashMap() ); - if (fieldName.equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(fieldName) || IgnoredSourceFieldMapper.NAME.equals(fieldName)) { fieldMap.put(fieldName, new DocumentField(fieldName, (List) fieldValue)); } else { fieldMap.put(fieldName, new DocumentField(fieldName, Collections.singletonList(fieldValue))); diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml index c54edb0994860..2f111d579ebb1 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml @@ -27,6 +27,10 @@ setup: --- "fetch stored fields wildcard": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -40,6 +44,10 @@ setup: --- "fetch fields wildcard": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -53,6 +61,10 @@ setup: --- "fetch stored fields by name": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -62,10 +74,14 @@ setup: stored_fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "fetch fields by name": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires 
returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -75,10 +91,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "fields and stored fields combination": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -92,10 +112,14 @@ setup: - match: { hits.total.value: 1 } - match: { hits.hits.0.fields.object: null } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "wildcard fields and stored fields combination": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: search: index: test @@ -108,6 +132,10 @@ setup: --- "fields with ignored source in stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -118,11 +146,15 @@ setup: fields: [ object ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } - match: { hits.hits.0.fields: null } --- "fields with ignored source in fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -133,10 +165,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "ignored source via fields and wildcard stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -147,10 +183,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "wildcard fields and ignored source via stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -161,4 +201,108 @@ setup: fields: [ "*" ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary 
"BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + +--- +ignored source array via fields: + - requires: + cluster_features: [mapper.ignored_source_as_top_level_metadata_array_field] + reason: requires returning the _ignored_source field as a top level array metadata field + + - do: + indices.create: + index: test-dynamic-fields + body: + settings: + index: + mapping: + source: + mode: synthetic + total_fields: + ignore_dynamic_beyond_limit: true + limit: 1 # only `name` static mapping is allowed + mappings: + properties: + name: + type: keyword + + - do: + bulk: + index: test-dynamic-fields + refresh: true + body: + - '{ "index": { } }' + - '{ "name": "foo", "value": 1, "id": "f5t7-66gt" }' + - match: { errors: false } + + - do: + headers: + Content-Type: application/yaml + search: + index: test-dynamic-fields + body: + fields: [ "_ignored_source" ] + query: + match_all: {} + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: "foo" } + - match: { hits.hits.0._source.value: 1 } + - match: { hits.hits.0._source.id: "f5t7-66gt" } + - match: { hits.hits.0._ignored: [ "id", "value" ]} + - length: { hits.hits.0._ignored_source: 2 } + - match: { hits.hits.0._ignored_source.0: !!binary "AgAAAGlkU2Y1dDctNjZndA==" } # `id` field + - match: { hits.hits.0._ignored_source.1: !!binary "BQAAAHZhbHVlSQEAAAA=" } # `value` field + +--- +ignored source array via stored_fields: + - requires: + cluster_features: [mapper.ignored_source_as_top_level_metadata_array_field] + reason: requires returning the _ignored_source field as a top level array metadata field + + - do: + indices.create: + index: test-dynamic-stored-fields + body: + settings: + index: + mapping: + source: + mode: synthetic + total_fields: + ignore_dynamic_beyond_limit: true + limit: 1 # only `name` static mapping is allowed + mappings: + properties: + name: + type: keyword + + - do: + bulk: + index: test-dynamic-stored-fields + refresh: true + body: + - '{ "index": { } }' + - '{ "name": "foo", "value": 1, "id": "f5t7-66gt" }' + - match: { errors: false } + + - do: + headers: + Content-Type: application/yaml + search: + index: test-dynamic-stored-fields + body: + # NOTE: when using synthetic source `_source` field needs to be explicitly requested via `stored_fields`, + # a wildcard request would not include it. + stored_fields: [ "_ignored_source", "_source" ] + query: + match_all: {} + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: "foo" } + - match: { hits.hits.0._source.value: 1 } + - match: { hits.hits.0._source.id: "f5t7-66gt" } + - match: { hits.hits.0._ignored: [ "id", "value" ]} + - length: { hits.hits.0._ignored_source: 2 } + - match: { hits.hits.0._ignored_source.0: !!binary "AgAAAGlkU2Y1dDctNjZndA==" } # `id` field + - match: { hits.hits.0._ignored_source.1: !!binary "BQAAAHZhbHVlSQEAAAA=" } # `value` field From 3d307e0d7867116585dccfb335e0cab0c192bdb9 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 25 Oct 2024 10:09:53 +0200 Subject: [PATCH 140/202] Don't return TEXT type for functions that take TEXT (#114334) Always return `KEYWORD` for functions that previously returned `TEXT`, because any change to the value, no matter how small, is enough to render meaningless the original analyzer associated with the `TEXT` field value. In principle, if the attribute is no longer the original `FieldAttribute`, it can no longer claim to have the type `TEXT`. 
This has been done for all functions: conversion functions, aggregating functions, and multi-value functions. There were several that already produced `KEYWORD` for `TEXT` input (e.g. ToString, FromBase64 and ToBase64, MvZip, ToLower, ToUpper, DateFormat, Concat, Left, Repeat, Replace, Right, Split, Substring), but many others incorrectly claimed to produce `TEXT`. This PR makes that strict, and includes changes to the functions' unit tests so they no longer allow any function output to be typed `TEXT`. One side effect of this change is that functions that take multiple parameters and require all of them to have the same type now treat TEXT and KEYWORD the same. This was already the case for functions like `Concat`, but is now also the case for `Greatest`, `Least`, `Case`, `Coalesce` and `MvAppend`. An associated change is that the type casting operator `::text` has been entirely removed. It used to map onto the `ToString` function, which returned type KEYWORD, so `::text` really produced a `KEYWORD`; that was a lie, or at least a bug, which is now fixed. Should we ever wish to actually produce real `TEXT`, we might love the fact that this operator has been freed up for future use (although it seems likely that such a function would require parameters to specify the analyzer, so it might never be an operator again).
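As a concrete illustration of the type change (the `sample` index and `message` field here are hypothetical, not part of this PR):

```esql
FROM sample
| EVAL trimmed = TRIM(message)
| KEEP message, trimmed
```

Assuming `message` is mapped as `text`, the `trimmed` column now comes back typed `keyword` where earlier versions reported `text`; `message` itself keeps its `text` type, and a cast like `message::text` is rejected outright since the operator no longer exists.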
--------- Co-authored-by: Luigi Dell'Aquila --- docs/changelog/114334.yaml | 7 +++ .../functions/kibana/definition/case.json | 52 +++++++++++++++- .../functions/kibana/definition/coalesce.json | 4 +- .../functions/kibana/definition/greatest.json | 4 +- .../functions/kibana/definition/least.json | 4 +- .../functions/kibana/definition/ltrim.json | 2 +- .../esql/functions/kibana/definition/max.json | 2 +- .../esql/functions/kibana/definition/min.json | 2 +- .../kibana/definition/mv_append.json | 2 +- .../kibana/definition/mv_dedupe.json | 2 +- .../functions/kibana/definition/mv_first.json | 2 +- .../functions/kibana/definition/mv_last.json | 2 +- .../functions/kibana/definition/mv_max.json | 2 +- .../functions/kibana/definition/mv_min.json | 2 +- .../functions/kibana/definition/mv_slice.json | 2 +- .../functions/kibana/definition/mv_sort.json | 2 +- .../functions/kibana/definition/reverse.json | 2 +- .../functions/kibana/definition/rtrim.json | 2 +- .../functions/kibana/definition/to_lower.json | 2 +- .../functions/kibana/definition/to_upper.json | 2 +- .../esql/functions/kibana/definition/top.json | 2 +- .../functions/kibana/definition/trim.json | 2 +- .../functions/kibana/definition/values.json | 2 +- .../esql/functions/kibana/inline_cast.json | 1 - .../esql/functions/types/case.asciidoc | 6 +- .../esql/functions/types/coalesce.asciidoc | 4 +- .../esql/functions/types/greatest.asciidoc | 4 +- .../esql/functions/types/least.asciidoc | 4 +- .../esql/functions/types/ltrim.asciidoc | 2 +- .../esql/functions/types/max.asciidoc | 2 +- .../esql/functions/types/min.asciidoc | 2 +- .../esql/functions/types/mv_append.asciidoc | 2 +- .../esql/functions/types/mv_dedupe.asciidoc | 2 +- .../esql/functions/types/mv_first.asciidoc | 2 +- .../esql/functions/types/mv_last.asciidoc | 2 +- .../esql/functions/types/mv_max.asciidoc | 2 +- .../esql/functions/types/mv_min.asciidoc | 2 +- .../esql/functions/types/mv_slice.asciidoc | 2 +- .../esql/functions/types/mv_sort.asciidoc | 2 +- .../esql/functions/types/reverse.asciidoc | 2 +- .../esql/functions/types/rtrim.asciidoc | 2 +- .../esql/functions/types/to_lower.asciidoc | 2 +- .../esql/functions/types/to_upper.asciidoc | 2 +- .../esql/functions/types/top.asciidoc | 2 +- .../esql/functions/types/trim.asciidoc | 2 +- .../esql/functions/types/values.asciidoc | 2 +- x-pack/plugin/build.gradle | 2 + .../xpack/esql/core/type/DataType.java | 4 ++ .../src/main/resources/convert.csv-spec | 6 +- .../src/main/resources/stats.csv-spec | 14 +++-- .../src/main/resources/stats_top.csv-spec | 6 +- .../src/main/resources/string.csv-spec | 3 +- .../xpack/esql/action/EsqlCapabilities.java | 5 ++ .../expression/function/aggregate/Max.java | 4 +- .../expression/function/aggregate/Min.java | 4 +- .../expression/function/aggregate/Top.java | 4 +- .../expression/function/aggregate/Values.java | 4 +- .../function/scalar/UnaryScalarFunction.java | 2 +- .../function/scalar/conditional/Case.java | 5 +- .../function/scalar/conditional/Greatest.java | 6 +- .../function/scalar/conditional/Least.java | 6 +- .../function/scalar/multivalue/MvAppend.java | 7 +-- .../function/scalar/multivalue/MvDedupe.java | 1 - .../function/scalar/multivalue/MvFirst.java | 1 - .../function/scalar/multivalue/MvLast.java | 1 - .../function/scalar/multivalue/MvMax.java | 2 +- .../function/scalar/multivalue/MvMin.java | 2 +- .../function/scalar/multivalue/MvSlice.java | 3 +- .../function/scalar/multivalue/MvSort.java | 4 +- .../function/scalar/nulls/Coalesce.java | 5 +- .../function/scalar/string/LTrim.java | 2 +- 
.../function/scalar/string/RTrim.java | 2 +- .../function/scalar/string/Reverse.java | 2 +- .../function/scalar/string/ToLower.java | 4 +- .../function/scalar/string/ToUpper.java | 4 +- .../function/scalar/string/Trim.java | 2 +- .../esql/type/EsqlDataTypeConverter.java | 1 - .../xpack/esql/analysis/ParsingTests.java | 3 - .../expression/function/TestCaseSupplier.java | 2 +- .../function/aggregate/MaxTests.java | 2 +- .../function/aggregate/MinTests.java | 2 +- .../scalar/conditional/CaseTests.java | 59 ++++++++++++++++++- .../function/scalar/string/ToLowerTests.java | 2 +- .../function/scalar/string/ToUpperTests.java | 2 +- .../rest-api-spec/test/esql/80_text.yml | 24 ++++++-- 85 files changed, 253 insertions(+), 123 deletions(-) create mode 100644 docs/changelog/114334.yaml diff --git a/docs/changelog/114334.yaml b/docs/changelog/114334.yaml new file mode 100644 index 0000000000000..d0fefe40c6970 --- /dev/null +++ b/docs/changelog/114334.yaml @@ -0,0 +1,7 @@ +pr: 114334 +summary: Don't return TEXT type for functions that take TEXT +area: ES|QL +type: bug +issues: + - 111537 + - 114333 diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index 1cf2c6ce7a579..bf498f690551c 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -424,6 +424,30 @@ "variadic" : true, "returnType" : "keyword" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "keyword", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "text", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "keyword" + }, { "params" : [ { @@ -482,7 +506,31 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "text", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "keyword", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." 
+ } + ], + "variadic" : true, + "returnType" : "keyword" }, { "params" : [ @@ -506,7 +554,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json index 9ebc5a97229cd..7f49195190951 100644 --- a/docs/reference/esql/functions/kibana/definition/coalesce.json +++ b/docs/reference/esql/functions/kibana/definition/coalesce.json @@ -242,7 +242,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -260,7 +260,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/greatest.json b/docs/reference/esql/functions/kibana/definition/greatest.json index 2818a5ac56339..eebb4fad1eb1d 100644 --- a/docs/reference/esql/functions/kibana/definition/greatest.json +++ b/docs/reference/esql/functions/kibana/definition/greatest.json @@ -189,7 +189,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -207,7 +207,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/least.json b/docs/reference/esql/functions/kibana/definition/least.json index 7b545896f4ddc..02fa58f92eaef 100644 --- a/docs/reference/esql/functions/kibana/definition/least.json +++ b/docs/reference/esql/functions/kibana/definition/least.json @@ -188,7 +188,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -206,7 +206,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/ltrim.json b/docs/reference/esql/functions/kibana/definition/ltrim.json index e85c2d42dedee..6d992b9db7b2c 100644 --- a/docs/reference/esql/functions/kibana/definition/ltrim.json +++ b/docs/reference/esql/functions/kibana/definition/ltrim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index 09ca95a0afeff..45fd26571b091 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index 3e87b3e9038e1..ae71fba049dbe 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_append.json b/docs/reference/esql/functions/kibana/definition/mv_append.json index c14a3337a25a7..81c1b777be498 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_append.json +++ b/docs/reference/esql/functions/kibana/definition/mv_append.json @@ -218,7 +218,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json 
b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json index 9bb0935c6a5de..bfca58bc3e140 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json +++ b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json @@ -147,7 +147,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_first.json b/docs/reference/esql/functions/kibana/definition/mv_first.json index 80e761faafab9..a2b6358023e4b 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_first.json +++ b/docs/reference/esql/functions/kibana/definition/mv_first.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_last.json b/docs/reference/esql/functions/kibana/definition/mv_last.json index fb16400f86e62..b6dc268af5305 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_last.json +++ b/docs/reference/esql/functions/kibana/definition/mv_last.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_max.json b/docs/reference/esql/functions/kibana/definition/mv_max.json index 17cdae8a3d39c..27d2b010dc02c 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_max.json +++ b/docs/reference/esql/functions/kibana/definition/mv_max.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_min.json b/docs/reference/esql/functions/kibana/definition/mv_min.json index 3718a0f6e1de5..410e97335687f 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_min.json +++ b/docs/reference/esql/functions/kibana/definition/mv_min.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_slice.json b/docs/reference/esql/functions/kibana/definition/mv_slice.json index 399a6145b040e..dbbfe0ffb5a78 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_slice.json +++ b/docs/reference/esql/functions/kibana/definition/mv_slice.json @@ -290,7 +290,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_sort.json b/docs/reference/esql/functions/kibana/definition/mv_sort.json index c78ade7c8a94f..4cb255fb0afcb 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_sort.json +++ b/docs/reference/esql/functions/kibana/definition/mv_sort.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/reverse.json b/docs/reference/esql/functions/kibana/definition/reverse.json index 1b222691530f2..0652d9cfa6b15 100644 --- a/docs/reference/esql/functions/kibana/definition/reverse.json +++ b/docs/reference/esql/functions/kibana/definition/reverse.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/rtrim.json b/docs/reference/esql/functions/kibana/definition/rtrim.json index 028f442de9632..9c8a7578ed789 100644 --- 
a/docs/reference/esql/functions/kibana/definition/rtrim.json +++ b/docs/reference/esql/functions/kibana/definition/rtrim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_lower.json b/docs/reference/esql/functions/kibana/definition/to_lower.json index f9b49a29a8c7d..07bb057fe080d 100644 --- a/docs/reference/esql/functions/kibana/definition/to_lower.json +++ b/docs/reference/esql/functions/kibana/definition/to_lower.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_upper.json b/docs/reference/esql/functions/kibana/definition/to_upper.json index edf36a982f56b..caa9d563b08b1 100644 --- a/docs/reference/esql/functions/kibana/definition/to_upper.json +++ b/docs/reference/esql/functions/kibana/definition/to_upper.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/top.json b/docs/reference/esql/functions/kibana/definition/top.json index 7fa4ff123eec7..82bd80636152c 100644 --- a/docs/reference/esql/functions/kibana/definition/top.json +++ b/docs/reference/esql/functions/kibana/definition/top.json @@ -194,7 +194,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/trim.json b/docs/reference/esql/functions/kibana/definition/trim.json index 6edf13e588e62..45805b3bfb054 100644 --- a/docs/reference/esql/functions/kibana/definition/trim.json +++ b/docs/reference/esql/functions/kibana/definition/trim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/values.json b/docs/reference/esql/functions/kibana/definition/values.json index e289173d9d989..ae69febd4f755 100644 --- a/docs/reference/esql/functions/kibana/definition/values.json +++ b/docs/reference/esql/functions/kibana/definition/values.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/inline_cast.json b/docs/reference/esql/functions/kibana/inline_cast.json index 81a1966773238..9f663c8d0d6a3 100644 --- a/docs/reference/esql/functions/kibana/inline_cast.json +++ b/docs/reference/esql/functions/kibana/inline_cast.json @@ -15,7 +15,6 @@ "keyword" : "to_string", "long" : "to_long", "string" : "to_string", - "text" : "to_string", "time_duration" : "to_timeduration", "unsigned_long" : "to_unsigned_long", "version" : "to_version" diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index e8aa3eaf5daae..c6fb6a091e9d0 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -24,11 +24,13 @@ boolean | integer | | integer boolean | ip | ip | ip boolean | ip | | ip boolean | keyword | keyword | keyword +boolean | keyword | text | keyword boolean | keyword | | keyword boolean | long | long | long boolean | long | | long -boolean | text | text | text -boolean | text | | text +boolean | text | keyword | keyword +boolean | text | text | keyword +boolean | text | | keyword boolean | unsigned_long | 
unsigned_long | unsigned_long boolean | unsigned_long | | unsigned_long boolean | version | version | version diff --git a/docs/reference/esql/functions/types/coalesce.asciidoc b/docs/reference/esql/functions/types/coalesce.asciidoc index 368a12db0dca4..23a249494e0a2 100644 --- a/docs/reference/esql/functions/types/coalesce.asciidoc +++ b/docs/reference/esql/functions/types/coalesce.asciidoc @@ -19,7 +19,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/greatest.asciidoc b/docs/reference/esql/functions/types/greatest.asciidoc index 1454bbb6f81c1..7df77a6991315 100644 --- a/docs/reference/esql/functions/types/greatest.asciidoc +++ b/docs/reference/esql/functions/types/greatest.asciidoc @@ -16,7 +16,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/least.asciidoc b/docs/reference/esql/functions/types/least.asciidoc index 1454bbb6f81c1..7df77a6991315 100644 --- a/docs/reference/esql/functions/types/least.asciidoc +++ b/docs/reference/esql/functions/types/least.asciidoc @@ -16,7 +16,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/ltrim.asciidoc b/docs/reference/esql/functions/types/ltrim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/ltrim.asciidoc +++ b/docs/reference/esql/functions/types/ltrim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/mv_append.asciidoc b/docs/reference/esql/functions/types/mv_append.asciidoc index a1894e429ae82..05f9ff6b19f9e 100644 --- a/docs/reference/esql/functions/types/mv_append.asciidoc +++ b/docs/reference/esql/functions/types/mv_append.asciidoc @@ -16,6 +16,6 @@ integer | integer | integer ip | ip | ip keyword | keyword | keyword long | long | long -text | text | text +text | text | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/mv_dedupe.asciidoc b/docs/reference/esql/functions/types/mv_dedupe.asciidoc index 68e546451c8cb..976de79bb0910 100644 --- a/docs/reference/esql/functions/types/mv_dedupe.asciidoc +++ b/docs/reference/esql/functions/types/mv_dedupe.asciidoc @@ -16,6 +16,6 @@ integer | integer ip | ip keyword | keyword long | long 
-text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index 35633544d99a0..47736e76d1db4 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -16,7 +16,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index 35633544d99a0..47736e76d1db4 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -16,7 +16,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_max.asciidoc b/docs/reference/esql/functions/types/mv_max.asciidoc index 8ea36aebbad37..d4e014554c86c 100644 --- a/docs/reference/esql/functions/types/mv_max.asciidoc +++ b/docs/reference/esql/functions/types/mv_max.asciidoc @@ -12,7 +12,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_min.asciidoc b/docs/reference/esql/functions/types/mv_min.asciidoc index 8ea36aebbad37..d4e014554c86c 100644 --- a/docs/reference/esql/functions/types/mv_min.asciidoc +++ b/docs/reference/esql/functions/types/mv_min.asciidoc @@ -12,7 +12,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc index 0a9dc073370c7..60c1f6315a599 100644 --- a/docs/reference/esql/functions/types/mv_slice.asciidoc +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -16,6 +16,6 @@ integer | integer | integer | integer ip | integer | integer | ip keyword | integer | integer | keyword long | integer | integer | long -text | integer | integer | text +text | integer | integer | keyword version | integer | integer | version |=== diff --git a/docs/reference/esql/functions/types/mv_sort.asciidoc b/docs/reference/esql/functions/types/mv_sort.asciidoc index 93965187482ac..c21ea5983945e 100644 --- a/docs/reference/esql/functions/types/mv_sort.asciidoc +++ b/docs/reference/esql/functions/types/mv_sort.asciidoc @@ -12,6 +12,6 @@ integer | keyword | integer ip | keyword | ip keyword | keyword | keyword long | keyword | long -text | keyword | text +text | keyword | keyword version | keyword | version |=== diff --git a/docs/reference/esql/functions/types/reverse.asciidoc b/docs/reference/esql/functions/types/reverse.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/reverse.asciidoc +++ b/docs/reference/esql/functions/types/reverse.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/rtrim.asciidoc b/docs/reference/esql/functions/types/rtrim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/rtrim.asciidoc +++ b/docs/reference/esql/functions/types/rtrim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== 
diff --git a/docs/reference/esql/functions/types/to_lower.asciidoc b/docs/reference/esql/functions/types/to_lower.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/to_lower.asciidoc +++ b/docs/reference/esql/functions/types/to_lower.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/to_upper.asciidoc b/docs/reference/esql/functions/types/to_upper.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/to_upper.asciidoc +++ b/docs/reference/esql/functions/types/to_upper.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/top.asciidoc b/docs/reference/esql/functions/types/top.asciidoc index 25d7962a27252..699bc7b10ce84 100644 --- a/docs/reference/esql/functions/types/top.asciidoc +++ b/docs/reference/esql/functions/types/top.asciidoc @@ -12,5 +12,5 @@ integer | integer | keyword | integer ip | integer | keyword | ip keyword | integer | keyword | keyword long | integer | keyword | long -text | integer | keyword | text +text | integer | keyword | keyword |=== diff --git a/docs/reference/esql/functions/types/trim.asciidoc b/docs/reference/esql/functions/types/trim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/trim.asciidoc +++ b/docs/reference/esql/functions/types/trim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/values.asciidoc b/docs/reference/esql/functions/types/values.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/values.asciidoc +++ b/docs/reference/esql/functions/types/values.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 8297ef5161fb0..cf6a8f51d1b81 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -84,5 +84,7 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("security/10_forbidden/Test bulk response with invalid credentials", "warning does not exist for compatibility") task.skipTest("inference/inference_crud/Test get all", "Assertions on number of inference models break due to default configs") task.skipTest("esql/60_usage/Basic ESQL usage output (telemetry)", "The telemetry output changed. We dropped a column. That's safe.") + task.skipTest("esql/80_text/reverse text", "The output type changed from TEXT to KEYWORD.") + task.skipTest("esql/80_text/values function", "The output type changed from TEXT to KEYWORD.") }) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 5041c96128a1e..1b1eff8a07b1d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -584,6 +584,10 @@ static Builder builder() { return new Builder(); } + public DataType noText() { + return this == TEXT ? KEYWORD : this; + } + /** * Named parameters with default values. It's just easier to do this with * a builder in java.... 
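The Java changes that follow apply that helper in two recurring patterns: normalizing a function's declared output type, and comparing parameter types modulo TEXT when all arguments must share a type. A hedged sketch of both patterns, using the hypothetical names `SameTypeFunction` and `Arg` (only the `noText()` call sites mirror the real code, e.g. `Max#dataType` and `Coalesce#resolveType` in the diffs below):

```java
import java.util.List;

// Hypothetical illustration of the two patterns in the diffs below; not real ES|QL code.
class SameTypeFunction {
    enum DataType {
        KEYWORD, TEXT, LONG;

        DataType noText() {
            return this == TEXT ? KEYWORD : this;
        }
    }

    record Arg(DataType dataType) {}

    private final List<Arg> children;
    private DataType dataType;

    SameTypeFunction(List<Arg> children) {
        this.children = children;
    }

    // Pattern (a): the declared output type never reports TEXT.
    DataType outputType() {
        return children.get(0).dataType().noText();
    }

    // Pattern (b): "same type" checks compare types modulo TEXT, so TEXT
    // and KEYWORD arguments now unify instead of failing type resolution.
    boolean typesResolve() {
        for (Arg child : children) {
            if (dataType == null) {
                dataType = child.dataType().noText();
            } else if (child.dataType().noText() != dataType) {
                return false; // the real code reports a type-resolution error here
            }
        }
        return true;
    }

    public static void main(String[] args) {
        var mixed = new SameTypeFunction(List.of(new Arg(DataType.TEXT), new Arg(DataType.KEYWORD)));
        System.out.println(mixed.typesResolve()); // true: TEXT and KEYWORD unify
        System.out.println(mixed.outputType());   // KEYWORD

        var clash = new SameTypeFunction(List.of(new Arg(DataType.TEXT), new Arg(DataType.LONG)));
        System.out.println(clash.typesResolve()); // false: LONG does not unify with KEYWORD
    }
}
```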
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec index 1397965145a1a..49960d1b5b0f3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec @@ -58,11 +58,11 @@ ROW zero="0"::double convertToString required_capability: casting_operator -ROW one=1::keyword, two=2::text, three=3::string +ROW one=1::keyword, two=2::double, three=3::string ; - one:keyword | two:keyword | three:keyword -1 |2 |3 +one:keyword | two:double | three:keyword +1 | 2.0 | 3 ; convertToDatetime diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 2dc21a86e6394..80ba18b85a004 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -131,17 +131,19 @@ OPQS | OPQS | OPQS | ___ | small maxOfText required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 | stats max(name), a = max(name), b = max(x); -max(name):text | a:text | b:text -Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l +max(name):keyword | a:keyword | b:keyword +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l ; maxOfTextGrouping required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 @@ -149,7 +151,7 @@ from airports | sort type asc | limit 4; -max(name):text | a:text | b:text | type:keyword +max(name):keyword| a:keyword | b:keyword | type:keyword Cheongju Int'l | Cheongju Int'l | Cheongju Int'l | major Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | mid Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | military @@ -211,17 +213,19 @@ LUH | LUH | LUH | ___ | small minOfText required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 | stats min(name), a = min(name), b = min(x); -min(name):text | a:text | b:text +min(name):keyword | a:keyword | b:keyword Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh ; minOfTextGrouping required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 @@ -229,7 +233,7 @@ from airports | sort type asc | limit 4; -min(name):text | a:text | b:text | type:keyword +min(name):keyword | a:keyword | b:keyword | type:keyword Chandigarh Int'l | Chandigarh Int'l | Chandigarh Int'l | major Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | mid Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | military diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec index 80d11425c5bb6..6eebb2f4d19da 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec @@ -263,6 +263,7 @@ FROM employees topText required_capability: agg_top required_capability: agg_top_string_support +required_capability: functions_never_emit_text # we don't need MATCH, but the loader for books.csv is busted in CsvTests 
required_capability: match_operator @@ -273,13 +274,14 @@ FROM books calc = TOP(calc, 3, "asc"), evil = TOP(CASE(year < 1980, title, author), 3, "desc"); -title:text | calc:keyword | evil:text +title:keyword | calc:keyword | evil:keyword [Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World, Planet of Exile, City of Illusions, Woman-The Full Story: A Dynamic Celebration of Freedoms, Winter notes on summer impressions] | ["'Bria", "Gent", "HE UN"] | [William Faulkner, William Faulkner, William Faulkner] ; topTextGrouping required_capability: agg_top required_capability: agg_top_string_support +required_capability: functions_never_emit_text # we don't need MATCH, but the loader for books.csv is busted in CsvTests required_capability: match_operator @@ -293,7 +295,7 @@ FROM books | SORT author | LIMIT 3; - title:text | calc:keyword | evil:text | author:text + title:keyword | calc:keyword | evil:keyword | author:text A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | Tolk | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | Agnes Perkins The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | he Lo | [J. R. R. Tolkien, Alan Lee] | Alan Lee A Gentle Creature and Other Stories: White Nights, A Gentle Creature, and The Dream of a Ridiculous Man (The World's Classics) | Gent | [W. J. Leatherbarrow, Fyodor Dostoevsky, Alan Myers] | Alan Myers diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 00fa2fddb2106..305b8f3d8011e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1289,6 +1289,7 @@ x:integer | y:string reverseWithTextFields required_capability: fn_reverse +required_capability: functions_never_emit_text FROM books | EVAL title_reversed = REVERSE(title), author_reversed_twice = REVERSE(REVERSE(author)), eq = author_reversed_twice == author | KEEP title, title_reversed, author, author_reversed_twice, eq, book_no @@ -1296,7 +1297,7 @@ FROM books | WHERE book_no IN ("1211", "1463") | LIMIT 2; -title:text | title_reversed:text | author:text | author_reversed_twice:text | eq:boolean | book_no:keyword +title:text | title_reversed:keyword | author:text | author_reversed_twice:keyword | eq:boolean | book_no:keyword The brothers Karamazov | vozamaraK srehtorb ehT | Fyodor Dostoevsky | Fyodor Dostoevsky | true | 1211 Realms of Tolkien: Images of Middle-earth | htrae-elddiM fo segamI :neikloT fo smlaeR | J. R. R. Tolkien | J. R. R. Tolkien | true | 1463 ; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 55236af648236..196a864db2c15 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -74,6 +74,11 @@ public enum Cap { */ FN_SUBSTRING_EMPTY_NULL, + /** + * All functions that take TEXT should never emit TEXT, only KEYWORD. #114334 + */ + FUNCTIONS_NEVER_EMIT_TEXT, + /** * Support for the {@code INLINESTATS} syntax. 
*/ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index ee16193efdccc..ac2d4ff3cbc43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -55,7 +55,7 @@ public class Max extends AggregateFunction implements ToAggregator, SurrogateExp ); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "long", "version" }, description = "The maximum value of a field.", isAggregation = true, examples = { @@ -119,7 +119,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 7aaa41ea6ab11..a5fc8196847b7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -55,7 +55,7 @@ public class Min extends AggregateFunction implements ToAggregator, SurrogateExp ); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "long", "version" }, description = "The minimum value of a field.", isAggregation = true, examples = { @@ -119,7 +119,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java index 4f81e0a897f9c..e0a7da806b3ac 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java @@ -51,7 +51,7 @@ public class Top extends AggregateFunction implements ToAggregator, SurrogateExp private static final String ORDER_DESC = "DESC"; @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword" }, description = "Collects the top values for a field. 
Includes repeated values.", isAggregation = true, examples = @Example(file = "stats_top", tag = "top") @@ -175,7 +175,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index 8d576839c3c5c..111eab051719b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -52,7 +52,7 @@ public class Values extends AggregateFunction implements ToAggregator { ); @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "version" }, preview = true, description = "Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. " + "If you need the values returned in order use <>.", @@ -105,7 +105,7 @@ public Values withFilter(Expression filter) { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index 4d34033286f52..53b51f16d4183 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -164,6 +164,6 @@ public final Expression field() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index d833a796cbecc..824f02ca7ccbb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -73,7 +73,6 @@ ConditionEvaluatorSupplier toEvaluator(ToEvaluator toEvaluator) { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ @@ -195,12 +194,12 @@ protected TypeResolution resolveType() { private TypeResolution resolveValueType(Expression value, int position) { if (dataType == null || dataType == NULL) { - dataType = value.dataType(); + dataType = value.dataType().noText(); return TypeResolution.TYPE_RESOLVED; } return TypeResolutions.isType( value, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java index 
aad2d37d414b8..abc2ea85198fa 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java @@ -43,7 +43,7 @@ public class Greatest extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Returns the maximum value from multiple columns. This is similar to <>\n" + "except it is intended to run on multiple columns at once.", note = "When run on `keyword` or `text` fields, this returns the last string in alphabetical order. " @@ -104,12 +104,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { Expression child = children().get(position); if (dataType == null || dataType == NULL) { - dataType = child.dataType(); + dataType = child.dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( child, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java index 70ba9319385f3..a49fff0aa888b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java @@ -43,7 +43,7 @@ public class Least extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Returns the minimum value from multiple columns. 
" + "This is similar to <> except it is intended to run on multiple columns at once.", examples = @Example(file = "math", tag = "least") @@ -102,12 +102,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { Expression child = children().get(position); if (dataType == null || dataType == NULL) { - dataType = child.dataType(); + dataType = child.dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( child, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java index 72d96a86d31eb..bcd6f4c30bf8a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java @@ -62,7 +62,6 @@ public class MvAppend extends EsqlScalarFunction implements EvaluatorMapper { "ip", "keyword", "long", - "text", "version" }, description = "Concatenates values of two multi-value fields." ) @@ -134,12 +133,12 @@ protected TypeResolution resolveType() { if (resolution.unresolved()) { return resolution; } - dataType = field1.dataType(); + dataType = field1.dataType().noText(); if (dataType == DataType.NULL) { - dataType = field2.dataType(); + dataType = field2.dataType().noText(); return isType(field2, DataType::isRepresentable, sourceText(), SECOND, "representable"); } - return isType(field2, t -> t == dataType, sourceText(), SECOND, dataType.typeName()); + return isType(field2, t -> t.noText() == dataType, sourceText(), SECOND, dataType.typeName()); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java index 34b89b4f78997..9a2b041fafeb6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java @@ -46,7 +46,6 @@ public class MvDedupe extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "version" }, description = "Remove duplicate values from a multivalued field.", note = "`MV_DEDUPE` may, but won't always, sort the values in the column.", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java index d5d203e7bb3d1..957c74883ffdf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java @@ -53,7 +53,6 @@ public class MvFirst extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java index 21487f14817cd..fedbc1934d1be 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java @@ -53,7 +53,6 @@ public class MvLast extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java index 6a53c652d3420..5386a9e3ef763 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java @@ -36,7 +36,7 @@ public class MvMax extends AbstractMultivalueFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMax", MvMax::new); @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "unsigned_long", "version" }, description = "Converts a multivalued expression into a single valued column containing the maximum value.", examples = { @Example(file = "math", tag = "mv_max"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java index 4cc83c99b2c08..a2b3c53f322ba 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java @@ -36,7 +36,7 @@ public class MvMin extends AbstractMultivalueFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMin", MvMin::new); @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "unsigned_long", "version" }, description = "Converts a multivalued expression into a single valued column containing the minimum value.", examples = { @Example(file = "math", tag = "mv_min"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java index ef562c339dfd9..f4f9679dc3704 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -67,7 +67,6 @@ public class MvSlice extends EsqlScalarFunction implements OptionalArgument, Eva "ip", "keyword", "long", - "text", "version" }, description = """ Returns 
a subset of the multivalued field using the start and end index values. @@ -240,7 +239,7 @@ protected NodeInfo info() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } static int adjustIndex(int oldOffset, int fieldValueCount, int first) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java index 5ca5618bf2a54..2286a1357ced8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -69,7 +69,7 @@ public class MvSort extends EsqlScalarFunction implements OptionalArgument, Vali private static final String INVALID_ORDER_ERROR = "Invalid order value in [{}], expected one of [{}, {}] but got [{}]"; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Sorts a multivalued field in lexicographical order.", examples = @Example(file = "ints", tag = "mv_sort") ) @@ -226,7 +226,7 @@ protected NodeInfo info() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index 6b9c8d0da025b..52686430ca5b5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -61,7 +61,6 @@ public class Coalesce extends EsqlScalarFunction implements OptionalArgument { "ip", "keyword", "long", - "text", "version" }, description = "Returns the first of its arguments that is not null. 
If all arguments are null, it returns `null`.", examples = { @Example(file = "null", tag = "coalesce") } @@ -145,12 +144,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { if (dataType == null || dataType == NULL) { - dataType = children().get(position).dataType(); + dataType = children().get(position).dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( children().get(position), - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java index 8a4a5f4d841a5..0b7233f10b454 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java @@ -34,7 +34,7 @@ public class LTrim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "LTrim", LTrim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes leading whitespaces from a string.", examples = @Example(file = "string", tag = "ltrim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java index b79e1adf99a20..80809a444f5e8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java @@ -34,7 +34,7 @@ public class RTrim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "RTrim", RTrim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes trailing whitespaces from a string.", examples = @Example(file = "string", tag = "rtrim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java index e161566838cd9..02787999f24f7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java @@ -37,7 +37,7 @@ public class Reverse extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Reverse", Reverse::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string in reverse order.", examples = { @Example(file = "string", tag = "reverse"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java index c475469488d7b..5f2bbcde52166 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java @@ -39,7 +39,7 @@ public class ToLower extends EsqlConfigurationFunction { private final Expression field; @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string converted to lower case.", examples = @Example(file = "string", tag = "to_lower") ) @@ -72,7 +72,7 @@ public String getWriteableName() { @Override public DataType dataType() { - return field.dataType(); + return DataType.KEYWORD; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java index 1b5084a7916ef..7fdd5e39f96f3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java @@ -39,7 +39,7 @@ public class ToUpper extends EsqlConfigurationFunction { private final Expression field; @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string converted to upper case.", examples = @Example(file = "string", tag = "to_upper") ) @@ -72,7 +72,7 @@ public String getWriteableName() { @Override public DataType dataType() { - return field.dataType(); + return DataType.KEYWORD; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java index 1fe7529caa2da..ef0afc3a4e6cb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java @@ -34,7 +34,7 @@ public final class Trim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Trim", Trim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes leading and trailing whitespaces from a string.", examples = @Example(file = "string", tag = "trim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index edc3081a33681..05a658ec411f3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -117,7 +117,6 @@ public class EsqlDataTypeConverter { entry(LONG, ToLong::new), // ToRadians, typeless entry(KEYWORD, ToString::new), - entry(TEXT, ToString::new), entry(UNSIGNED_LONG, ToUnsignedLong::new), entry(VERSION, ToVersion::new), entry(DATE_PERIOD, ToDatePeriod::new), diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java index 8867e7425a92e..3cafd42b731f6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java @@ -88,9 +88,6 @@ public void testInlineCast() throws IOException { Collections.sort(namesAndAliases); for (String nameOrAlias : namesAndAliases) { DataType expectedType = DataType.fromNameOrAlias(nameOrAlias); - if (expectedType == DataType.TEXT) { - expectedType = DataType.KEYWORD; - } if (EsqlDataTypeConverter.converterFunctionFactory(expectedType) == null) { continue; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 2ba175657b6c2..c12e0a8684ba9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -1435,7 +1435,7 @@ public static TestCase typeError(List data, String expectedTypeError) this.source = Source.EMPTY; this.data = data; this.evaluatorToString = evaluatorToString; - this.expectedType = expectedType; + this.expectedType = expectedType == null ? null : expectedType.noText(); @SuppressWarnings("unchecked") Matcher downcast = (Matcher) matcher; this.matcher = downcast; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java index ce2bf7e262ae9..9756804a1ec0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java @@ -128,7 +128,7 @@ public static Iterable parameters() { return new TestCaseSupplier.TestCase( List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), "Max[field=Attribute[channel=0]]", - DataType.TEXT, + DataType.KEYWORD, equalTo(value) ); }), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java index 7250072cd2003..171181496c889 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java @@ -128,7 +128,7 @@ public static Iterable parameters() { return new TestCaseSupplier.TestCase( List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), "Min[field=Attribute[channel=0]]", - DataType.TEXT, + DataType.KEYWORD, equalTo(value) ); }), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index fbb7c691b1d94..51b1c72c6e287 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -151,6 +151,33 @@ private static void twoAndThreeArgs( return testCase(type, typedData, lhsOrRhs ? lhs : rhs, toStringMatcher(1, false), false, null, addWarnings(warnings)); }) ); + if (type.noText() == DataType.KEYWORD) { + DataType otherType = type == DataType.KEYWORD ? DataType.TEXT : DataType.KEYWORD; + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(cond, type, otherType)), + List.of(DataType.BOOLEAN, type, otherType), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(otherType).value(); + List typedData = List.of( + cond(cond, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, otherType, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? lhs : rhs, + toStringMatcher(1, false), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + } if (lhsOrRhs) { suppliers.add( new TestCaseSupplier( @@ -222,7 +249,6 @@ private static void twoAndThreeArgs( ) ); } - suppliers.add( new TestCaseSupplier( "partial foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond, type, type)), @@ -292,6 +318,33 @@ private static void twoAndThreeArgs( } ) ); + if (type.noText() == DataType.KEYWORD) { + DataType otherType = type == DataType.KEYWORD ? DataType.TEXT : DataType.KEYWORD; + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(DataType.NULL, type, otherType)), + List.of(DataType.NULL, type, otherType), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(otherType).value(); + List typedData = List.of( + new TestCaseSupplier.TypedData(null, DataType.NULL, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, otherType, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? 
lhs : rhs, + startsWith("CaseEagerEvaluator[conditions=[ConditionEvaluator[condition="), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + } } suppliers.add( new TestCaseSupplier( @@ -804,7 +857,7 @@ private static String typeErrorMessage(boolean includeOrdinal, List ty if (types.get(0) != DataType.BOOLEAN && types.get(0) != DataType.NULL) { return typeErrorMessage(includeOrdinal, types, 0, "boolean"); } - DataType mainType = types.get(1); + DataType mainType = types.get(1).noText(); for (int i = 2; i < types.size(); i++) { if (i % 2 == 0 && i != types.size() - 1) { // condition @@ -813,7 +866,7 @@ private static String typeErrorMessage(boolean includeOrdinal, List ty } } else { // value - if (types.get(i) != mainType) { + if (types.get(i).noText() != mainType) { return typeErrorMessage(includeOrdinal, types, i, mainType.typeName()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index 7af1c180fd7b9..1f564ecb87f1e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers, (v, p) -> "string"); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); } public void testRandomLocale() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index c8bbe03bde411..7c136c3bb83c2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers, (v, p) -> "string"); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); } public void testRandomLocale() { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index 88ef03a22d70c..55bd39bdd73cc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -392,7 +392,7 @@ setup: - method: POST path: /_query parameters: [method, path, parameters, capabilities] - capabilities: [fn_reverse] + capabilities: [fn_reverse, functions_never_emit_text] reason: "reverse not yet added" - do: allowed_warnings_regex: @@ -402,10 +402,10 @@ setup: query: 'FROM test | SORT name | EVAL job_reversed = REVERSE(job), tag_reversed = REVERSE(tag) | KEEP job_reversed, tag_reversed' - match: { columns.0.name: 
"job_reversed" } - - match: { columns.0.type: "text" } + - match: { columns.0.type: "keyword" } - match: { columns.1.name: "tag_reversed" } - - match: { columns.1.type: "text" } + - match: { columns.1.type: "keyword" } - length: { values: 2 } - match: { values.0: [ "rotceriD TI", "rab oof" ] } @@ -573,7 +573,6 @@ setup: body: query: 'FROM test | STATS job = VALUES(job) | EVAL job = MV_SORT(job) | LIMIT 1' - match: { columns.0.name: "job" } - - match: { columns.0.type: "text" } - length: { values: 1 } - match: { values.0: [ [ "IT Director", "Payroll Specialist" ] ] } @@ -592,7 +591,22 @@ setup: - match: { columns.0.name: "tag" } - match: { columns.0.type: "text" } - match: { columns.1.name: "job" } - - match: { columns.1.type: "text" } - length: { values: 2 } - match: { values.0: [ "baz", [ "Other", "Payroll Specialist" ] ] } - match: { values.1: [ "foo bar", "IT Director" ] } + +--- +"remove text typecast": + - requires: + capabilities: + - method: POST + path: /_query + parameters: [ method, path, parameters, capabilities ] + capabilities: [ functions_never_emit_text ] + reason: "Disabling ::text was done in 8.17 as part of removing all possibilities to emit text" + + - do: + catch: /Unsupported conversion to type \[TEXT\]/ + esql.query: + body: + query: 'FROM test | EVAL tag = name::text | KEEP name' From 16f61b460033baed6e7ae725fad96860d7a7f5e5 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Fri, 25 Oct 2024 10:15:56 +0200 Subject: [PATCH 141/202] Increase assert timeout for DeprecationHttpIT to reduce risk of failing when test cluster is slow to warm up (fixes #115179) (#115621) --- .../xpack/deprecation/DeprecationHttpIT.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java index 3fb9573dd7b62..4a17c2abbd797 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java @@ -121,7 +121,7 @@ public void testDeprecatedSettingsReturnWarnings() throws Exception { List> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId()); logger.warn(documents); assertThat(documents, hasSize(2)); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } finally { Response response = cleanupSettings(); List warningHeaders = getWarningHeaders(response.getHeaders()); @@ -245,7 +245,7 @@ private void doTestDeprecationWarningsAppearInHeaders(String xOpaqueId) throws E var documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId); logger.warn(documents); assertThat(documents, hasSize(headerMatchers.size())); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testDeprecationRouteThrottling() throws Exception { @@ -275,7 +275,7 @@ public void testDeprecationRouteThrottling() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -303,7 +303,7 @@ public void testDisableDeprecationLogIndexing() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } finally { configureWriteDeprecationLogsToIndex(null); } @@ -369,7 +369,7 @@ public void testDeprecationMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, 
TimeUnit.SECONDS); } @@ -414,7 +414,7 @@ public void testDeprecationCriticalWarnMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -473,7 +473,7 @@ public void testDeprecationWarnMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -504,7 +504,7 @@ public void testDeprecateAndKeep() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testReplacesInCurrentVersion() throws Exception { @@ -534,7 +534,7 @@ public void testReplacesInCurrentVersion() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testReplacesInCompatibleVersion() throws Exception { @@ -579,7 +579,7 @@ public void testReplacesInCompatibleVersion() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } /** @@ -649,7 +649,7 @@ public void testCompatibleMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -690,7 +690,7 @@ public void testDeprecationIndexingCacheReset() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } From 9394e88c0f00e58e6b49e7607fb70bde119e4e1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 25 Oct 2024 10:18:01 +0200 Subject: [PATCH 142/202] [DOCS] Updates inference processor docs. (#115566) --- docs/reference/ingest/processors/inference.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc index 4699f634afe37..9c6f0592a1d91 100644 --- a/docs/reference/ingest/processors/inference.asciidoc +++ b/docs/reference/ingest/processors/inference.asciidoc @@ -16,7 +16,7 @@ ingested in the pipeline. [options="header"] |====== | Name | Required | Default | Description -| `model_id` . | yes | - | (String) The ID or alias for the trained model, or the ID of the deployment. +| `model_id` . | yes | - | (String) An inference ID, a model deployment ID, a trained model ID or an alias. | `input_output` | no | - | (List) Input fields for {infer} and output (destination) fields for the {infer} results. This option is incompatible with the `target_field` and `field_map` options. | `target_field` | no | `ml.inference.` | (String) Field added to incoming documents to contain results objects. | `field_map` | no | If defined the model's default field map | (Object) Maps the document field names to the known field names of the model. This mapping takes precedence over any default mappings provided in the model configuration. 
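For context on the `model_id` wording change above: a minimal `inference` processor definition exercising the `model_id` and `input_output` options described in that table might look like the following sketch. The endpoint ID and field names here are illustrative placeholders, not part of this patch.

[source,console]
----
PUT _ingest/pipeline/my-inference-pipeline
{
  "processors": [
    {
      "inference": {
        "model_id": "my-elser-endpoint", <1>
        "input_output": [
          {
            "input_field": "body_text", <2>
            "output_field": "body_embedding"
          }
        ]
      }
    }
  ]
}
----
<1> Illustrative inference endpoint ID; per the table above, a model deployment ID, a trained model ID, or an alias is also accepted.
<2> The input/output field names are placeholders for whatever fields the documents actually carry.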
From 11401a35d41c723e98c0dcc09f4874c9c842d349 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:45:39 +1100 Subject: [PATCH 143/202] Mute org.elasticsearch.oldrepos.OldRepositoryAccessIT testOldRepoAccess #115631 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 084bf27d6a11b..5c94c0aff60b6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,6 +279,9 @@ tests: - class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT method: test {yaml=indices.create/10_basic/Create lookup index} issue: https://github.com/elastic/elasticsearch/issues/115605 +- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT + method: testOldRepoAccess + issue: https://github.com/elastic/elasticsearch/issues/115631 # Examples: # From 452ca351d3d0887db96c124dd83bb755e6e5894f Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:19:31 +0200 Subject: [PATCH 144/202] [DOCS] Test trivial commit (#115579) (#115628) (cherry picked from commit e642dd84815ea476d1e7b99f26f65cb5099d4e39) --- .../search-your-data/search-application-overview.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/search-your-data/search-application-overview.asciidoc b/docs/reference/search/search-your-data/search-application-overview.asciidoc index e12b55911740b..13cc97bb8aeab 100644 --- a/docs/reference/search/search-your-data/search-application-overview.asciidoc +++ b/docs/reference/search/search-your-data/search-application-overview.asciidoc @@ -74,7 +74,7 @@ To create a new search application in {kib}: . Name your search application. . Select *Create*. -Your search application should now be available in the list of search applications. +Your search application should now be available in the list. 
//[.screenshot] // image::../../images/search-applications/search-applications-create.png[Create search application screen] From b83042aa432776e4e1bcfe5c3c2f17ff2467a5e5 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:30:37 +1100 Subject: [PATCH 145/202] Mute org.elasticsearch.xpack.esql.analysis.AnalyzerTests testMvAppendValidation #115636 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5c94c0aff60b6..4869b669f6220 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 +- class: org.elasticsearch.xpack.esql.analysis.AnalyzerTests + method: testMvAppendValidation + issue: https://github.com/elastic/elasticsearch/issues/115636 # Examples: # From f1de84b51cf753e2bd1e381c0a6858797229b233 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:39:52 +0200 Subject: [PATCH 146/202] [DOCS] Fix casing in servicenow docs config (#115634) --- .../connector/docs/connectors-servicenow.asciidoc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/reference/connector/docs/connectors-servicenow.asciidoc b/docs/reference/connector/docs/connectors-servicenow.asciidoc index 089a3b405d8a5..a02c418f11d74 100644 --- a/docs/reference/connector/docs/connectors-servicenow.asciidoc +++ b/docs/reference/connector/docs/connectors-servicenow.asciidoc @@ -81,7 +81,7 @@ Comma-separated list of services to fetch data from ServiceNow. If the value is - link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/incident-management/concept/c_IncidentManagement.html[Incident] - link:https://docs.servicenow.com/bundle/tokyo-servicenow-platform/page/use/service-catalog-requests/task/t_AddNewRequestItems.html[Requested Item] - link:https://docs.servicenow.com/bundle/tokyo-customer-service-management/page/product/customer-service-management/task/t_SearchTheKnowledgeBase.html[Knowledge] -- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change Request] +- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change request] + [NOTE] ==== @@ -89,7 +89,7 @@ If you have configured a custom service, the `*` value will not fetch data from ==== Default value is `*`. Examples: + - - `User, Incident, Requested Item, Knowledge, Change Request` + - `User, Incident, Requested Item, Knowledge, Change request` - `*` Enable document level security:: @@ -139,7 +139,7 @@ For default services, connectors use the following roles to find users who have | Knowledge | `admin`, `knowledge`, `knowledge_manager`, `knowledge_admin` -| Change Request | `admin`, `sn_change_read`, `itil` +| Change request | `admin`, `sn_change_read`, `itil` |=== For services other than these defaults, the connector iterates over access controls with `read` operations and finds the respective roles for those services. @@ -305,7 +305,7 @@ Comma-separated list of services to fetch data from ServiceNow. 
If the value is - link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/incident-management/concept/c_IncidentManagement.html[Incident] - link:https://docs.servicenow.com/bundle/tokyo-servicenow-platform/page/use/service-catalog-requests/task/t_AddNewRequestItems.html[Requested Item] - link:https://docs.servicenow.com/bundle/tokyo-customer-service-management/page/product/customer-service-management/task/t_SearchTheKnowledgeBase.html[Knowledge] -- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change Request] +- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change request] + [NOTE] ==== @@ -313,7 +313,7 @@ If you have configured a custom service, the `*` value will not fetch data from ==== Default value is `*`. Examples: + - - `User, Incident, Requested Item, Knowledge, Change Request` + - `User, Incident, Requested Item, Knowledge, Change request` - `*` `retry_count`:: @@ -374,7 +374,7 @@ For default services, connectors use the following roles to find users who have | Knowledge | `admin`, `knowledge`, `knowledge_manager`, `knowledge_admin` -| Change Request | `admin`, `sn_change_read`, `itil` +| Change request | `admin`, `sn_change_read`, `itil` |=== For services other than these defaults, the connector iterates over access controls with `read` operations and finds the respective roles for those services. From 2d854768bc98b34bd4ea8217aced2e1d95140aef Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 12:37:38 +0200 Subject: [PATCH 147/202] Optimize threading in AbstractSearchAsyncAction (#113230) Forking when an action completes on the current thread is needlessly heavy handed in preventing stack-overflows. Also, we don't need locking/synchronization to deal with a worker-count + queue length problem. Both of these allow for non-trivial optimization even in the current execution model, also this change helps with moving to a more efficient execution model by saving needless forking to the search pool in particular. -> refactored the code to never fork but instead avoid stack-depth issues through use of a `SubscribableListener` -> replaced our home brew queue and semaphore combination by JDK primitives which saves blocking synchronization on task start and completion. 
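To make the new scheme concrete, below is a minimal, self-contained sketch of the acquire-or-enqueue pattern the rewritten `PendingExecutions` uses, simplified to synchronous `Runnable`s; the class and method names are illustrative, not the actual Elasticsearch API:

import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.Semaphore;

// Sketch: bound concurrency with a Semaphore and park overflow tasks in a
// LinkedTransferQueue, so submission and completion never block on a lock.
final class ThrottledRunner {
    private final Semaphore permits;
    private final LinkedTransferQueue<Runnable> queue = new LinkedTransferQueue<>();

    ThrottledRunner(int maxConcurrent) {
        this.permits = new Semaphore(maxConcurrent);
    }

    void submit(Runnable task) {
        if (permits.tryAcquire()) {
            runAndDrain(task);            // fast path: a permit was free
        } else {
            queue.add(task);              // park the task
            // Re-check in case every permit was released between the failed
            // tryAcquire and the add; whoever wins a permit drains the queue.
            if (permits.tryAcquire()) {
                Runnable queued = pollOrRelease();
                if (queued != null) {
                    runAndDrain(queued);
                }
            }
        }
    }

    private void runAndDrain(Runnable task) {
        while (task != null) {
            task.run();
            task = pollOrRelease();       // keep the permit while work remains
        }
    }

    private Runnable pollOrRelease() {
        Runnable next = queue.poll();
        if (next == null) {
            permits.release();            // nothing queued: hand the permit back
        }
        return next;
    }
}

In the actual change the per-shard work completes asynchronously, so the drain step additionally goes through a SubscribableListener and only continues on the current thread when that listener has already completed, which is what avoids both recursion-driven stack overflows and the old forking.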
--- .../search/AbstractSearchAsyncAction.java | 220 ++++++++---------- 1 file changed, 94 insertions(+), 126 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 0c585c705dcd0..cf25c5730d341 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -20,14 +20,13 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.shard.ShardId; @@ -43,7 +42,6 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.transport.Transport; -import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -51,9 +49,12 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; +import java.util.concurrent.LinkedTransferQueue; +import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; +import java.util.function.Consumer; import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; @@ -238,7 +239,12 @@ public final void run() { assert shardRoutings.skip() == false; assert shardIndexMap.containsKey(shardRoutings); int shardIndex = shardIndexMap.get(shardRoutings); - performPhaseOnShard(shardIndex, shardRoutings, shardRoutings.nextOrNull()); + final SearchShardTarget routing = shardRoutings.nextOrNull(); + if (routing == null) { + failOnUnavailable(shardIndex, shardRoutings); + } else { + performPhaseOnShard(shardIndex, shardRoutings, routing); + } } } } @@ -258,7 +264,7 @@ private static boolean assertExecuteOnStartThread() { int index = 0; assert stackTraceElements[index++].getMethodName().equals("getStackTrace"); assert stackTraceElements[index++].getMethodName().equals("assertExecuteOnStartThread"); - assert stackTraceElements[index++].getMethodName().equals("performPhaseOnShard"); + assert stackTraceElements[index++].getMethodName().equals("failOnUnavailable"); if (stackTraceElements[index].getMethodName().equals("performPhaseOnShard")) { assert stackTraceElements[index].getClassName().endsWith("CanMatchPreFilterSearchPhase"); index++; @@ -277,65 +283,53 @@ private static boolean assertExecuteOnStartThread() { } protected void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) { - /* - * We capture the thread that this phase is starting on. 
When we are called back after executing the phase, we are either on the - * same thread (because we never went async, or the same thread was selected from the thread pool) or a different thread. If we - * continue on the same thread in the case that we never went async and this happens repeatedly we will end up recursing deeply and - * could stack overflow. To prevent this, we fork if we are called back on the same thread that execution started on and otherwise - * we can continue (cf. InitialSearchPhase#maybeFork). - */ - if (shard == null) { - assert assertExecuteOnStartThread(); - SearchShardTarget unassignedShard = new SearchShardTarget(null, shardIt.shardId(), shardIt.getClusterAlias()); - onShardFailure(shardIndex, unassignedShard, shardIt, new NoShardAvailableActionException(shardIt.shardId())); + if (throttleConcurrentRequests) { + var pendingExecutions = pendingExecutionsPerNode.computeIfAbsent( + shard.getNodeId(), + n -> new PendingExecutions(maxConcurrentRequestsPerNode) + ); + pendingExecutions.submit(l -> doPerformPhaseOnShard(shardIndex, shardIt, shard, l)); } else { - final PendingExecutions pendingExecutions = throttleConcurrentRequests - ? pendingExecutionsPerNode.computeIfAbsent(shard.getNodeId(), n -> new PendingExecutions(maxConcurrentRequestsPerNode)) - : null; - Runnable r = () -> { - final Thread thread = Thread.currentThread(); - try { - executePhaseOnShard(shardIt, shard, new SearchActionListener<>(shard, shardIndex) { - @Override - public void innerOnResponse(Result result) { - try { - onShardResult(result, shardIt); - } catch (Exception exc) { - onShardFailure(shardIndex, shard, shardIt, exc); - } finally { - executeNext(pendingExecutions, thread); - } - } + doPerformPhaseOnShard(shardIndex, shardIt, shard, () -> {}); + } + } - @Override - public void onFailure(Exception t) { - try { - onShardFailure(shardIndex, shard, shardIt, t); - } finally { - executeNext(pendingExecutions, thread); - } - } - }); - } catch (final Exception e) { - try { - /* - * It is possible to run into connection exceptions here because we are getting the connection early and might - * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. - */ - fork(() -> onShardFailure(shardIndex, shard, shardIt, e)); - } finally { - executeNext(pendingExecutions, thread); + private void doPerformPhaseOnShard(int shardIndex, SearchShardIterator shardIt, SearchShardTarget shard, Releasable releasable) { + try { + executePhaseOnShard(shardIt, shard, new SearchActionListener<>(shard, shardIndex) { + @Override + public void innerOnResponse(Result result) { + try (releasable) { + onShardResult(result, shardIt); + } catch (Exception exc) { + onShardFailure(shardIndex, shard, shardIt, exc); } } - }; - if (throttleConcurrentRequests) { - pendingExecutions.tryRun(r); - } else { - r.run(); + + @Override + public void onFailure(Exception e) { + try (releasable) { + onShardFailure(shardIndex, shard, shardIt, e); + } + } + }); + } catch (final Exception e) { + /* + * It is possible to run into connection exceptions here because we are getting the connection early and might + * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. 
+ */ + try (releasable) { + onShardFailure(shardIndex, shard, shardIt, e); } } } + private void failOnUnavailable(int shardIndex, SearchShardIterator shardIt) { + assert assertExecuteOnStartThread(); + SearchShardTarget unassignedShard = new SearchShardTarget(null, shardIt.shardId(), shardIt.getClusterAlias()); + onShardFailure(shardIndex, unassignedShard, shardIt, new NoShardAvailableActionException(shardIt.shardId())); + } + /** * Sends the request to the actual shard. * @param shardIt the shards iterator @@ -348,34 +342,6 @@ protected abstract void executePhaseOnShard( SearchActionListener listener ); - protected void fork(final Runnable runnable) { - executor.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.error(() -> "unexpected error during [" + task + "]", e); - assert false : e; - } - - @Override - public void onRejection(Exception e) { - // avoid leaks during node shutdown by executing on the current thread if the executor shuts down - assert e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e; - doRun(); - } - - @Override - protected void doRun() { - runnable.run(); - } - - @Override - public boolean isForceExecution() { - // we can not allow a stuffed queue to reject execution here - return true; - } - }); - } - @Override public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) { /* This is the main search phase transition where we move to the next phase. If all shards @@ -794,61 +760,63 @@ protected final ShardSearchRequest buildShardSearchRequest(SearchShardIterator s */ protected abstract SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context); - private void executeNext(PendingExecutions pendingExecutions, Thread originalThread) { - executeNext(pendingExecutions == null ? 
null : pendingExecutions.finishAndGetNext(), originalThread); - } - - void executeNext(Runnable runnable, Thread originalThread) { - if (runnable != null) { - assert throttleConcurrentRequests; - if (originalThread == Thread.currentThread()) { - fork(runnable); - } else { - runnable.run(); - } - } - } - private static final class PendingExecutions { - private final int permits; - private int permitsTaken = 0; - private final ArrayDeque queue = new ArrayDeque<>(); + private final Semaphore semaphore; + private final LinkedTransferQueue> queue = new LinkedTransferQueue<>(); PendingExecutions(int permits) { assert permits > 0 : "not enough permits: " + permits; - this.permits = permits; + semaphore = new Semaphore(permits); } - Runnable finishAndGetNext() { - synchronized (this) { - permitsTaken--; - assert permitsTaken >= 0 : "illegal taken permits: " + permitsTaken; + void submit(Consumer task) { + if (semaphore.tryAcquire()) { + executeAndRelease(task); + } else { + queue.add(task); + if (semaphore.tryAcquire()) { + task = pollNextTaskOrReleasePermit(); + if (task != null) { + executeAndRelease(task); + } + } } - return tryQueue(null); + } - void tryRun(Runnable runnable) { - Runnable r = tryQueue(runnable); - if (r != null) { - r.run(); + private void executeAndRelease(Consumer task) { + while (task != null) { + final SubscribableListener onDone = new SubscribableListener<>(); + task.accept(() -> onDone.onResponse(null)); + if (onDone.isDone()) { + // keep going on the current thread, no need to fork + task = pollNextTaskOrReleasePermit(); + } else { + onDone.addListener(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + final Consumer nextTask = pollNextTaskOrReleasePermit(); + if (nextTask != null) { + executeAndRelease(nextTask); + } + } + + @Override + public void onFailure(Exception e) { + assert false : e; + } + }); + return; + } } } - private synchronized Runnable tryQueue(Runnable runnable) { - Runnable toExecute = null; - if (permitsTaken < permits) { - permitsTaken++; - toExecute = runnable; - if (toExecute == null) { // only poll if we don't have anything to execute - toExecute = queue.poll(); - } - if (toExecute == null) { - permitsTaken--; - } - } else if (runnable != null) { - queue.add(runnable); + private Consumer pollNextTaskOrReleasePermit() { + var task = queue.poll(); + if (task == null) { + semaphore.release(); } - return toExecute; + return task; } } } From 13e67bdd0803914ac75ec13853828fec1b42d4a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Fri, 25 Oct 2024 12:43:13 +0200 Subject: [PATCH 148/202] Refactoring of the KQL grammar. 
(#115632) --- x-pack/plugin/kql/src/main/antlr/KqlBase.g4 | 95 +- .../plugin/kql/src/main/antlr/KqlBase.tokens | 31 +- .../kql/src/main/antlr/KqlBaseLexer.tokens | 31 +- .../xpack/kql/parser/KqlBase.interp | 28 +- .../xpack/kql/parser/KqlBaseBaseListener.java | 56 +- .../xpack/kql/parser/KqlBaseBaseVisitor.java | 30 +- .../xpack/kql/parser/KqlBaseLexer.interp | 22 +- .../xpack/kql/parser/KqlBaseLexer.java | 255 ++--- .../xpack/kql/parser/KqlBaseListener.java | 84 +- .../xpack/kql/parser/KqlBaseParser.java | 1010 ++++++++++------- .../xpack/kql/parser/KqlBaseVisitor.java | 44 +- .../kql/src/test/resources/supported-queries | 9 + .../src/test/resources/unsupported-queries | 8 - 13 files changed, 900 insertions(+), 803 deletions(-) diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 index cffa2db9f959a..dbf7c1979796a 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 @@ -26,70 +26,68 @@ topLevelQuery ; query - : query (AND | OR) query #booleanQuery - | NOT subQuery=simpleQuery #notQuery - | simpleQuery #defaultQuery + : query operator=(AND | OR) query #booleanQuery + | NOT subQuery=simpleQuery #notQuery + | simpleQuery #defaultQuery ; simpleQuery : nestedQuery - | expression | parenthesizedQuery - ; - -expression - : fieldTermQuery - | fieldRangeQuery + | matchAllQuery + | existsQuery + | rangeQuery + | fieldQuery + | fieldLessQuery ; nestedQuery : fieldName COLON LEFT_CURLY_BRACKET query RIGHT_CURLY_BRACKET ; -parenthesizedQuery: - LEFT_PARENTHESIS query RIGHT_PARENTHESIS; - -fieldRangeQuery - : fieldName operator=OP_COMPARE rangeQueryValue +matchAllQuery + : (WILDCARD COLON)? WILDCARD ; -fieldTermQuery - : (fieldName COLON)? termQueryValue +parenthesizedQuery + : LEFT_PARENTHESIS query RIGHT_PARENTHESIS ; -fieldName - : wildcardExpression - | unquotedLiteralExpression - | quotedStringExpression +rangeQuery + : fieldName operator=(OP_LESS|OP_LESS_EQ|OP_MORE|OP_MORE_EQ) rangeQueryValue ; rangeQueryValue - : unquotedLiteralExpression - | quotedStringExpression - ; - -termQueryValue - : wildcardExpression - | quotedStringExpression - | termValue=unquotedLiteralExpression - | groupingTermExpression; + : (UNQUOTED_LITERAL|WILDCARD)+ + | QUOTED_STRING + ; -groupingTermExpression - : LEFT_PARENTHESIS unquotedLiteralExpression RIGHT_PARENTHESIS +existsQuery + :fieldName COLON WILDCARD ; -unquotedLiteralExpression - : UNQUOTED_LITERAL+ +fieldQuery + : fieldName COLON fieldQueryValue + | fieldName COLON LEFT_PARENTHESIS fieldQueryValue RIGHT_PARENTHESIS ; -quotedStringExpression - : QUOTED_STRING +fieldLessQuery + : fieldQueryValue + | LEFT_PARENTHESIS fieldQueryValue RIGHT_PARENTHESIS ; -wildcardExpression - : WILDCARD -; +fieldQueryValue + : (AND|OR)? (UNQUOTED_LITERAL | WILDCARD )+ + | (UNQUOTED_LITERAL | WILDCARD )+ (AND|OR)? 
+ | (NOT|AND|OR) + | QUOTED_STRING + ; +fieldName + : value=UNQUOTED_LITERAL+ + | value=QUOTED_STRING + | value=WILDCARD + ; DEFAULT_SKIP: WHITESPACE -> skip; @@ -98,31 +96,34 @@ OR: 'or'; NOT: 'not'; COLON: ':'; -OP_COMPARE: OP_LESS | OP_MORE | OP_LESS_EQ | OP_MORE_EQ; +OP_LESS: '<'; +OP_LESS_EQ: '<='; +OP_MORE: '>'; +OP_MORE_EQ: '>='; LEFT_PARENTHESIS: '('; RIGHT_PARENTHESIS: ')'; LEFT_CURLY_BRACKET: '{'; RIGHT_CURLY_BRACKET: '}'; -UNQUOTED_LITERAL: WILDCARD* UNQUOTED_LITERAL_CHAR+ WILDCARD*; +UNQUOTED_LITERAL: UNQUOTED_LITERAL_CHAR+; QUOTED_STRING: '"'QUOTED_CHAR*'"'; -WILDCARD: WILDCARD_CHAR+; +WILDCARD: WILDCARD_CHAR; fragment WILDCARD_CHAR: '*'; -fragment OP_LESS: '<'; -fragment OP_LESS_EQ: '<='; -fragment OP_MORE: '>'; -fragment OP_MORE_EQ: '>='; fragment UNQUOTED_LITERAL_CHAR + : WILDCARD_CHAR* UNQUOTED_LITERAL_BASE_CHAR WILDCARD_CHAR* + | WILDCARD_CHAR WILDCARD_CHAR+ + ; + +fragment UNQUOTED_LITERAL_BASE_CHAR : ESCAPED_WHITESPACE | ESCAPED_SPECIAL_CHAR | ESCAPE_UNICODE_SEQUENCE | '\\' (AND | OR | NOT) - | WILDCARD_CHAR UNQUOTED_LITERAL_CHAR | NON_SPECIAL_CHAR ; @@ -135,7 +136,7 @@ fragment QUOTED_CHAR fragment WHITESPACE: [ \t\n\r\u3000]; fragment ESCAPED_WHITESPACE: '\\r' | '\\t' | '\\n'; -fragment NON_SPECIAL_CHAR: ~[ \\():<>"*{}]; +fragment NON_SPECIAL_CHAR: ~[ \n\r\t\u3000\\():<>"*{}]; fragment ESCAPED_SPECIAL_CHAR: '\\'[ \\():<>"*{}]; fragment ESCAPED_QUOTE: '\\"'; diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens b/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens index 268ae0613b9f0..f26b6b9c3da55 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens @@ -3,19 +3,26 @@ AND=2 OR=3 NOT=4 COLON=5 -OP_COMPARE=6 -LEFT_PARENTHESIS=7 -RIGHT_PARENTHESIS=8 -LEFT_CURLY_BRACKET=9 -RIGHT_CURLY_BRACKET=10 -UNQUOTED_LITERAL=11 -QUOTED_STRING=12 -WILDCARD=13 +OP_LESS=6 +OP_LESS_EQ=7 +OP_MORE=8 +OP_MORE_EQ=9 +LEFT_PARENTHESIS=10 +RIGHT_PARENTHESIS=11 +LEFT_CURLY_BRACKET=12 +RIGHT_CURLY_BRACKET=13 +UNQUOTED_LITERAL=14 +QUOTED_STRING=15 +WILDCARD=16 'and'=2 'or'=3 'not'=4 ':'=5 -'('=7 -')'=8 -'{'=9 -'}'=10 +'<'=6 +'<='=7 +'>'=8 +'>='=9 +'('=10 +')'=11 +'{'=12 +'}'=13 diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens b/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens index 268ae0613b9f0..f26b6b9c3da55 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens +++ b/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens @@ -3,19 +3,26 @@ AND=2 OR=3 NOT=4 COLON=5 -OP_COMPARE=6 -LEFT_PARENTHESIS=7 -RIGHT_PARENTHESIS=8 -LEFT_CURLY_BRACKET=9 -RIGHT_CURLY_BRACKET=10 -UNQUOTED_LITERAL=11 -QUOTED_STRING=12 -WILDCARD=13 +OP_LESS=6 +OP_LESS_EQ=7 +OP_MORE=8 +OP_MORE_EQ=9 +LEFT_PARENTHESIS=10 +RIGHT_PARENTHESIS=11 +LEFT_CURLY_BRACKET=12 +RIGHT_CURLY_BRACKET=13 +UNQUOTED_LITERAL=14 +QUOTED_STRING=15 +WILDCARD=16 'and'=2 'or'=3 'not'=4 ':'=5 -'('=7 -')'=8 -'{'=9 -'}'=10 +'<'=6 +'<='=7 +'>'=8 +'>='=9 +'('=10 +')'=11 +'{'=12 +'}'=13 diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp index 1954195b52363..111cac6d641b9 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp @@ -5,7 +5,10 @@ null 'or' 'not' ':' -null +'<' +'<=' +'>' +'>=' '(' ')' '{' @@ -21,7 +24,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS 
RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -34,19 +40,17 @@ rule names: topLevelQuery query simpleQuery -expression nestedQuery +matchAllQuery parenthesizedQuery -fieldRangeQuery -fieldTermQuery -fieldName +rangeQuery rangeQueryValue -termQueryValue -groupingTermExpression -unquotedLiteralExpression -quotedStringExpression -wildcardExpression +existsQuery +fieldQuery +fieldLessQuery +fieldQueryValue +fieldName atn: -[4, 1, 13, 108, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 1, 0, 3, 0, 32, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 40, 8, 1, 1, 1, 1, 1, 1, 1, 5, 1, 45, 8, 1, 10, 1, 12, 1, 48, 9, 1, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 3, 3, 57, 8, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 3, 7, 76, 8, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 3, 8, 83, 8, 8, 1, 9, 1, 9, 3, 9, 87, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 93, 8, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 4, 12, 100, 8, 12, 11, 12, 12, 12, 101, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 0, 1, 2, 15, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 0, 1, 1, 0, 2, 3, 106, 0, 31, 1, 0, 0, 0, 2, 39, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 56, 1, 0, 0, 0, 8, 58, 1, 0, 0, 0, 10, 64, 1, 0, 0, 0, 12, 68, 1, 0, 0, 0, 14, 75, 1, 0, 0, 0, 16, 82, 1, 0, 0, 0, 18, 86, 1, 0, 0, 0, 20, 92, 1, 0, 0, 0, 22, 94, 1, 0, 0, 0, 24, 99, 1, 0, 0, 0, 26, 103, 1, 0, 0, 0, 28, 105, 1, 0, 0, 0, 30, 32, 3, 2, 1, 0, 31, 30, 1, 0, 0, 0, 31, 32, 1, 0, 0, 0, 32, 33, 1, 0, 0, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 36, 6, 1, -1, 0, 36, 37, 5, 4, 0, 0, 37, 40, 3, 4, 2, 0, 38, 40, 3, 4, 2, 0, 39, 35, 1, 0, 0, 0, 39, 38, 1, 0, 0, 0, 40, 46, 1, 0, 0, 0, 41, 42, 10, 3, 0, 0, 42, 43, 7, 0, 0, 0, 43, 45, 3, 2, 1, 4, 44, 41, 1, 0, 0, 0, 45, 48, 1, 0, 0, 0, 46, 44, 1, 0, 0, 0, 46, 47, 1, 0, 0, 0, 47, 3, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 49, 53, 3, 8, 4, 0, 50, 53, 3, 6, 3, 0, 51, 53, 3, 10, 5, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 57, 3, 14, 7, 0, 55, 57, 3, 12, 6, 0, 56, 54, 1, 0, 0, 0, 56, 55, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 59, 3, 16, 8, 0, 59, 60, 5, 5, 0, 0, 60, 61, 5, 9, 0, 0, 61, 62, 3, 2, 1, 0, 62, 63, 5, 10, 0, 0, 63, 9, 1, 0, 0, 0, 64, 65, 5, 7, 0, 0, 65, 66, 3, 2, 1, 0, 66, 67, 5, 8, 0, 0, 67, 11, 1, 0, 0, 0, 68, 69, 3, 16, 8, 0, 69, 70, 5, 6, 0, 0, 70, 71, 3, 18, 9, 0, 71, 13, 1, 0, 0, 0, 72, 73, 3, 16, 8, 0, 73, 74, 5, 5, 0, 0, 74, 76, 1, 0, 0, 0, 75, 72, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 77, 1, 0, 0, 0, 77, 78, 3, 20, 10, 0, 78, 15, 1, 0, 0, 0, 79, 83, 3, 28, 14, 0, 80, 83, 3, 24, 12, 0, 81, 83, 3, 26, 13, 0, 82, 79, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 81, 1, 0, 0, 0, 83, 17, 1, 0, 0, 0, 84, 87, 3, 24, 12, 0, 85, 87, 3, 26, 13, 0, 86, 84, 1, 0, 0, 0, 86, 85, 1, 0, 0, 0, 87, 19, 1, 0, 0, 0, 88, 93, 3, 28, 14, 0, 89, 93, 3, 26, 13, 0, 90, 93, 3, 24, 12, 0, 91, 93, 3, 22, 11, 0, 92, 88, 1, 0, 0, 0, 92, 89, 1, 0, 0, 0, 92, 90, 1, 0, 0, 0, 92, 91, 1, 0, 0, 0, 93, 21, 1, 0, 0, 0, 94, 95, 5, 7, 0, 0, 95, 96, 3, 24, 12, 0, 96, 97, 5, 8, 0, 0, 97, 23, 1, 0, 0, 0, 98, 100, 5, 11, 0, 0, 99, 98, 1, 0, 0, 0, 100, 101, 1, 0, 0, 0, 101, 99, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 25, 1, 0, 0, 0, 103, 104, 5, 12, 0, 0, 104, 27, 1, 0, 0, 0, 105, 106, 5, 13, 0, 0, 106, 29, 1, 0, 0, 0, 10, 31, 39, 46, 52, 56, 75, 82, 86, 92, 101] \ No newline at end of file +[4, 1, 16, 135, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 
2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 3, 0, 28, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 1, 1, 1, 1, 1, 5, 1, 41, 8, 1, 10, 1, 12, 1, 44, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 3, 4, 63, 8, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 4, 7, 76, 8, 7, 11, 7, 12, 7, 77, 1, 7, 3, 7, 81, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 97, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 104, 8, 10, 1, 11, 3, 11, 107, 8, 11, 1, 11, 4, 11, 110, 8, 11, 11, 11, 12, 11, 111, 1, 11, 4, 11, 115, 8, 11, 11, 11, 12, 11, 116, 1, 11, 3, 11, 120, 8, 11, 1, 11, 1, 11, 3, 11, 124, 8, 11, 1, 12, 4, 12, 127, 8, 12, 11, 12, 12, 12, 128, 1, 12, 1, 12, 3, 12, 133, 8, 12, 1, 12, 0, 1, 2, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 145, 0, 27, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 54, 1, 0, 0, 0, 8, 62, 1, 0, 0, 0, 10, 66, 1, 0, 0, 0, 12, 70, 1, 0, 0, 0, 14, 80, 1, 0, 0, 0, 16, 82, 1, 0, 0, 0, 18, 96, 1, 0, 0, 0, 20, 103, 1, 0, 0, 0, 22, 123, 1, 0, 0, 0, 24, 132, 1, 0, 0, 0, 26, 28, 3, 2, 1, 0, 27, 26, 1, 0, 0, 0, 27, 28, 1, 0, 0, 0, 28, 29, 1, 0, 0, 0, 29, 30, 5, 0, 0, 1, 30, 1, 1, 0, 0, 0, 31, 32, 6, 1, -1, 0, 32, 33, 5, 4, 0, 0, 33, 36, 3, 4, 2, 0, 34, 36, 3, 4, 2, 0, 35, 31, 1, 0, 0, 0, 35, 34, 1, 0, 0, 0, 36, 42, 1, 0, 0, 0, 37, 38, 10, 3, 0, 0, 38, 39, 7, 0, 0, 0, 39, 41, 3, 2, 1, 3, 40, 37, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0, 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 3, 1, 0, 0, 0, 44, 42, 1, 0, 0, 0, 45, 53, 3, 6, 3, 0, 46, 53, 3, 10, 5, 0, 47, 53, 3, 8, 4, 0, 48, 53, 3, 16, 8, 0, 49, 53, 3, 12, 6, 0, 50, 53, 3, 18, 9, 0, 51, 53, 3, 20, 10, 0, 52, 45, 1, 0, 0, 0, 52, 46, 1, 0, 0, 0, 52, 47, 1, 0, 0, 0, 52, 48, 1, 0, 0, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 55, 3, 24, 12, 0, 55, 56, 5, 5, 0, 0, 56, 57, 5, 12, 0, 0, 57, 58, 3, 2, 1, 0, 58, 59, 5, 13, 0, 0, 59, 7, 1, 0, 0, 0, 60, 61, 5, 16, 0, 0, 61, 63, 5, 5, 0, 0, 62, 60, 1, 0, 0, 0, 62, 63, 1, 0, 0, 0, 63, 64, 1, 0, 0, 0, 64, 65, 5, 16, 0, 0, 65, 9, 1, 0, 0, 0, 66, 67, 5, 10, 0, 0, 67, 68, 3, 2, 1, 0, 68, 69, 5, 11, 0, 0, 69, 11, 1, 0, 0, 0, 70, 71, 3, 24, 12, 0, 71, 72, 7, 1, 0, 0, 72, 73, 3, 14, 7, 0, 73, 13, 1, 0, 0, 0, 74, 76, 7, 2, 0, 0, 75, 74, 1, 0, 0, 0, 76, 77, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 77, 78, 1, 0, 0, 0, 78, 81, 1, 0, 0, 0, 79, 81, 5, 15, 0, 0, 80, 75, 1, 0, 0, 0, 80, 79, 1, 0, 0, 0, 81, 15, 1, 0, 0, 0, 82, 83, 3, 24, 12, 0, 83, 84, 5, 5, 0, 0, 84, 85, 5, 16, 0, 0, 85, 17, 1, 0, 0, 0, 86, 87, 3, 24, 12, 0, 87, 88, 5, 5, 0, 0, 88, 89, 3, 22, 11, 0, 89, 97, 1, 0, 0, 0, 90, 91, 3, 24, 12, 0, 91, 92, 5, 5, 0, 0, 92, 93, 5, 10, 0, 0, 93, 94, 3, 22, 11, 0, 94, 95, 5, 11, 0, 0, 95, 97, 1, 0, 0, 0, 96, 86, 1, 0, 0, 0, 96, 90, 1, 0, 0, 0, 97, 19, 1, 0, 0, 0, 98, 104, 3, 22, 11, 0, 99, 100, 5, 10, 0, 0, 100, 101, 3, 22, 11, 0, 101, 102, 5, 11, 0, 0, 102, 104, 1, 0, 0, 0, 103, 98, 1, 0, 0, 0, 103, 99, 1, 0, 0, 0, 104, 21, 1, 0, 0, 0, 105, 107, 7, 0, 0, 0, 106, 105, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 109, 1, 0, 0, 0, 108, 110, 7, 2, 0, 0, 109, 108, 1, 0, 0, 0, 110, 111, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 111, 112, 1, 0, 0, 0, 112, 124, 1, 0, 0, 0, 113, 115, 7, 2, 0, 0, 114, 113, 1, 0, 0, 0, 115, 116, 1, 0, 0, 0, 116, 114, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 119, 1, 0, 0, 0, 
118, 120, 7, 0, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 124, 1, 0, 0, 0, 121, 124, 7, 3, 0, 0, 122, 124, 5, 15, 0, 0, 123, 106, 1, 0, 0, 0, 123, 114, 1, 0, 0, 0, 123, 121, 1, 0, 0, 0, 123, 122, 1, 0, 0, 0, 124, 23, 1, 0, 0, 0, 125, 127, 5, 14, 0, 0, 126, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 128, 129, 1, 0, 0, 0, 129, 133, 1, 0, 0, 0, 130, 133, 5, 15, 0, 0, 131, 133, 5, 16, 0, 0, 132, 126, 1, 0, 0, 0, 132, 130, 1, 0, 0, 0, 132, 131, 1, 0, 0, 0, 133, 25, 1, 0, 0, 0, 16, 27, 35, 42, 52, 62, 77, 80, 96, 103, 106, 111, 116, 119, 123, 128, 132] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java index 1b4282b5dbbea..426af7f7115b9 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java @@ -80,18 +80,6 @@ class KqlBaseBaseListener implements KqlBaseListener { *
<p>The default implementation does nothing.</p>
*/ @Override public void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { } - /** - * {@inheritDoc} - * - *
<p>The default implementation does nothing.</p>
- */ - @Override public void enterExpression(KqlBaseParser.ExpressionContext ctx) { } - /** - * {@inheritDoc} - * - *
<p>The default implementation does nothing.</p>
- */ - @Override public void exitExpression(KqlBaseParser.ExpressionContext ctx) { } /** * {@inheritDoc} * @@ -109,49 +97,37 @@ class KqlBaseBaseListener implements KqlBaseListener { * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } - /** - * {@inheritDoc} - * - *
<p>The default implementation does nothing.</p>
- */ - @Override public void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } - /** - * {@inheritDoc} - * - *
<p>The default implementation does nothing.</p>
- */ - @Override public void enterFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { } + @Override public void enterMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { } + @Override public void exitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { } + @Override public void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { } + @Override public void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterFieldName(KqlBaseParser.FieldNameContext ctx) { } + @Override public void enterRangeQuery(KqlBaseParser.RangeQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitFieldName(KqlBaseParser.FieldNameContext ctx) { } + @Override public void exitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { } /** * {@inheritDoc} * @@ -169,61 +145,61 @@ class KqlBaseBaseListener implements KqlBaseListener { * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { } + @Override public void enterExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { } + @Override public void exitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { } + @Override public void enterFieldQuery(KqlBaseParser.FieldQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { } + @Override public void exitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { } + @Override public void enterFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { } + @Override public void exitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { } + @Override public void enterFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { } + @Override public void exitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { } + @Override public void enterFieldName(KqlBaseParser.FieldNameContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { } + @Override public void exitFieldName(KqlBaseParser.FieldNameContext ctx) { } /** * {@inheritDoc} diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java index 09cd668804154..cf1f2b3972823 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java @@ -55,13 +55,6 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa * {@link #visitChildren} on {@code ctx}.
*/ @Override public T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { return visitChildren(ctx); } - /** - * {@inheritDoc} - * - *
<p>The default implementation returns the result of calling - * {@link #visitChildren} on {@code ctx}.</p>
- */ - @Override public T visitExpression(KqlBaseParser.ExpressionContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -75,28 +68,21 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { return visitChildren(ctx); } - /** - * {@inheritDoc} - * - *
<p>The default implementation returns the result of calling - * {@link #visitChildren} on {@code ctx}.</p>
- */ - @Override public T visitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitFieldName(KqlBaseParser.FieldNameContext ctx) { return visitChildren(ctx); } + @Override public T visitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -110,33 +96,33 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { return visitChildren(ctx); } + @Override public T visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.

*/ - @Override public T visitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldName(KqlBaseParser.FieldNameContext ctx) { return visitChildren(ctx); } } diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp index d178df5fcbc88..f9afe07af3b40 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp @@ -5,7 +5,10 @@ null 'or' 'not' ':' -null +'<' +'<=' +'>' +'>=' '(' ')' '{' @@ -21,7 +24,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -36,7 +42,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -45,11 +54,8 @@ UNQUOTED_LITERAL QUOTED_STRING WILDCARD WILDCARD_CHAR -OP_LESS -OP_LESS_EQ -OP_MORE -OP_MORE_EQ UNQUOTED_LITERAL_CHAR +UNQUOTED_LITERAL_BASE_CHAR QUOTED_CHAR WHITESPACE ESCAPED_WHITESPACE @@ -68,4 +74,4 @@ mode names: DEFAULT_MODE atn: -[4, 0, 13, 181, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 79, 8, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 5, 10, 90, 8, 10, 10, 10, 12, 10, 93, 9, 10, 1, 10, 4, 10, 96, 8, 10, 11, 10, 12, 10, 97, 1, 10, 5, 10, 101, 8, 10, 10, 10, 12, 10, 104, 9, 10, 1, 11, 1, 11, 5, 11, 108, 8, 11, 10, 11, 12, 11, 111, 9, 11, 1, 11, 1, 11, 1, 12, 4, 12, 116, 8, 12, 11, 12, 12, 12, 117, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 139, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 145, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 151, 8, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 161, 8, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 0, 0, 28, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 0, 29, 0, 31, 0, 33, 0, 35, 0, 37, 0, 39, 0, 41, 0, 43, 0, 45, 0, 47, 0, 49, 0, 51, 0, 53, 0, 55, 0, 1, 0, 11, 2, 0, 65, 65, 97, 97, 2, 0, 78, 78, 110, 110, 2, 0, 68, 68, 100, 100, 2, 0, 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 1, 0, 34, 34, 4, 0, 9, 10, 13, 13, 32, 32, 12288, 12288, 9, 0, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 2, 0, 85, 85, 117, 117, 3, 0, 48, 57, 65, 70, 97, 102, 185, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 1, 57, 1, 0, 0, 0, 3, 61, 1, 0, 0, 0, 5, 65, 1, 0, 0, 0, 7, 68, 1, 0, 0, 0, 9, 72, 1, 0, 0, 0, 11, 78, 1, 0, 0, 0, 13, 80, 1, 0, 0, 0, 15, 82, 1, 0, 0, 0, 17, 84, 1, 0, 0, 0, 
19, 86, 1, 0, 0, 0, 21, 91, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 115, 1, 0, 0, 0, 27, 119, 1, 0, 0, 0, 29, 121, 1, 0, 0, 0, 31, 123, 1, 0, 0, 0, 33, 126, 1, 0, 0, 0, 35, 128, 1, 0, 0, 0, 37, 144, 1, 0, 0, 0, 39, 150, 1, 0, 0, 0, 41, 152, 1, 0, 0, 0, 43, 160, 1, 0, 0, 0, 45, 162, 1, 0, 0, 0, 47, 164, 1, 0, 0, 0, 49, 167, 1, 0, 0, 0, 51, 170, 1, 0, 0, 0, 53, 173, 1, 0, 0, 0, 55, 179, 1, 0, 0, 0, 57, 58, 3, 41, 20, 0, 58, 59, 1, 0, 0, 0, 59, 60, 6, 0, 0, 0, 60, 2, 1, 0, 0, 0, 61, 62, 7, 0, 0, 0, 62, 63, 7, 1, 0, 0, 63, 64, 7, 2, 0, 0, 64, 4, 1, 0, 0, 0, 65, 66, 7, 3, 0, 0, 66, 67, 7, 4, 0, 0, 67, 6, 1, 0, 0, 0, 68, 69, 7, 1, 0, 0, 69, 70, 7, 3, 0, 0, 70, 71, 7, 5, 0, 0, 71, 8, 1, 0, 0, 0, 72, 73, 5, 58, 0, 0, 73, 10, 1, 0, 0, 0, 74, 79, 3, 29, 14, 0, 75, 79, 3, 33, 16, 0, 76, 79, 3, 31, 15, 0, 77, 79, 3, 35, 17, 0, 78, 74, 1, 0, 0, 0, 78, 75, 1, 0, 0, 0, 78, 76, 1, 0, 0, 0, 78, 77, 1, 0, 0, 0, 79, 12, 1, 0, 0, 0, 80, 81, 5, 40, 0, 0, 81, 14, 1, 0, 0, 0, 82, 83, 5, 41, 0, 0, 83, 16, 1, 0, 0, 0, 84, 85, 5, 123, 0, 0, 85, 18, 1, 0, 0, 0, 86, 87, 5, 125, 0, 0, 87, 20, 1, 0, 0, 0, 88, 90, 3, 25, 12, 0, 89, 88, 1, 0, 0, 0, 90, 93, 1, 0, 0, 0, 91, 89, 1, 0, 0, 0, 91, 92, 1, 0, 0, 0, 92, 95, 1, 0, 0, 0, 93, 91, 1, 0, 0, 0, 94, 96, 3, 37, 18, 0, 95, 94, 1, 0, 0, 0, 96, 97, 1, 0, 0, 0, 97, 95, 1, 0, 0, 0, 97, 98, 1, 0, 0, 0, 98, 102, 1, 0, 0, 0, 99, 101, 3, 25, 12, 0, 100, 99, 1, 0, 0, 0, 101, 104, 1, 0, 0, 0, 102, 100, 1, 0, 0, 0, 102, 103, 1, 0, 0, 0, 103, 22, 1, 0, 0, 0, 104, 102, 1, 0, 0, 0, 105, 109, 5, 34, 0, 0, 106, 108, 3, 39, 19, 0, 107, 106, 1, 0, 0, 0, 108, 111, 1, 0, 0, 0, 109, 107, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 112, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 112, 113, 5, 34, 0, 0, 113, 24, 1, 0, 0, 0, 114, 116, 3, 27, 13, 0, 115, 114, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 115, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 26, 1, 0, 0, 0, 119, 120, 5, 42, 0, 0, 120, 28, 1, 0, 0, 0, 121, 122, 5, 60, 0, 0, 122, 30, 1, 0, 0, 0, 123, 124, 5, 60, 0, 0, 124, 125, 5, 61, 0, 0, 125, 32, 1, 0, 0, 0, 126, 127, 5, 62, 0, 0, 127, 34, 1, 0, 0, 0, 128, 129, 5, 62, 0, 0, 129, 130, 5, 61, 0, 0, 130, 36, 1, 0, 0, 0, 131, 145, 3, 43, 21, 0, 132, 145, 3, 47, 23, 0, 133, 145, 3, 51, 25, 0, 134, 138, 5, 92, 0, 0, 135, 139, 3, 3, 1, 0, 136, 139, 3, 5, 2, 0, 137, 139, 3, 7, 3, 0, 138, 135, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, 138, 137, 1, 0, 0, 0, 139, 145, 1, 0, 0, 0, 140, 141, 3, 27, 13, 0, 141, 142, 3, 37, 18, 0, 142, 145, 1, 0, 0, 0, 143, 145, 3, 45, 22, 0, 144, 131, 1, 0, 0, 0, 144, 132, 1, 0, 0, 0, 144, 133, 1, 0, 0, 0, 144, 134, 1, 0, 0, 0, 144, 140, 1, 0, 0, 0, 144, 143, 1, 0, 0, 0, 145, 38, 1, 0, 0, 0, 146, 151, 3, 43, 21, 0, 147, 151, 3, 51, 25, 0, 148, 151, 3, 49, 24, 0, 149, 151, 8, 6, 0, 0, 150, 146, 1, 0, 0, 0, 150, 147, 1, 0, 0, 0, 150, 148, 1, 0, 0, 0, 150, 149, 1, 0, 0, 0, 151, 40, 1, 0, 0, 0, 152, 153, 7, 7, 0, 0, 153, 42, 1, 0, 0, 0, 154, 155, 5, 92, 0, 0, 155, 161, 7, 4, 0, 0, 156, 157, 5, 92, 0, 0, 157, 161, 7, 5, 0, 0, 158, 159, 5, 92, 0, 0, 159, 161, 7, 1, 0, 0, 160, 154, 1, 0, 0, 0, 160, 156, 1, 0, 0, 0, 160, 158, 1, 0, 0, 0, 161, 44, 1, 0, 0, 0, 162, 163, 8, 8, 0, 0, 163, 46, 1, 0, 0, 0, 164, 165, 5, 92, 0, 0, 165, 166, 7, 8, 0, 0, 166, 48, 1, 0, 0, 0, 167, 168, 5, 92, 0, 0, 168, 169, 5, 34, 0, 0, 169, 50, 1, 0, 0, 0, 170, 171, 5, 92, 0, 0, 171, 172, 3, 53, 26, 0, 172, 52, 1, 0, 0, 0, 173, 174, 7, 9, 0, 0, 174, 175, 3, 55, 27, 0, 175, 176, 3, 55, 27, 0, 176, 177, 3, 55, 27, 0, 177, 178, 3, 55, 27, 0, 178, 54, 1, 0, 0, 0, 179, 180, 7, 10, 0, 0, 180, 56, 1, 0, 0, 0, 11, 0, 78, 91, 97, 102, 109, 117, 
138, 144, 150, 160, 1, 6, 0, 0] \ No newline at end of file +[4, 0, 16, 178, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 4, 13, 94, 8, 13, 11, 13, 12, 13, 95, 1, 14, 1, 14, 5, 14, 100, 8, 14, 10, 14, 12, 14, 103, 9, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 5, 17, 112, 8, 17, 10, 17, 12, 17, 115, 9, 17, 1, 17, 1, 17, 5, 17, 119, 8, 17, 10, 17, 12, 17, 122, 9, 17, 1, 17, 1, 17, 4, 17, 126, 8, 17, 11, 17, 12, 17, 127, 3, 17, 130, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 139, 8, 18, 1, 18, 3, 18, 142, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 148, 8, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 158, 8, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 0, 0, 28, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 0, 35, 0, 37, 0, 39, 0, 41, 0, 43, 0, 45, 0, 47, 0, 49, 0, 51, 0, 53, 0, 55, 0, 1, 0, 12, 2, 0, 65, 65, 97, 97, 2, 0, 78, 78, 110, 110, 2, 0, 68, 68, 100, 100, 2, 0, 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 1, 0, 34, 34, 4, 0, 9, 10, 13, 13, 32, 32, 12288, 12288, 12, 0, 9, 10, 13, 13, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 12288, 12288, 9, 0, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 2, 0, 85, 85, 117, 117, 3, 0, 48, 57, 65, 70, 97, 102, 182, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 1, 57, 1, 0, 0, 0, 3, 61, 1, 0, 0, 0, 5, 65, 1, 0, 0, 0, 7, 68, 1, 0, 0, 0, 9, 72, 1, 0, 0, 0, 11, 74, 1, 0, 0, 0, 13, 76, 1, 0, 0, 0, 15, 79, 1, 0, 0, 0, 17, 81, 1, 0, 0, 0, 19, 84, 1, 0, 0, 0, 21, 86, 1, 0, 0, 0, 23, 88, 1, 0, 0, 0, 25, 90, 1, 0, 0, 0, 27, 93, 1, 0, 0, 0, 29, 97, 1, 0, 0, 0, 31, 106, 1, 0, 0, 0, 33, 108, 1, 0, 0, 0, 35, 129, 1, 0, 0, 0, 37, 141, 1, 0, 0, 0, 39, 147, 1, 0, 0, 0, 41, 149, 1, 0, 0, 0, 43, 157, 1, 0, 0, 0, 45, 159, 1, 0, 0, 0, 47, 161, 1, 0, 0, 0, 49, 164, 1, 0, 0, 0, 51, 167, 1, 0, 0, 0, 53, 170, 1, 0, 0, 0, 55, 176, 1, 0, 0, 0, 57, 58, 3, 41, 20, 0, 58, 59, 1, 0, 0, 0, 59, 60, 6, 0, 0, 0, 60, 2, 1, 0, 0, 0, 61, 62, 7, 0, 0, 0, 62, 63, 7, 1, 0, 0, 63, 64, 7, 2, 0, 0, 64, 4, 1, 0, 0, 0, 65, 66, 7, 3, 0, 0, 66, 67, 7, 4, 0, 0, 67, 6, 1, 0, 0, 0, 68, 69, 7, 1, 0, 0, 69, 70, 7, 3, 0, 0, 70, 71, 7, 5, 0, 0, 71, 8, 1, 0, 0, 0, 72, 73, 5, 58, 0, 0, 73, 10, 1, 0, 0, 0, 74, 75, 5, 60, 0, 0, 75, 12, 1, 0, 0, 0, 76, 77, 5, 60, 0, 0, 77, 78, 5, 61, 0, 0, 78, 14, 1, 0, 0, 0, 79, 80, 5, 62, 0, 0, 80, 16, 1, 0, 0, 0, 81, 82, 5, 62, 0, 0, 82, 83, 5, 61, 0, 0, 83, 18, 1, 0, 0, 0, 84, 85, 5, 40, 0, 0, 85, 20, 1, 0, 0, 0, 86, 87, 5, 41, 0, 0, 87, 22, 1, 0, 0, 0, 88, 89, 5, 123, 0, 0, 89, 24, 1, 
0, 0, 0, 90, 91, 5, 125, 0, 0, 91, 26, 1, 0, 0, 0, 92, 94, 3, 35, 17, 0, 93, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 93, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 28, 1, 0, 0, 0, 97, 101, 5, 34, 0, 0, 98, 100, 3, 39, 19, 0, 99, 98, 1, 0, 0, 0, 100, 103, 1, 0, 0, 0, 101, 99, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 104, 1, 0, 0, 0, 103, 101, 1, 0, 0, 0, 104, 105, 5, 34, 0, 0, 105, 30, 1, 0, 0, 0, 106, 107, 3, 33, 16, 0, 107, 32, 1, 0, 0, 0, 108, 109, 5, 42, 0, 0, 109, 34, 1, 0, 0, 0, 110, 112, 3, 33, 16, 0, 111, 110, 1, 0, 0, 0, 112, 115, 1, 0, 0, 0, 113, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 116, 1, 0, 0, 0, 115, 113, 1, 0, 0, 0, 116, 120, 3, 37, 18, 0, 117, 119, 3, 33, 16, 0, 118, 117, 1, 0, 0, 0, 119, 122, 1, 0, 0, 0, 120, 118, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 130, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 123, 125, 3, 33, 16, 0, 124, 126, 3, 33, 16, 0, 125, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 130, 1, 0, 0, 0, 129, 113, 1, 0, 0, 0, 129, 123, 1, 0, 0, 0, 130, 36, 1, 0, 0, 0, 131, 142, 3, 43, 21, 0, 132, 142, 3, 47, 23, 0, 133, 142, 3, 51, 25, 0, 134, 138, 5, 92, 0, 0, 135, 139, 3, 3, 1, 0, 136, 139, 3, 5, 2, 0, 137, 139, 3, 7, 3, 0, 138, 135, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, 138, 137, 1, 0, 0, 0, 139, 142, 1, 0, 0, 0, 140, 142, 3, 45, 22, 0, 141, 131, 1, 0, 0, 0, 141, 132, 1, 0, 0, 0, 141, 133, 1, 0, 0, 0, 141, 134, 1, 0, 0, 0, 141, 140, 1, 0, 0, 0, 142, 38, 1, 0, 0, 0, 143, 148, 3, 43, 21, 0, 144, 148, 3, 51, 25, 0, 145, 148, 3, 49, 24, 0, 146, 148, 8, 6, 0, 0, 147, 143, 1, 0, 0, 0, 147, 144, 1, 0, 0, 0, 147, 145, 1, 0, 0, 0, 147, 146, 1, 0, 0, 0, 148, 40, 1, 0, 0, 0, 149, 150, 7, 7, 0, 0, 150, 42, 1, 0, 0, 0, 151, 152, 5, 92, 0, 0, 152, 158, 7, 4, 0, 0, 153, 154, 5, 92, 0, 0, 154, 158, 7, 5, 0, 0, 155, 156, 5, 92, 0, 0, 156, 158, 7, 1, 0, 0, 157, 151, 1, 0, 0, 0, 157, 153, 1, 0, 0, 0, 157, 155, 1, 0, 0, 0, 158, 44, 1, 0, 0, 0, 159, 160, 8, 8, 0, 0, 160, 46, 1, 0, 0, 0, 161, 162, 5, 92, 0, 0, 162, 163, 7, 9, 0, 0, 163, 48, 1, 0, 0, 0, 164, 165, 5, 92, 0, 0, 165, 166, 5, 34, 0, 0, 166, 50, 1, 0, 0, 0, 167, 168, 5, 92, 0, 0, 168, 169, 3, 53, 26, 0, 169, 52, 1, 0, 0, 0, 170, 171, 7, 10, 0, 0, 171, 172, 3, 55, 27, 0, 172, 173, 3, 55, 27, 0, 173, 174, 3, 55, 27, 0, 174, 175, 3, 55, 27, 0, 175, 54, 1, 0, 0, 0, 176, 177, 7, 11, 0, 0, 177, 56, 1, 0, 0, 0, 11, 0, 95, 101, 113, 120, 127, 129, 138, 141, 147, 157, 1, 6, 0, 0] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java index b397a412d5e8e..f9353afd6e114 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java @@ -25,9 +25,9 @@ class KqlBaseLexer extends Lexer { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_COMPARE=6, LEFT_PARENTHESIS=7, - RIGHT_PARENTHESIS=8, LEFT_CURLY_BRACKET=9, RIGHT_CURLY_BRACKET=10, UNQUOTED_LITERAL=11, - QUOTED_STRING=12, WILDCARD=13; + DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_LESS=6, OP_LESS_EQ=7, + OP_MORE=8, OP_MORE_EQ=9, LEFT_PARENTHESIS=10, RIGHT_PARENTHESIS=11, LEFT_CURLY_BRACKET=12, + RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16; public static String[] channelNames = { "DEFAULT_TOKEN_CHANNEL", "HIDDEN" }; @@ -38,28 
+38,29 @@ class KqlBaseLexer extends Lexer { private static String[] makeRuleNames() { return new String[] { - "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", - "QUOTED_STRING", "WILDCARD", "WILDCARD_CHAR", "OP_LESS", "OP_LESS_EQ", - "OP_MORE", "OP_MORE_EQ", "UNQUOTED_LITERAL_CHAR", "QUOTED_CHAR", "WHITESPACE", - "ESCAPED_WHITESPACE", "NON_SPECIAL_CHAR", "ESCAPED_SPECIAL_CHAR", "ESCAPED_QUOTE", - "ESCAPE_UNICODE_SEQUENCE", "UNICODE_SEQUENCE", "HEX_DIGIT" + "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", + "OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", + "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD", + "WILDCARD_CHAR", "UNQUOTED_LITERAL_CHAR", "UNQUOTED_LITERAL_BASE_CHAR", + "QUOTED_CHAR", "WHITESPACE", "ESCAPED_WHITESPACE", "NON_SPECIAL_CHAR", + "ESCAPED_SPECIAL_CHAR", "ESCAPED_QUOTE", "ESCAPE_UNICODE_SEQUENCE", "UNICODE_SEQUENCE", + "HEX_DIGIT" }; } public static final String[] ruleNames = makeRuleNames(); private static String[] makeLiteralNames() { return new String[] { - null, null, "'and'", "'or'", "'not'", "':'", null, "'('", "')'", "'{'", - "'}'" + null, null, "'and'", "'or'", "'not'", "':'", "'<'", "'<='", "'>'", "'>='", + "'('", "')'", "'{'", "'}'" }; } private static final String[] _LITERAL_NAMES = makeLiteralNames(); private static String[] makeSymbolicNames() { return new String[] { - null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", - "QUOTED_STRING", "WILDCARD" + null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", + "OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", + "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD" }; } private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); @@ -121,119 +122,119 @@ public KqlBaseLexer(CharStream input) { public ATN getATN() { return _ATN; } public static final String _serializedATN = - "\u0004\u0000\r\u00b5\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002\u0001"+ - "\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004"+ - "\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007"+ - "\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b"+ - "\u0007\u000b\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002"+ - "\u000f\u0007\u000f\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011\u0002"+ - "\u0012\u0007\u0012\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014\u0002"+ - "\u0015\u0007\u0015\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017\u0002"+ - "\u0018\u0007\u0018\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a\u0002"+ - "\u001b\u0007\u001b\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001"+ - "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002\u0001"+ - "\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001"+ - "\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005O\b"+ - "\u0005\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\b\u0001\b"+ - "\u0001\t\u0001\t\u0001\n\u0005\nZ\b\n\n\n\f\n]\t\n\u0001\n\u0004\n`\b"+ - "\n\u000b\n\f\na\u0001\n\u0005\ne\b\n\n\n\f\nh\t\n\u0001\u000b\u0001\u000b"+ - "\u0005\u000bl\b\u000b\n\u000b\f\u000bo\t\u000b\u0001\u000b\u0001\u000b"+ - 
"\u0001\f\u0004\ft\b\f\u000b\f\f\fu\u0001\r\u0001\r\u0001\u000e\u0001\u000e"+ - "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0011"+ - "\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u008b\b\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u0091\b\u0012\u0001\u0013"+ - "\u0001\u0013\u0001\u0013\u0001\u0013\u0003\u0013\u0097\b\u0013\u0001\u0014"+ - "\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015"+ - "\u0001\u0015\u0003\u0015\u00a1\b\u0015\u0001\u0016\u0001\u0016\u0001\u0017"+ - "\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0019"+ - "\u0001\u0019\u0001\u0019\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a"+ - "\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0000\u0000\u001c\u0001"+ - "\u0001\u0003\u0002\u0005\u0003\u0007\u0004\t\u0005\u000b\u0006\r\u0007"+ - "\u000f\b\u0011\t\u0013\n\u0015\u000b\u0017\f\u0019\r\u001b\u0000\u001d"+ - "\u0000\u001f\u0000!\u0000#\u0000%\u0000\'\u0000)\u0000+\u0000-\u0000/"+ - "\u00001\u00003\u00005\u00007\u0000\u0001\u0000\u000b\u0002\u0000AAaa\u0002"+ - "\u0000NNnn\u0002\u0000DDdd\u0002\u0000OOoo\u0002\u0000RRrr\u0002\u0000"+ - "TTtt\u0001\u0000\"\"\u0004\u0000\t\n\r\r \u3000\u3000\t\u0000 \"\"("+ - "*::<<>>\\\\{{}}\u0002\u0000UUuu\u0003\u000009AFaf\u00b9\u0000\u0001\u0001"+ - "\u0000\u0000\u0000\u0000\u0003\u0001\u0000\u0000\u0000\u0000\u0005\u0001"+ - "\u0000\u0000\u0000\u0000\u0007\u0001\u0000\u0000\u0000\u0000\t\u0001\u0000"+ - "\u0000\u0000\u0000\u000b\u0001\u0000\u0000\u0000\u0000\r\u0001\u0000\u0000"+ - "\u0000\u0000\u000f\u0001\u0000\u0000\u0000\u0000\u0011\u0001\u0000\u0000"+ - "\u0000\u0000\u0013\u0001\u0000\u0000\u0000\u0000\u0015\u0001\u0000\u0000"+ - "\u0000\u0000\u0017\u0001\u0000\u0000\u0000\u0000\u0019\u0001\u0000\u0000"+ - "\u0000\u00019\u0001\u0000\u0000\u0000\u0003=\u0001\u0000\u0000\u0000\u0005"+ - "A\u0001\u0000\u0000\u0000\u0007D\u0001\u0000\u0000\u0000\tH\u0001\u0000"+ - "\u0000\u0000\u000bN\u0001\u0000\u0000\u0000\rP\u0001\u0000\u0000\u0000"+ - "\u000fR\u0001\u0000\u0000\u0000\u0011T\u0001\u0000\u0000\u0000\u0013V"+ - "\u0001\u0000\u0000\u0000\u0015[\u0001\u0000\u0000\u0000\u0017i\u0001\u0000"+ - "\u0000\u0000\u0019s\u0001\u0000\u0000\u0000\u001bw\u0001\u0000\u0000\u0000"+ - "\u001dy\u0001\u0000\u0000\u0000\u001f{\u0001\u0000\u0000\u0000!~\u0001"+ - "\u0000\u0000\u0000#\u0080\u0001\u0000\u0000\u0000%\u0090\u0001\u0000\u0000"+ - "\u0000\'\u0096\u0001\u0000\u0000\u0000)\u0098\u0001\u0000\u0000\u0000"+ - "+\u00a0\u0001\u0000\u0000\u0000-\u00a2\u0001\u0000\u0000\u0000/\u00a4"+ - "\u0001\u0000\u0000\u00001\u00a7\u0001\u0000\u0000\u00003\u00aa\u0001\u0000"+ - "\u0000\u00005\u00ad\u0001\u0000\u0000\u00007\u00b3\u0001\u0000\u0000\u0000"+ - "9:\u0003)\u0014\u0000:;\u0001\u0000\u0000\u0000;<\u0006\u0000\u0000\u0000"+ - "<\u0002\u0001\u0000\u0000\u0000=>\u0007\u0000\u0000\u0000>?\u0007\u0001"+ - "\u0000\u0000?@\u0007\u0002\u0000\u0000@\u0004\u0001\u0000\u0000\u0000"+ - "AB\u0007\u0003\u0000\u0000BC\u0007\u0004\u0000\u0000C\u0006\u0001\u0000"+ - "\u0000\u0000DE\u0007\u0001\u0000\u0000EF\u0007\u0003\u0000\u0000FG\u0007"+ - "\u0005\u0000\u0000G\b\u0001\u0000\u0000\u0000HI\u0005:\u0000\u0000I\n"+ - "\u0001\u0000\u0000\u0000JO\u0003\u001d\u000e\u0000KO\u0003!\u0010\u0000"+ - "LO\u0003\u001f\u000f\u0000MO\u0003#\u0011\u0000NJ\u0001\u0000\u0000\u0000"+ - "NK\u0001\u0000\u0000\u0000NL\u0001\u0000\u0000\u0000NM\u0001\u0000\u0000"+ - 
"\u0000O\f\u0001\u0000\u0000\u0000PQ\u0005(\u0000\u0000Q\u000e\u0001\u0000"+ - "\u0000\u0000RS\u0005)\u0000\u0000S\u0010\u0001\u0000\u0000\u0000TU\u0005"+ - "{\u0000\u0000U\u0012\u0001\u0000\u0000\u0000VW\u0005}\u0000\u0000W\u0014"+ - "\u0001\u0000\u0000\u0000XZ\u0003\u0019\f\u0000YX\u0001\u0000\u0000\u0000"+ - "Z]\u0001\u0000\u0000\u0000[Y\u0001\u0000\u0000\u0000[\\\u0001\u0000\u0000"+ - "\u0000\\_\u0001\u0000\u0000\u0000][\u0001\u0000\u0000\u0000^`\u0003%\u0012"+ - "\u0000_^\u0001\u0000\u0000\u0000`a\u0001\u0000\u0000\u0000a_\u0001\u0000"+ - "\u0000\u0000ab\u0001\u0000\u0000\u0000bf\u0001\u0000\u0000\u0000ce\u0003"+ - "\u0019\f\u0000dc\u0001\u0000\u0000\u0000eh\u0001\u0000\u0000\u0000fd\u0001"+ - "\u0000\u0000\u0000fg\u0001\u0000\u0000\u0000g\u0016\u0001\u0000\u0000"+ - "\u0000hf\u0001\u0000\u0000\u0000im\u0005\"\u0000\u0000jl\u0003\'\u0013"+ - "\u0000kj\u0001\u0000\u0000\u0000lo\u0001\u0000\u0000\u0000mk\u0001\u0000"+ - "\u0000\u0000mn\u0001\u0000\u0000\u0000np\u0001\u0000\u0000\u0000om\u0001"+ - "\u0000\u0000\u0000pq\u0005\"\u0000\u0000q\u0018\u0001\u0000\u0000\u0000"+ - "rt\u0003\u001b\r\u0000sr\u0001\u0000\u0000\u0000tu\u0001\u0000\u0000\u0000"+ - "us\u0001\u0000\u0000\u0000uv\u0001\u0000\u0000\u0000v\u001a\u0001\u0000"+ - "\u0000\u0000wx\u0005*\u0000\u0000x\u001c\u0001\u0000\u0000\u0000yz\u0005"+ - "<\u0000\u0000z\u001e\u0001\u0000\u0000\u0000{|\u0005<\u0000\u0000|}\u0005"+ - "=\u0000\u0000} \u0001\u0000\u0000\u0000~\u007f\u0005>\u0000\u0000\u007f"+ - "\"\u0001\u0000\u0000\u0000\u0080\u0081\u0005>\u0000\u0000\u0081\u0082"+ - "\u0005=\u0000\u0000\u0082$\u0001\u0000\u0000\u0000\u0083\u0091\u0003+"+ - "\u0015\u0000\u0084\u0091\u0003/\u0017\u0000\u0085\u0091\u00033\u0019\u0000"+ - "\u0086\u008a\u0005\\\u0000\u0000\u0087\u008b\u0003\u0003\u0001\u0000\u0088"+ - "\u008b\u0003\u0005\u0002\u0000\u0089\u008b\u0003\u0007\u0003\u0000\u008a"+ - "\u0087\u0001\u0000\u0000\u0000\u008a\u0088\u0001\u0000\u0000\u0000\u008a"+ - "\u0089\u0001\u0000\u0000\u0000\u008b\u0091\u0001\u0000\u0000\u0000\u008c"+ - "\u008d\u0003\u001b\r\u0000\u008d\u008e\u0003%\u0012\u0000\u008e\u0091"+ - "\u0001\u0000\u0000\u0000\u008f\u0091\u0003-\u0016\u0000\u0090\u0083\u0001"+ - "\u0000\u0000\u0000\u0090\u0084\u0001\u0000\u0000\u0000\u0090\u0085\u0001"+ - "\u0000\u0000\u0000\u0090\u0086\u0001\u0000\u0000\u0000\u0090\u008c\u0001"+ - "\u0000\u0000\u0000\u0090\u008f\u0001\u0000\u0000\u0000\u0091&\u0001\u0000"+ - "\u0000\u0000\u0092\u0097\u0003+\u0015\u0000\u0093\u0097\u00033\u0019\u0000"+ - "\u0094\u0097\u00031\u0018\u0000\u0095\u0097\b\u0006\u0000\u0000\u0096"+ - "\u0092\u0001\u0000\u0000\u0000\u0096\u0093\u0001\u0000\u0000\u0000\u0096"+ - "\u0094\u0001\u0000\u0000\u0000\u0096\u0095\u0001\u0000\u0000\u0000\u0097"+ - "(\u0001\u0000\u0000\u0000\u0098\u0099\u0007\u0007\u0000\u0000\u0099*\u0001"+ - "\u0000\u0000\u0000\u009a\u009b\u0005\\\u0000\u0000\u009b\u00a1\u0007\u0004"+ - "\u0000\u0000\u009c\u009d\u0005\\\u0000\u0000\u009d\u00a1\u0007\u0005\u0000"+ - "\u0000\u009e\u009f\u0005\\\u0000\u0000\u009f\u00a1\u0007\u0001\u0000\u0000"+ - "\u00a0\u009a\u0001\u0000\u0000\u0000\u00a0\u009c\u0001\u0000\u0000\u0000"+ - "\u00a0\u009e\u0001\u0000\u0000\u0000\u00a1,\u0001\u0000\u0000\u0000\u00a2"+ - "\u00a3\b\b\u0000\u0000\u00a3.\u0001\u0000\u0000\u0000\u00a4\u00a5\u0005"+ - "\\\u0000\u0000\u00a5\u00a6\u0007\b\u0000\u0000\u00a60\u0001\u0000\u0000"+ - "\u0000\u00a7\u00a8\u0005\\\u0000\u0000\u00a8\u00a9\u0005\"\u0000\u0000"+ - "\u00a92\u0001\u0000\u0000\u0000\u00aa\u00ab\u0005\\\u0000\u0000\u00ab"+ - 
"\u00ac\u00035\u001a\u0000\u00ac4\u0001\u0000\u0000\u0000\u00ad\u00ae\u0007"+ - "\t\u0000\u0000\u00ae\u00af\u00037\u001b\u0000\u00af\u00b0\u00037\u001b"+ - "\u0000\u00b0\u00b1\u00037\u001b\u0000\u00b1\u00b2\u00037\u001b\u0000\u00b2"+ - "6\u0001\u0000\u0000\u0000\u00b3\u00b4\u0007\n\u0000\u0000\u00b48\u0001"+ - "\u0000\u0000\u0000\u000b\u0000N[afmu\u008a\u0090\u0096\u00a0\u0001\u0006"+ - "\u0000\u0000"; + "\u0004\u0000\u0010\u00b2\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002"+ + "\u0001\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002"+ + "\u0004\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002"+ + "\u0007\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002"+ + "\u000b\u0007\u000b\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e"+ + "\u0002\u000f\u0007\u000f\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011"+ + "\u0002\u0012\u0007\u0012\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014"+ + "\u0002\u0015\u0007\u0015\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017"+ + "\u0002\u0018\u0007\u0018\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a"+ + "\u0002\u001b\u0007\u001b\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002"+ + "\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004"+ + "\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006"+ + "\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001"+ + "\n\u0001\n\u0001\u000b\u0001\u000b\u0001\f\u0001\f\u0001\r\u0004\r^\b"+ + "\r\u000b\r\f\r_\u0001\u000e\u0001\u000e\u0005\u000ed\b\u000e\n\u000e\f"+ + "\u000eg\t\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f\u0001"+ + "\u0010\u0001\u0010\u0001\u0011\u0005\u0011p\b\u0011\n\u0011\f\u0011s\t"+ + "\u0011\u0001\u0011\u0001\u0011\u0005\u0011w\b\u0011\n\u0011\f\u0011z\t"+ + "\u0011\u0001\u0011\u0001\u0011\u0004\u0011~\b\u0011\u000b\u0011\f\u0011"+ + "\u007f\u0003\u0011\u0082\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001"+ + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u008b\b\u0012\u0001"+ + "\u0012\u0003\u0012\u008e\b\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0001"+ + "\u0013\u0003\u0013\u0094\b\u0013\u0001\u0014\u0001\u0014\u0001\u0015\u0001"+ + "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0003\u0015\u009e"+ + "\b\u0015\u0001\u0016\u0001\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ + "\u0018\u0001\u0018\u0001\u0018\u0001\u0019\u0001\u0019\u0001\u0019\u0001"+ + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001"+ + "\u001b\u0001\u001b\u0000\u0000\u001c\u0001\u0001\u0003\u0002\u0005\u0003"+ + "\u0007\u0004\t\u0005\u000b\u0006\r\u0007\u000f\b\u0011\t\u0013\n\u0015"+ + "\u000b\u0017\f\u0019\r\u001b\u000e\u001d\u000f\u001f\u0010!\u0000#\u0000"+ + "%\u0000\'\u0000)\u0000+\u0000-\u0000/\u00001\u00003\u00005\u00007\u0000"+ + "\u0001\u0000\f\u0002\u0000AAaa\u0002\u0000NNnn\u0002\u0000DDdd\u0002\u0000"+ + "OOoo\u0002\u0000RRrr\u0002\u0000TTtt\u0001\u0000\"\"\u0004\u0000\t\n\r"+ + "\r \u3000\u3000\f\u0000\t\n\r\r \"\"(*::<<>>\\\\{{}}\u3000\u3000\t\u0000"+ + " \"\"(*::<<>>\\\\{{}}\u0002\u0000UUuu\u0003\u000009AFaf\u00b6\u0000\u0001"+ + "\u0001\u0000\u0000\u0000\u0000\u0003\u0001\u0000\u0000\u0000\u0000\u0005"+ + "\u0001\u0000\u0000\u0000\u0000\u0007\u0001\u0000\u0000\u0000\u0000\t\u0001"+ + "\u0000\u0000\u0000\u0000\u000b\u0001\u0000\u0000\u0000\u0000\r\u0001\u0000"+ + "\u0000\u0000\u0000\u000f\u0001\u0000\u0000\u0000\u0000\u0011\u0001\u0000"+ + 
"\u0000\u0000\u0000\u0013\u0001\u0000\u0000\u0000\u0000\u0015\u0001\u0000"+ + "\u0000\u0000\u0000\u0017\u0001\u0000\u0000\u0000\u0000\u0019\u0001\u0000"+ + "\u0000\u0000\u0000\u001b\u0001\u0000\u0000\u0000\u0000\u001d\u0001\u0000"+ + "\u0000\u0000\u0000\u001f\u0001\u0000\u0000\u0000\u00019\u0001\u0000\u0000"+ + "\u0000\u0003=\u0001\u0000\u0000\u0000\u0005A\u0001\u0000\u0000\u0000\u0007"+ + "D\u0001\u0000\u0000\u0000\tH\u0001\u0000\u0000\u0000\u000bJ\u0001\u0000"+ + "\u0000\u0000\rL\u0001\u0000\u0000\u0000\u000fO\u0001\u0000\u0000\u0000"+ + "\u0011Q\u0001\u0000\u0000\u0000\u0013T\u0001\u0000\u0000\u0000\u0015V"+ + "\u0001\u0000\u0000\u0000\u0017X\u0001\u0000\u0000\u0000\u0019Z\u0001\u0000"+ + "\u0000\u0000\u001b]\u0001\u0000\u0000\u0000\u001da\u0001\u0000\u0000\u0000"+ + "\u001fj\u0001\u0000\u0000\u0000!l\u0001\u0000\u0000\u0000#\u0081\u0001"+ + "\u0000\u0000\u0000%\u008d\u0001\u0000\u0000\u0000\'\u0093\u0001\u0000"+ + "\u0000\u0000)\u0095\u0001\u0000\u0000\u0000+\u009d\u0001\u0000\u0000\u0000"+ + "-\u009f\u0001\u0000\u0000\u0000/\u00a1\u0001\u0000\u0000\u00001\u00a4"+ + "\u0001\u0000\u0000\u00003\u00a7\u0001\u0000\u0000\u00005\u00aa\u0001\u0000"+ + "\u0000\u00007\u00b0\u0001\u0000\u0000\u00009:\u0003)\u0014\u0000:;\u0001"+ + "\u0000\u0000\u0000;<\u0006\u0000\u0000\u0000<\u0002\u0001\u0000\u0000"+ + "\u0000=>\u0007\u0000\u0000\u0000>?\u0007\u0001\u0000\u0000?@\u0007\u0002"+ + "\u0000\u0000@\u0004\u0001\u0000\u0000\u0000AB\u0007\u0003\u0000\u0000"+ + "BC\u0007\u0004\u0000\u0000C\u0006\u0001\u0000\u0000\u0000DE\u0007\u0001"+ + "\u0000\u0000EF\u0007\u0003\u0000\u0000FG\u0007\u0005\u0000\u0000G\b\u0001"+ + "\u0000\u0000\u0000HI\u0005:\u0000\u0000I\n\u0001\u0000\u0000\u0000JK\u0005"+ + "<\u0000\u0000K\f\u0001\u0000\u0000\u0000LM\u0005<\u0000\u0000MN\u0005"+ + "=\u0000\u0000N\u000e\u0001\u0000\u0000\u0000OP\u0005>\u0000\u0000P\u0010"+ + "\u0001\u0000\u0000\u0000QR\u0005>\u0000\u0000RS\u0005=\u0000\u0000S\u0012"+ + "\u0001\u0000\u0000\u0000TU\u0005(\u0000\u0000U\u0014\u0001\u0000\u0000"+ + "\u0000VW\u0005)\u0000\u0000W\u0016\u0001\u0000\u0000\u0000XY\u0005{\u0000"+ + "\u0000Y\u0018\u0001\u0000\u0000\u0000Z[\u0005}\u0000\u0000[\u001a\u0001"+ + "\u0000\u0000\u0000\\^\u0003#\u0011\u0000]\\\u0001\u0000\u0000\u0000^_"+ + "\u0001\u0000\u0000\u0000_]\u0001\u0000\u0000\u0000_`\u0001\u0000\u0000"+ + "\u0000`\u001c\u0001\u0000\u0000\u0000ae\u0005\"\u0000\u0000bd\u0003\'"+ + "\u0013\u0000cb\u0001\u0000\u0000\u0000dg\u0001\u0000\u0000\u0000ec\u0001"+ + "\u0000\u0000\u0000ef\u0001\u0000\u0000\u0000fh\u0001\u0000\u0000\u0000"+ + "ge\u0001\u0000\u0000\u0000hi\u0005\"\u0000\u0000i\u001e\u0001\u0000\u0000"+ + "\u0000jk\u0003!\u0010\u0000k \u0001\u0000\u0000\u0000lm\u0005*\u0000\u0000"+ + "m\"\u0001\u0000\u0000\u0000np\u0003!\u0010\u0000on\u0001\u0000\u0000\u0000"+ + "ps\u0001\u0000\u0000\u0000qo\u0001\u0000\u0000\u0000qr\u0001\u0000\u0000"+ + "\u0000rt\u0001\u0000\u0000\u0000sq\u0001\u0000\u0000\u0000tx\u0003%\u0012"+ + "\u0000uw\u0003!\u0010\u0000vu\u0001\u0000\u0000\u0000wz\u0001\u0000\u0000"+ + "\u0000xv\u0001\u0000\u0000\u0000xy\u0001\u0000\u0000\u0000y\u0082\u0001"+ + "\u0000\u0000\u0000zx\u0001\u0000\u0000\u0000{}\u0003!\u0010\u0000|~\u0003"+ + "!\u0010\u0000}|\u0001\u0000\u0000\u0000~\u007f\u0001\u0000\u0000\u0000"+ + "\u007f}\u0001\u0000\u0000\u0000\u007f\u0080\u0001\u0000\u0000\u0000\u0080"+ + "\u0082\u0001\u0000\u0000\u0000\u0081q\u0001\u0000\u0000\u0000\u0081{\u0001"+ + "\u0000\u0000\u0000\u0082$\u0001\u0000\u0000\u0000\u0083\u008e\u0003+\u0015"+ + 
"\u0000\u0084\u008e\u0003/\u0017\u0000\u0085\u008e\u00033\u0019\u0000\u0086"+ + "\u008a\u0005\\\u0000\u0000\u0087\u008b\u0003\u0003\u0001\u0000\u0088\u008b"+ + "\u0003\u0005\u0002\u0000\u0089\u008b\u0003\u0007\u0003\u0000\u008a\u0087"+ + "\u0001\u0000\u0000\u0000\u008a\u0088\u0001\u0000\u0000\u0000\u008a\u0089"+ + "\u0001\u0000\u0000\u0000\u008b\u008e\u0001\u0000\u0000\u0000\u008c\u008e"+ + "\u0003-\u0016\u0000\u008d\u0083\u0001\u0000\u0000\u0000\u008d\u0084\u0001"+ + "\u0000\u0000\u0000\u008d\u0085\u0001\u0000\u0000\u0000\u008d\u0086\u0001"+ + "\u0000\u0000\u0000\u008d\u008c\u0001\u0000\u0000\u0000\u008e&\u0001\u0000"+ + "\u0000\u0000\u008f\u0094\u0003+\u0015\u0000\u0090\u0094\u00033\u0019\u0000"+ + "\u0091\u0094\u00031\u0018\u0000\u0092\u0094\b\u0006\u0000\u0000\u0093"+ + "\u008f\u0001\u0000\u0000\u0000\u0093\u0090\u0001\u0000\u0000\u0000\u0093"+ + "\u0091\u0001\u0000\u0000\u0000\u0093\u0092\u0001\u0000\u0000\u0000\u0094"+ + "(\u0001\u0000\u0000\u0000\u0095\u0096\u0007\u0007\u0000\u0000\u0096*\u0001"+ + "\u0000\u0000\u0000\u0097\u0098\u0005\\\u0000\u0000\u0098\u009e\u0007\u0004"+ + "\u0000\u0000\u0099\u009a\u0005\\\u0000\u0000\u009a\u009e\u0007\u0005\u0000"+ + "\u0000\u009b\u009c\u0005\\\u0000\u0000\u009c\u009e\u0007\u0001\u0000\u0000"+ + "\u009d\u0097\u0001\u0000\u0000\u0000\u009d\u0099\u0001\u0000\u0000\u0000"+ + "\u009d\u009b\u0001\u0000\u0000\u0000\u009e,\u0001\u0000\u0000\u0000\u009f"+ + "\u00a0\b\b\u0000\u0000\u00a0.\u0001\u0000\u0000\u0000\u00a1\u00a2\u0005"+ + "\\\u0000\u0000\u00a2\u00a3\u0007\t\u0000\u0000\u00a30\u0001\u0000\u0000"+ + "\u0000\u00a4\u00a5\u0005\\\u0000\u0000\u00a5\u00a6\u0005\"\u0000\u0000"+ + "\u00a62\u0001\u0000\u0000\u0000\u00a7\u00a8\u0005\\\u0000\u0000\u00a8"+ + "\u00a9\u00035\u001a\u0000\u00a94\u0001\u0000\u0000\u0000\u00aa\u00ab\u0007"+ + "\n\u0000\u0000\u00ab\u00ac\u00037\u001b\u0000\u00ac\u00ad\u00037\u001b"+ + "\u0000\u00ad\u00ae\u00037\u001b\u0000\u00ae\u00af\u00037\u001b\u0000\u00af"+ + "6\u0001\u0000\u0000\u0000\u00b0\u00b1\u0007\u000b\u0000\u0000\u00b18\u0001"+ + "\u0000\u0000\u0000\u000b\u0000_eqx\u007f\u0081\u008a\u008d\u0093\u009d"+ + "\u0001\u0006\u0000\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java index bce2044fa8175..505569dbde58d 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java @@ -71,16 +71,6 @@ interface KqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx); - /** - * Enter a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - */ - void enterExpression(KqlBaseParser.ExpressionContext ctx); - /** - * Exit a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - */ - void exitExpression(KqlBaseParser.ExpressionContext ctx); /** * Enter a parse tree produced by {@link KqlBaseParser#nestedQuery}. * @param ctx the parse tree @@ -92,45 +82,35 @@ interface KqlBaseListener extends ParseTreeListener { */ void exitNestedQuery(KqlBaseParser.NestedQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. 
-	 * @param ctx the parse tree
-	 */
-	void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx);
-	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}.
-	 * @param ctx the parse tree
-	 */
-	void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx);
-	/**
-	 * Enter a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}.
+	 * Enter a parse tree produced by {@link KqlBaseParser#matchAllQuery}.
 	 * @param ctx the parse tree
 	 */
-	void enterFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx);
+	void enterMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx);
 	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}.
+	 * Exit a parse tree produced by {@link KqlBaseParser#matchAllQuery}.
 	 * @param ctx the parse tree
 	 */
-	void exitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx);
+	void exitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx);
 	/**
-	 * Enter a parse tree produced by {@link KqlBaseParser#fieldTermQuery}.
+	 * Enter a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}.
 	 * @param ctx the parse tree
 	 */
-	void enterFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx);
+	void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx);
 	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#fieldTermQuery}.
+	 * Exit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}.
 	 * @param ctx the parse tree
 	 */
-	void exitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx);
+	void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx);
 	/**
-	 * Enter a parse tree produced by {@link KqlBaseParser#fieldName}.
+	 * Enter a parse tree produced by {@link KqlBaseParser#rangeQuery}.
 	 * @param ctx the parse tree
 	 */
-	void enterFieldName(KqlBaseParser.FieldNameContext ctx);
+	void enterRangeQuery(KqlBaseParser.RangeQueryContext ctx);
 	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#fieldName}.
+	 * Exit a parse tree produced by {@link KqlBaseParser#rangeQuery}.
 	 * @param ctx the parse tree
 	 */
-	void exitFieldName(KqlBaseParser.FieldNameContext ctx);
+	void exitRangeQuery(KqlBaseParser.RangeQueryContext ctx);
 	/**
 	 * Enter a parse tree produced by {@link KqlBaseParser#rangeQueryValue}.
 	 * @param ctx the parse tree
@@ -142,53 +122,53 @@ interface KqlBaseListener extends ParseTreeListener {
 	 */
 	void exitRangeQueryValue(KqlBaseParser.RangeQueryValueContext ctx);
 	/**
-	 * Enter a parse tree produced by {@link KqlBaseParser#termQueryValue}.
+	 * Enter a parse tree produced by {@link KqlBaseParser#existsQuery}.
 	 * @param ctx the parse tree
 	 */
-	void enterTermQueryValue(KqlBaseParser.TermQueryValueContext ctx);
+	void enterExistsQuery(KqlBaseParser.ExistsQueryContext ctx);
 	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#termQueryValue}.
+	 * Exit a parse tree produced by {@link KqlBaseParser#existsQuery}.
 	 * @param ctx the parse tree
 	 */
-	void exitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx);
+	void exitExistsQuery(KqlBaseParser.ExistsQueryContext ctx);
 	/**
-	 * Enter a parse tree produced by {@link KqlBaseParser#groupingTermExpression}.
+	 * Enter a parse tree produced by {@link KqlBaseParser#fieldQuery}.
 	 * @param ctx the parse tree
 	 */
-	void enterGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx);
+	void enterFieldQuery(KqlBaseParser.FieldQueryContext ctx);
 	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#groupingTermExpression}.
+	 * Exit a parse tree produced by {@link KqlBaseParser#fieldQuery}.
 	 * @param ctx the parse tree
 	 */
-	void exitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx);
+	void exitFieldQuery(KqlBaseParser.FieldQueryContext ctx);
 	/**
-	 * Enter a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}.
+	 * Enter a parse tree produced by {@link KqlBaseParser#fieldLessQuery}.
 	 * @param ctx the parse tree
 	 */
-	void enterUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx);
+	void enterFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx);
 	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}.
+	 * Exit a parse tree produced by {@link KqlBaseParser#fieldLessQuery}.
 	 * @param ctx the parse tree
 	 */
-	void exitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx);
+	void exitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx);
 	/**
-	 * Enter a parse tree produced by {@link KqlBaseParser#quotedStringExpression}.
+	 * Enter a parse tree produced by {@link KqlBaseParser#fieldQueryValue}.
 	 * @param ctx the parse tree
 	 */
-	void enterQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx);
+	void enterFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx);
 	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#quotedStringExpression}.
+	 * Exit a parse tree produced by {@link KqlBaseParser#fieldQueryValue}.
 	 * @param ctx the parse tree
 	 */
-	void exitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx);
+	void exitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx);
 	/**
-	 * Enter a parse tree produced by {@link KqlBaseParser#wildcardExpression}.
+	 * Enter a parse tree produced by {@link KqlBaseParser#fieldName}.
 	 * @param ctx the parse tree
 	 */
-	void enterWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx);
+	void enterFieldName(KqlBaseParser.FieldNameContext ctx);
 	/**
-	 * Exit a parse tree produced by {@link KqlBaseParser#wildcardExpression}.
+	 * Exit a parse tree produced by {@link KqlBaseParser#fieldName}.
 	 * @param ctx the parse tree
 	 */
-	void exitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx);
+	void exitFieldName(KqlBaseParser.FieldNameContext ctx);
 }
diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java
index 3bd9cc4104d2c..3ee44e389a371 100644
--- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java
+++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java
@@ -25,37 +25,35 @@ class KqlBaseParser extends Parser {
 	protected static final PredictionContextCache _sharedContextCache =
 		new PredictionContextCache();
 	public static final int
-		DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_COMPARE=6, LEFT_PARENTHESIS=7, 
-		RIGHT_PARENTHESIS=8, LEFT_CURLY_BRACKET=9, RIGHT_CURLY_BRACKET=10, UNQUOTED_LITERAL=11, 
-		QUOTED_STRING=12, WILDCARD=13;
+		DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_LESS=6, OP_LESS_EQ=7, 
+		OP_MORE=8, OP_MORE_EQ=9, LEFT_PARENTHESIS=10, RIGHT_PARENTHESIS=11, LEFT_CURLY_BRACKET=12, 
+		RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16;
 	public static final int
-		RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_expression = 3, 
-		RULE_nestedQuery = 4, RULE_parenthesizedQuery = 5, RULE_fieldRangeQuery = 6, 
-		RULE_fieldTermQuery = 7, RULE_fieldName = 8, RULE_rangeQueryValue = 9, 
-		RULE_termQueryValue = 10, RULE_groupingTermExpression = 11, RULE_unquotedLiteralExpression = 12, 
-		RULE_quotedStringExpression = 13, RULE_wildcardExpression = 14;
+		RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_nestedQuery = 3, 
+		RULE_matchAllQuery = 4, RULE_parenthesizedQuery = 5, RULE_rangeQuery = 6, 
+		RULE_rangeQueryValue = 7, RULE_existsQuery = 8, RULE_fieldQuery = 9, RULE_fieldLessQuery = 10, 
+		RULE_fieldQueryValue = 11, RULE_fieldName = 12;
 	private static String[] makeRuleNames() {
 		return new String[] {
-			"topLevelQuery", "query", "simpleQuery", "expression", "nestedQuery", 
-			"parenthesizedQuery", "fieldRangeQuery", "fieldTermQuery", "fieldName", 
-			"rangeQueryValue", "termQueryValue", "groupingTermExpression", "unquotedLiteralExpression", 
-			"quotedStringExpression", "wildcardExpression"
+			"topLevelQuery", "query", "simpleQuery", "nestedQuery", "matchAllQuery", 
+			"parenthesizedQuery", "rangeQuery", "rangeQueryValue", "existsQuery", 
+			"fieldQuery", "fieldLessQuery", "fieldQueryValue", "fieldName"
 		};
 	}
 	public static final String[] ruleNames = makeRuleNames();
 	private static String[] makeLiteralNames() {
 		return new String[] {
-			null, null, "'and'", "'or'", "'not'", "':'", null, "'('", "')'", "'{'", 
-			"'}'"
+			null, null, "'and'", "'or'", "'not'", "':'", "'<'", "'<='", "'>'", "'>='", 
+			"'('", "')'", "'{'", "'}'"
 		};
 	}
 	private static final String[] _LITERAL_NAMES = makeLiteralNames();
 	private static String[] makeSymbolicNames() {
 		return new String[] {
-			null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", 
-			"RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", 
-			"QUOTED_STRING", "WILDCARD"
+			null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", 
+			"OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", 
+			"RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD"
 		};
 	}
 	private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames();
@@ -141,17 +139,17 @@ public final TopLevelQueryContext
topLevelQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(31); + setState(27); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 14480L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 115740L) != 0)) { { - setState(30); + setState(26); query(0); } } - setState(33); + setState(29); match(EOF); } } @@ -202,6 +200,7 @@ public T accept(ParseTreeVisitor visitor) { } @SuppressWarnings("CheckReturnValue") public static class BooleanQueryContext extends QueryContext { + public Token operator; public List query() { return getRuleContexts(QueryContext.class); } @@ -262,38 +261,33 @@ private QueryContext query(int _p) throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(39); + setState(35); _errHandler.sync(this); - switch (_input.LA(1)) { - case NOT: + switch ( getInterpreter().adaptivePredict(_input,1,_ctx) ) { + case 1: { _localctx = new NotQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(36); + setState(32); match(NOT); - setState(37); + setState(33); ((NotQueryContext)_localctx).subQuery = simpleQuery(); } break; - case LEFT_PARENTHESIS: - case UNQUOTED_LITERAL: - case QUOTED_STRING: - case WILDCARD: + case 2: { _localctx = new DefaultQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(38); + setState(34); simpleQuery(); } break; - default: - throw new NoViableAltException(this); } _ctx.stop = _input.LT(-1); - setState(46); + setState(42); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,2,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -304,24 +298,25 @@ private QueryContext query(int _p) throws RecognitionException { { _localctx = new BooleanQueryContext(new QueryContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_query); - setState(41); + setState(37); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(42); + setState(38); + ((BooleanQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { - _errHandler.recoverInline(this); + ((BooleanQueryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); } else { if ( _input.LA(1)==Token.EOF ) matchedEOF = true; _errHandler.reportMatch(this); consume(); } - setState(43); - query(4); + setState(39); + query(3); } } } - setState(48); + setState(44); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,2,_ctx); } @@ -343,12 +338,24 @@ public static class SimpleQueryContext extends ParserRuleContext { public NestedQueryContext nestedQuery() { return getRuleContext(NestedQueryContext.class,0); } - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); - } public ParenthesizedQueryContext parenthesizedQuery() { return getRuleContext(ParenthesizedQueryContext.class,0); } + public MatchAllQueryContext matchAllQuery() { + return getRuleContext(MatchAllQueryContext.class,0); + } + public ExistsQueryContext existsQuery() { + return getRuleContext(ExistsQueryContext.class,0); + } + public RangeQueryContext rangeQuery() { + return getRuleContext(RangeQueryContext.class,0); + } + public FieldQueryContext fieldQuery() { + return getRuleContext(FieldQueryContext.class,0); + } + public FieldLessQueryContext fieldLessQuery() { + return getRuleContext(FieldLessQueryContext.class,0); + } public SimpleQueryContext(ParserRuleContext parent, int 
invokingState) { super(parent, invokingState); } @@ -378,83 +385,50 @@ public final SimpleQueryContext simpleQuery() throws RecognitionException { case 1: enterOuterAlt(_localctx, 1); { - setState(49); + setState(45); nestedQuery(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(50); - expression(); + setState(46); + parenthesizedQuery(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(51); - parenthesizedQuery(); + setState(47); + matchAllQuery(); } break; - } - } - catch (RecognitionException re) { - _localctx.exception = re; - _errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); - } - return _localctx; - } - - @SuppressWarnings("CheckReturnValue") - public static class ExpressionContext extends ParserRuleContext { - public FieldTermQueryContext fieldTermQuery() { - return getRuleContext(FieldTermQueryContext.class,0); - } - public FieldRangeQueryContext fieldRangeQuery() { - return getRuleContext(FieldRangeQueryContext.class,0); - } - public ExpressionContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int getRuleIndex() { return RULE_expression; } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterExpression(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitExpression(this); - } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitExpression(this); - else return visitor.visitChildren(this); - } - } - - public final ExpressionContext expression() throws RecognitionException { - ExpressionContext _localctx = new ExpressionContext(_ctx, getState()); - enterRule(_localctx, 6, RULE_expression); - try { - setState(56); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { - case 1: - enterOuterAlt(_localctx, 1); + case 4: + enterOuterAlt(_localctx, 4); { - setState(54); - fieldTermQuery(); + setState(48); + existsQuery(); } break; - case 2: - enterOuterAlt(_localctx, 2); + case 5: + enterOuterAlt(_localctx, 5); { - setState(55); - fieldRangeQuery(); + setState(49); + rangeQuery(); + } + break; + case 6: + enterOuterAlt(_localctx, 6); + { + setState(50); + fieldQuery(); + } + break; + case 7: + enterOuterAlt(_localctx, 7); + { + setState(51); + fieldLessQuery(); } break; } @@ -502,19 +476,19 @@ public T accept(ParseTreeVisitor visitor) { public final NestedQueryContext nestedQuery() throws RecognitionException { NestedQueryContext _localctx = new NestedQueryContext(_ctx, getState()); - enterRule(_localctx, 8, RULE_nestedQuery); + enterRule(_localctx, 6, RULE_nestedQuery); try { enterOuterAlt(_localctx, 1); { - setState(58); + setState(54); fieldName(); - setState(59); + setState(55); match(COLON); - setState(60); + setState(56); match(LEFT_CURLY_BRACKET); - setState(61); + setState(57); query(0); - setState(62); + setState(58); match(RIGHT_CURLY_BRACKET); } } @@ -530,43 +504,51 @@ public final NestedQueryContext nestedQuery() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class ParenthesizedQueryContext extends ParserRuleContext { - public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } - public QueryContext query() { - return getRuleContext(QueryContext.class,0); + public static 
class MatchAllQueryContext extends ParserRuleContext { + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); } - public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } - public ParenthesizedQueryContext(ParserRuleContext parent, int invokingState) { + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public MatchAllQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_parenthesizedQuery; } + @Override public int getRuleIndex() { return RULE_matchAllQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterParenthesizedQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterMatchAllQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitParenthesizedQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitMatchAllQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitParenthesizedQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitMatchAllQuery(this); else return visitor.visitChildren(this); } } - public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { - ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); - enterRule(_localctx, 10, RULE_parenthesizedQuery); + public final MatchAllQueryContext matchAllQuery() throws RecognitionException { + MatchAllQueryContext _localctx = new MatchAllQueryContext(_ctx, getState()); + enterRule(_localctx, 8, RULE_matchAllQuery); try { enterOuterAlt(_localctx, 1); { + setState(62); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { + case 1: + { + setState(60); + match(WILDCARD); + setState(61); + match(COLON); + } + break; + } setState(64); - match(LEFT_PARENTHESIS); - setState(65); - query(0); - setState(66); - match(RIGHT_PARENTHESIS); + match(WILDCARD); } } catch (RecognitionException re) { @@ -581,46 +563,43 @@ public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionEx } @SuppressWarnings("CheckReturnValue") - public static class FieldRangeQueryContext extends ParserRuleContext { - public Token operator; - public FieldNameContext fieldName() { - return getRuleContext(FieldNameContext.class,0); - } - public RangeQueryValueContext rangeQueryValue() { - return getRuleContext(RangeQueryValueContext.class,0); + public static class ParenthesizedQueryContext extends ParserRuleContext { + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } + public QueryContext query() { + return getRuleContext(QueryContext.class,0); } - public TerminalNode OP_COMPARE() { return getToken(KqlBaseParser.OP_COMPARE, 0); } - public FieldRangeQueryContext(ParserRuleContext parent, int invokingState) { + public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } + public ParenthesizedQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return 
RULE_fieldRangeQuery; } + @Override public int getRuleIndex() { return RULE_parenthesizedQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldRangeQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterParenthesizedQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldRangeQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitParenthesizedQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldRangeQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitParenthesizedQuery(this); else return visitor.visitChildren(this); } } - public final FieldRangeQueryContext fieldRangeQuery() throws RecognitionException { - FieldRangeQueryContext _localctx = new FieldRangeQueryContext(_ctx, getState()); - enterRule(_localctx, 12, RULE_fieldRangeQuery); + public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { + ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); + enterRule(_localctx, 10, RULE_parenthesizedQuery); try { enterOuterAlt(_localctx, 1); { + setState(66); + match(LEFT_PARENTHESIS); + setState(67); + query(0); setState(68); - fieldName(); - setState(69); - ((FieldRangeQueryContext)_localctx).operator = match(OP_COMPARE); - setState(70); - rangeQueryValue(); + match(RIGHT_PARENTHESIS); } } catch (RecognitionException re) { @@ -635,53 +614,59 @@ public final FieldRangeQueryContext fieldRangeQuery() throws RecognitionExceptio } @SuppressWarnings("CheckReturnValue") - public static class FieldTermQueryContext extends ParserRuleContext { - public TermQueryValueContext termQueryValue() { - return getRuleContext(TermQueryValueContext.class,0); - } + public static class RangeQueryContext extends ParserRuleContext { + public Token operator; public FieldNameContext fieldName() { return getRuleContext(FieldNameContext.class,0); } - public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } - public FieldTermQueryContext(ParserRuleContext parent, int invokingState) { + public RangeQueryValueContext rangeQueryValue() { + return getRuleContext(RangeQueryValueContext.class,0); + } + public TerminalNode OP_LESS() { return getToken(KqlBaseParser.OP_LESS, 0); } + public TerminalNode OP_LESS_EQ() { return getToken(KqlBaseParser.OP_LESS_EQ, 0); } + public TerminalNode OP_MORE() { return getToken(KqlBaseParser.OP_MORE, 0); } + public TerminalNode OP_MORE_EQ() { return getToken(KqlBaseParser.OP_MORE_EQ, 0); } + public RangeQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_fieldTermQuery; } + @Override public int getRuleIndex() { return RULE_rangeQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldTermQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldTermQuery(this); + if ( listener instanceof KqlBaseListener ) 
((KqlBaseListener)listener).exitRangeQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldTermQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQuery(this); else return visitor.visitChildren(this); } } - public final FieldTermQueryContext fieldTermQuery() throws RecognitionException { - FieldTermQueryContext _localctx = new FieldTermQueryContext(_ctx, getState()); - enterRule(_localctx, 14, RULE_fieldTermQuery); + public final RangeQueryContext rangeQuery() throws RecognitionException { + RangeQueryContext _localctx = new RangeQueryContext(_ctx, getState()); + enterRule(_localctx, 12, RULE_rangeQuery); + int _la; try { enterOuterAlt(_localctx, 1); { - setState(75); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { - case 1: - { - setState(72); - fieldName(); - setState(73); - match(COLON); - } - break; + setState(70); + fieldName(); + setState(71); + ((RangeQueryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 960L) != 0)) ) { + ((RangeQueryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); } - setState(77); - termQueryValue(); + setState(72); + rangeQueryValue(); } } catch (RecognitionException re) { @@ -696,61 +681,83 @@ public final FieldTermQueryContext fieldTermQuery() throws RecognitionException } @SuppressWarnings("CheckReturnValue") - public static class FieldNameContext extends ParserRuleContext { - public WildcardExpressionContext wildcardExpression() { - return getRuleContext(WildcardExpressionContext.class,0); - } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class RangeQueryValueContext extends ParserRuleContext { + public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } + public TerminalNode UNQUOTED_LITERAL(int i) { + return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); } - public FieldNameContext(ParserRuleContext parent, int invokingState) { + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } + public RangeQueryValueContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_fieldName; } + @Override public int getRuleIndex() { return RULE_rangeQueryValue; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldName(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQueryValue(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldName(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitRangeQueryValue(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) 
return ((KqlBaseVisitor)visitor).visitFieldName(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQueryValue(this); else return visitor.visitChildren(this); } } - public final FieldNameContext fieldName() throws RecognitionException { - FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); - enterRule(_localctx, 16, RULE_fieldName); + public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { + RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); + enterRule(_localctx, 14, RULE_rangeQueryValue); + int _la; try { - setState(82); + int _alt; + setState(80); _errHandler.sync(this); switch (_input.LA(1)) { + case UNQUOTED_LITERAL: case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(79); - wildcardExpression(); - } - break; - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 2); - { - setState(80); - unquotedLiteralExpression(); + setState(75); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(74); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(77); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,5,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; case QUOTED_STRING: - enterOuterAlt(_localctx, 3); + enterOuterAlt(_localctx, 2); { - setState(81); - quotedStringExpression(); + setState(79); + match(QUOTED_STRING); } break; default: @@ -769,55 +776,43 @@ public final FieldNameContext fieldName() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class RangeQueryValueContext extends ParserRuleContext { - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); - } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); + public static class ExistsQueryContext extends ParserRuleContext { + public FieldNameContext fieldName() { + return getRuleContext(FieldNameContext.class,0); } - public RangeQueryValueContext(ParserRuleContext parent, int invokingState) { + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public TerminalNode WILDCARD() { return getToken(KqlBaseParser.WILDCARD, 0); } + public ExistsQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_rangeQueryValue; } + @Override public int getRuleIndex() { return RULE_existsQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterExistsQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitRangeQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitExistsQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQueryValue(this); + if ( visitor 
instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitExistsQuery(this); else return visitor.visitChildren(this); } } - public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { - RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); - enterRule(_localctx, 18, RULE_rangeQueryValue); + public final ExistsQueryContext existsQuery() throws RecognitionException { + ExistsQueryContext _localctx = new ExistsQueryContext(_ctx, getState()); + enterRule(_localctx, 16, RULE_existsQuery); try { - setState(86); - _errHandler.sync(this); - switch (_input.LA(1)) { - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 1); - { - setState(84); - unquotedLiteralExpression(); - } - break; - case QUOTED_STRING: - enterOuterAlt(_localctx, 2); - { - setState(85); - quotedStringExpression(); - } - break; - default: - throw new NoViableAltException(this); + enterOuterAlt(_localctx, 1); + { + setState(82); + fieldName(); + setState(83); + match(COLON); + setState(84); + match(WILDCARD); } } catch (RecognitionException re) { @@ -832,76 +827,68 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio } @SuppressWarnings("CheckReturnValue") - public static class TermQueryValueContext extends ParserRuleContext { - public UnquotedLiteralExpressionContext termValue; - public WildcardExpressionContext wildcardExpression() { - return getRuleContext(WildcardExpressionContext.class,0); - } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); - } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class FieldQueryContext extends ParserRuleContext { + public FieldNameContext fieldName() { + return getRuleContext(FieldNameContext.class,0); } - public GroupingTermExpressionContext groupingTermExpression() { - return getRuleContext(GroupingTermExpressionContext.class,0); + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public FieldQueryValueContext fieldQueryValue() { + return getRuleContext(FieldQueryValueContext.class,0); } - public TermQueryValueContext(ParserRuleContext parent, int invokingState) { + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } + public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } + public FieldQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_termQueryValue; } + @Override public int getRuleIndex() { return RULE_fieldQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterTermQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitTermQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitTermQueryValue(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldQuery(this); else return visitor.visitChildren(this); } } - 
public final TermQueryValueContext termQueryValue() throws RecognitionException { - TermQueryValueContext _localctx = new TermQueryValueContext(_ctx, getState()); - enterRule(_localctx, 20, RULE_termQueryValue); + public final FieldQueryContext fieldQuery() throws RecognitionException { + FieldQueryContext _localctx = new FieldQueryContext(_ctx, getState()); + enterRule(_localctx, 18, RULE_fieldQuery); try { - setState(92); + setState(96); _errHandler.sync(this); - switch (_input.LA(1)) { - case WILDCARD: + switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) { + case 1: enterOuterAlt(_localctx, 1); { + setState(86); + fieldName(); + setState(87); + match(COLON); setState(88); - wildcardExpression(); + fieldQueryValue(); } break; - case QUOTED_STRING: + case 2: enterOuterAlt(_localctx, 2); { - setState(89); - quotedStringExpression(); - } - break; - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 3); - { setState(90); - ((TermQueryValueContext)_localctx).termValue = unquotedLiteralExpression(); - } - break; - case LEFT_PARENTHESIS: - enterOuterAlt(_localctx, 4); - { + fieldName(); setState(91); - groupingTermExpression(); + match(COLON); + setState(92); + match(LEFT_PARENTHESIS); + setState(93); + fieldQueryValue(); + setState(94); + match(RIGHT_PARENTHESIS); } break; - default: - throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -916,43 +903,63 @@ public final TermQueryValueContext termQueryValue() throws RecognitionException } @SuppressWarnings("CheckReturnValue") - public static class GroupingTermExpressionContext extends ParserRuleContext { - public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class FieldLessQueryContext extends ParserRuleContext { + public FieldQueryValueContext fieldQueryValue() { + return getRuleContext(FieldQueryValueContext.class,0); } + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } - public GroupingTermExpressionContext(ParserRuleContext parent, int invokingState) { + public FieldLessQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_groupingTermExpression; } + @Override public int getRuleIndex() { return RULE_fieldLessQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterGroupingTermExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldLessQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitGroupingTermExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldLessQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitGroupingTermExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldLessQuery(this); else return visitor.visitChildren(this); } } - public final GroupingTermExpressionContext groupingTermExpression() throws RecognitionException { - GroupingTermExpressionContext 
_localctx = new GroupingTermExpressionContext(_ctx, getState()); - enterRule(_localctx, 22, RULE_groupingTermExpression); + public final FieldLessQueryContext fieldLessQuery() throws RecognitionException { + FieldLessQueryContext _localctx = new FieldLessQueryContext(_ctx, getState()); + enterRule(_localctx, 20, RULE_fieldLessQuery); try { - enterOuterAlt(_localctx, 1); - { - setState(94); - match(LEFT_PARENTHESIS); - setState(95); - unquotedLiteralExpression(); - setState(96); - match(RIGHT_PARENTHESIS); + setState(103); + _errHandler.sync(this); + switch (_input.LA(1)) { + case AND: + case OR: + case NOT: + case UNQUOTED_LITERAL: + case QUOTED_STRING: + case WILDCARD: + enterOuterAlt(_localctx, 1); + { + setState(98); + fieldQueryValue(); + } + break; + case LEFT_PARENTHESIS: + enterOuterAlt(_localctx, 2); + { + setState(99); + match(LEFT_PARENTHESIS); + setState(100); + fieldQueryValue(); + setState(101); + match(RIGHT_PARENTHESIS); + } + break; + default: + throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -967,57 +974,171 @@ public final GroupingTermExpressionContext groupingTermExpression() throws Recog } @SuppressWarnings("CheckReturnValue") - public static class UnquotedLiteralExpressionContext extends ParserRuleContext { + public static class FieldQueryValueContext extends ParserRuleContext { + public TerminalNode AND() { return getToken(KqlBaseParser.AND, 0); } + public TerminalNode OR() { return getToken(KqlBaseParser.OR, 0); } public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } public TerminalNode UNQUOTED_LITERAL(int i) { return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - public UnquotedLiteralExpressionContext(ParserRuleContext parent, int invokingState) { + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); + } + public TerminalNode NOT() { return getToken(KqlBaseParser.NOT, 0); } + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } + public FieldQueryValueContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_unquotedLiteralExpression; } + @Override public int getRuleIndex() { return RULE_fieldQueryValue; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterUnquotedLiteralExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldQueryValue(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitUnquotedLiteralExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldQueryValue(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitUnquotedLiteralExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldQueryValue(this); else return visitor.visitChildren(this); } } - public final UnquotedLiteralExpressionContext unquotedLiteralExpression() throws RecognitionException { - UnquotedLiteralExpressionContext _localctx = new UnquotedLiteralExpressionContext(_ctx, getState()); - enterRule(_localctx, 24, RULE_unquotedLiteralExpression); + public final FieldQueryValueContext fieldQueryValue() throws 
RecognitionException { + FieldQueryValueContext _localctx = new FieldQueryValueContext(_ctx, getState()); + enterRule(_localctx, 22, RULE_fieldQueryValue); + int _la; try { int _alt; - enterOuterAlt(_localctx, 1); - { - setState(99); + setState(123); _errHandler.sync(this); - _alt = 1; - do { - switch (_alt) { - case 1: + switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(106); + _errHandler.sync(this); + _la = _input.LA(1); + if (_la==AND || _la==OR) { { + setState(105); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + + setState(109); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(108); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(111); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,10,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(114); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(113); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(116); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,11,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + setState(119); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { + case 1: { - setState(98); - match(UNQUOTED_LITERAL); + setState(118); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); } } break; - default: - throw new NoViableAltException(this); } - setState(101); - _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,9,_ctx); - } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(121); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + break; + case 4: + enterOuterAlt(_localctx, 4); + { + setState(122); + match(QUOTED_STRING); + } + break; } } catch (RecognitionException re) { @@ -1032,78 +1153,76 @@ public final UnquotedLiteralExpressionContext unquotedLiteralExpression() throws } @SuppressWarnings("CheckReturnValue") - public static class QuotedStringExpressionContext extends ParserRuleContext { - public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } - public QuotedStringExpressionContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int 
getRuleIndex() { return RULE_quotedStringExpression; } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterQuotedStringExpression(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitQuotedStringExpression(this); - } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitQuotedStringExpression(this); - else return visitor.visitChildren(this); - } - } - - public final QuotedStringExpressionContext quotedStringExpression() throws RecognitionException { - QuotedStringExpressionContext _localctx = new QuotedStringExpressionContext(_ctx, getState()); - enterRule(_localctx, 26, RULE_quotedStringExpression); - try { - enterOuterAlt(_localctx, 1); - { - setState(103); - match(QUOTED_STRING); - } - } - catch (RecognitionException re) { - _localctx.exception = re; - _errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); + public static class FieldNameContext extends ParserRuleContext { + public Token value; + public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } + public TerminalNode UNQUOTED_LITERAL(int i) { + return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - return _localctx; - } - - @SuppressWarnings("CheckReturnValue") - public static class WildcardExpressionContext extends ParserRuleContext { + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } public TerminalNode WILDCARD() { return getToken(KqlBaseParser.WILDCARD, 0); } - public WildcardExpressionContext(ParserRuleContext parent, int invokingState) { + public FieldNameContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_wildcardExpression; } + @Override public int getRuleIndex() { return RULE_fieldName; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterWildcardExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldName(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitWildcardExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldName(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitWildcardExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldName(this); else return visitor.visitChildren(this); } } - public final WildcardExpressionContext wildcardExpression() throws RecognitionException { - WildcardExpressionContext _localctx = new WildcardExpressionContext(_ctx, getState()); - enterRule(_localctx, 28, RULE_wildcardExpression); + public final FieldNameContext fieldName() throws RecognitionException { + FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); + enterRule(_localctx, 24, RULE_fieldName); + int _la; try { - enterOuterAlt(_localctx, 1); - { - setState(105); - match(WILDCARD); + setState(132); + _errHandler.sync(this); + switch (_input.LA(1)) { + case UNQUOTED_LITERAL: + enterOuterAlt(_localctx, 1); + { + setState(126); + 
_errHandler.sync(this); + _la = _input.LA(1); + do { + { + { + setState(125); + ((FieldNameContext)_localctx).value = match(UNQUOTED_LITERAL); + } + } + setState(128); + _errHandler.sync(this); + _la = _input.LA(1); + } while ( _la==UNQUOTED_LITERAL ); + } + break; + case QUOTED_STRING: + enterOuterAlt(_localctx, 2); + { + setState(130); + ((FieldNameContext)_localctx).value = match(QUOTED_STRING); + } + break; + case WILDCARD: + enterOuterAlt(_localctx, 3); + { + setState(131); + ((FieldNameContext)_localctx).value = match(WILDCARD); + } + break; + default: + throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -1133,65 +1252,86 @@ private boolean query_sempred(QueryContext _localctx, int predIndex) { } public static final String _serializedATN = - "\u0004\u0001\rl\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002"+ - "\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004\u0002"+ - "\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007\u0002"+ - "\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b\u0002"+ - "\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0001\u0000\u0003\u0000"+ - " \b\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0003\u0001(\b\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0005\u0001-\b\u0001\n\u0001\f\u00010\t\u0001\u0001\u0002\u0001\u0002"+ - "\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001\u0003\u0003\u0003"+ - "9\b\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004"+ - "\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0006"+ - "\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\u0007"+ - "\u0003\u0007L\b\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b"+ - "\u0003\bS\b\b\u0001\t\u0001\t\u0003\tW\b\t\u0001\n\u0001\n\u0001\n\u0001"+ - "\n\u0003\n]\b\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001"+ - "\f\u0004\fd\b\f\u000b\f\f\fe\u0001\r\u0001\r\u0001\u000e\u0001\u000e\u0001"+ - "\u000e\u0000\u0001\u0002\u000f\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010"+ - "\u0012\u0014\u0016\u0018\u001a\u001c\u0000\u0001\u0001\u0000\u0002\u0003"+ - "j\u0000\u001f\u0001\u0000\u0000\u0000\u0002\'\u0001\u0000\u0000\u0000"+ - "\u00044\u0001\u0000\u0000\u0000\u00068\u0001\u0000\u0000\u0000\b:\u0001"+ - "\u0000\u0000\u0000\n@\u0001\u0000\u0000\u0000\fD\u0001\u0000\u0000\u0000"+ - "\u000eK\u0001\u0000\u0000\u0000\u0010R\u0001\u0000\u0000\u0000\u0012V"+ - "\u0001\u0000\u0000\u0000\u0014\\\u0001\u0000\u0000\u0000\u0016^\u0001"+ - "\u0000\u0000\u0000\u0018c\u0001\u0000\u0000\u0000\u001ag\u0001\u0000\u0000"+ - "\u0000\u001ci\u0001\u0000\u0000\u0000\u001e \u0003\u0002\u0001\u0000\u001f"+ - "\u001e\u0001\u0000\u0000\u0000\u001f \u0001\u0000\u0000\u0000 !\u0001"+ - "\u0000\u0000\u0000!\"\u0005\u0000\u0000\u0001\"\u0001\u0001\u0000\u0000"+ - "\u0000#$\u0006\u0001\uffff\uffff\u0000$%\u0005\u0004\u0000\u0000%(\u0003"+ - "\u0004\u0002\u0000&(\u0003\u0004\u0002\u0000\'#\u0001\u0000\u0000\u0000"+ - "\'&\u0001\u0000\u0000\u0000(.\u0001\u0000\u0000\u0000)*\n\u0003\u0000"+ - "\u0000*+\u0007\u0000\u0000\u0000+-\u0003\u0002\u0001\u0004,)\u0001\u0000"+ - "\u0000\u0000-0\u0001\u0000\u0000\u0000.,\u0001\u0000\u0000\u0000./\u0001"+ - "\u0000\u0000\u0000/\u0003\u0001\u0000\u0000\u00000.\u0001\u0000\u0000"+ - "\u000015\u0003\b\u0004\u000025\u0003\u0006\u0003\u000035\u0003\n\u0005"+ - "\u000041\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u000043\u0001\u0000"+ - "\u0000\u00005\u0005\u0001\u0000\u0000\u000069\u0003\u000e\u0007\u0000"+ - 
"79\u0003\f\u0006\u000086\u0001\u0000\u0000\u000087\u0001\u0000\u0000\u0000"+ - "9\u0007\u0001\u0000\u0000\u0000:;\u0003\u0010\b\u0000;<\u0005\u0005\u0000"+ - "\u0000<=\u0005\t\u0000\u0000=>\u0003\u0002\u0001\u0000>?\u0005\n\u0000"+ - "\u0000?\t\u0001\u0000\u0000\u0000@A\u0005\u0007\u0000\u0000AB\u0003\u0002"+ - "\u0001\u0000BC\u0005\b\u0000\u0000C\u000b\u0001\u0000\u0000\u0000DE\u0003"+ - "\u0010\b\u0000EF\u0005\u0006\u0000\u0000FG\u0003\u0012\t\u0000G\r\u0001"+ - "\u0000\u0000\u0000HI\u0003\u0010\b\u0000IJ\u0005\u0005\u0000\u0000JL\u0001"+ - "\u0000\u0000\u0000KH\u0001\u0000\u0000\u0000KL\u0001\u0000\u0000\u0000"+ - "LM\u0001\u0000\u0000\u0000MN\u0003\u0014\n\u0000N\u000f\u0001\u0000\u0000"+ - "\u0000OS\u0003\u001c\u000e\u0000PS\u0003\u0018\f\u0000QS\u0003\u001a\r"+ - "\u0000RO\u0001\u0000\u0000\u0000RP\u0001\u0000\u0000\u0000RQ\u0001\u0000"+ - "\u0000\u0000S\u0011\u0001\u0000\u0000\u0000TW\u0003\u0018\f\u0000UW\u0003"+ - "\u001a\r\u0000VT\u0001\u0000\u0000\u0000VU\u0001\u0000\u0000\u0000W\u0013"+ - "\u0001\u0000\u0000\u0000X]\u0003\u001c\u000e\u0000Y]\u0003\u001a\r\u0000"+ - "Z]\u0003\u0018\f\u0000[]\u0003\u0016\u000b\u0000\\X\u0001\u0000\u0000"+ - "\u0000\\Y\u0001\u0000\u0000\u0000\\Z\u0001\u0000\u0000\u0000\\[\u0001"+ - "\u0000\u0000\u0000]\u0015\u0001\u0000\u0000\u0000^_\u0005\u0007\u0000"+ - "\u0000_`\u0003\u0018\f\u0000`a\u0005\b\u0000\u0000a\u0017\u0001\u0000"+ - "\u0000\u0000bd\u0005\u000b\u0000\u0000cb\u0001\u0000\u0000\u0000de\u0001"+ - "\u0000\u0000\u0000ec\u0001\u0000\u0000\u0000ef\u0001\u0000\u0000\u0000"+ - "f\u0019\u0001\u0000\u0000\u0000gh\u0005\f\u0000\u0000h\u001b\u0001\u0000"+ - "\u0000\u0000ij\u0005\r\u0000\u0000j\u001d\u0001\u0000\u0000\u0000\n\u001f"+ - "\'.48KRV\\e"; + "\u0004\u0001\u0010\u0087\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ + "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ + "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ + "\u0002\f\u0007\f\u0001\u0000\u0003\u0000\u001c\b\u0000\u0001\u0000\u0001"+ + "\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0003\u0001$\b"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001)\b\u0001\n\u0001"+ + "\f\u0001,\t\u0001\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ + "\u0002\u0001\u0002\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001"+ + "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001"+ + "\u0004\u0003\u0004?\b\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001"+ + "\u0006\u0001\u0007\u0004\u0007L\b\u0007\u000b\u0007\f\u0007M\u0001\u0007"+ + "\u0003\u0007Q\b\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t"+ + "\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0003"+ + "\ta\b\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003\nh\b\n\u0001\u000b"+ + "\u0003\u000bk\b\u000b\u0001\u000b\u0004\u000bn\b\u000b\u000b\u000b\f\u000b"+ + "o\u0001\u000b\u0004\u000bs\b\u000b\u000b\u000b\f\u000bt\u0001\u000b\u0003"+ + "\u000bx\b\u000b\u0001\u000b\u0001\u000b\u0003\u000b|\b\u000b\u0001\f\u0004"+ + "\f\u007f\b\f\u000b\f\f\f\u0080\u0001\f\u0001\f\u0003\f\u0085\b\f\u0001"+ + "\f\u0000\u0001\u0002\r\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012"+ + "\u0014\u0016\u0018\u0000\u0004\u0001\u0000\u0002\u0003\u0001\u0000\u0006"+ + "\t\u0002\u0000\u000e\u000e\u0010\u0010\u0001\u0000\u0002\u0004\u0091\u0000"+ + 
"\u001b\u0001\u0000\u0000\u0000\u0002#\u0001\u0000\u0000\u0000\u00044\u0001"+ + "\u0000\u0000\u0000\u00066\u0001\u0000\u0000\u0000\b>\u0001\u0000\u0000"+ + "\u0000\nB\u0001\u0000\u0000\u0000\fF\u0001\u0000\u0000\u0000\u000eP\u0001"+ + "\u0000\u0000\u0000\u0010R\u0001\u0000\u0000\u0000\u0012`\u0001\u0000\u0000"+ + "\u0000\u0014g\u0001\u0000\u0000\u0000\u0016{\u0001\u0000\u0000\u0000\u0018"+ + "\u0084\u0001\u0000\u0000\u0000\u001a\u001c\u0003\u0002\u0001\u0000\u001b"+ + "\u001a\u0001\u0000\u0000\u0000\u001b\u001c\u0001\u0000\u0000\u0000\u001c"+ + "\u001d\u0001\u0000\u0000\u0000\u001d\u001e\u0005\u0000\u0000\u0001\u001e"+ + "\u0001\u0001\u0000\u0000\u0000\u001f \u0006\u0001\uffff\uffff\u0000 !"+ + "\u0005\u0004\u0000\u0000!$\u0003\u0004\u0002\u0000\"$\u0003\u0004\u0002"+ + "\u0000#\u001f\u0001\u0000\u0000\u0000#\"\u0001\u0000\u0000\u0000$*\u0001"+ + "\u0000\u0000\u0000%&\n\u0003\u0000\u0000&\'\u0007\u0000\u0000\u0000\'"+ + ")\u0003\u0002\u0001\u0003(%\u0001\u0000\u0000\u0000),\u0001\u0000\u0000"+ + "\u0000*(\u0001\u0000\u0000\u0000*+\u0001\u0000\u0000\u0000+\u0003\u0001"+ + "\u0000\u0000\u0000,*\u0001\u0000\u0000\u0000-5\u0003\u0006\u0003\u0000"+ + ".5\u0003\n\u0005\u0000/5\u0003\b\u0004\u000005\u0003\u0010\b\u000015\u0003"+ + "\f\u0006\u000025\u0003\u0012\t\u000035\u0003\u0014\n\u00004-\u0001\u0000"+ + "\u0000\u00004.\u0001\u0000\u0000\u00004/\u0001\u0000\u0000\u000040\u0001"+ + "\u0000\u0000\u000041\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u0000"+ + "43\u0001\u0000\u0000\u00005\u0005\u0001\u0000\u0000\u000067\u0003\u0018"+ + "\f\u000078\u0005\u0005\u0000\u000089\u0005\f\u0000\u00009:\u0003\u0002"+ + "\u0001\u0000:;\u0005\r\u0000\u0000;\u0007\u0001\u0000\u0000\u0000<=\u0005"+ + "\u0010\u0000\u0000=?\u0005\u0005\u0000\u0000><\u0001\u0000\u0000\u0000"+ + ">?\u0001\u0000\u0000\u0000?@\u0001\u0000\u0000\u0000@A\u0005\u0010\u0000"+ + "\u0000A\t\u0001\u0000\u0000\u0000BC\u0005\n\u0000\u0000CD\u0003\u0002"+ + "\u0001\u0000DE\u0005\u000b\u0000\u0000E\u000b\u0001\u0000\u0000\u0000"+ + "FG\u0003\u0018\f\u0000GH\u0007\u0001\u0000\u0000HI\u0003\u000e\u0007\u0000"+ + "I\r\u0001\u0000\u0000\u0000JL\u0007\u0002\u0000\u0000KJ\u0001\u0000\u0000"+ + "\u0000LM\u0001\u0000\u0000\u0000MK\u0001\u0000\u0000\u0000MN\u0001\u0000"+ + "\u0000\u0000NQ\u0001\u0000\u0000\u0000OQ\u0005\u000f\u0000\u0000PK\u0001"+ + "\u0000\u0000\u0000PO\u0001\u0000\u0000\u0000Q\u000f\u0001\u0000\u0000"+ + "\u0000RS\u0003\u0018\f\u0000ST\u0005\u0005\u0000\u0000TU\u0005\u0010\u0000"+ + "\u0000U\u0011\u0001\u0000\u0000\u0000VW\u0003\u0018\f\u0000WX\u0005\u0005"+ + "\u0000\u0000XY\u0003\u0016\u000b\u0000Ya\u0001\u0000\u0000\u0000Z[\u0003"+ + "\u0018\f\u0000[\\\u0005\u0005\u0000\u0000\\]\u0005\n\u0000\u0000]^\u0003"+ + "\u0016\u000b\u0000^_\u0005\u000b\u0000\u0000_a\u0001\u0000\u0000\u0000"+ + "`V\u0001\u0000\u0000\u0000`Z\u0001\u0000\u0000\u0000a\u0013\u0001\u0000"+ + "\u0000\u0000bh\u0003\u0016\u000b\u0000cd\u0005\n\u0000\u0000de\u0003\u0016"+ + "\u000b\u0000ef\u0005\u000b\u0000\u0000fh\u0001\u0000\u0000\u0000gb\u0001"+ + "\u0000\u0000\u0000gc\u0001\u0000\u0000\u0000h\u0015\u0001\u0000\u0000"+ + "\u0000ik\u0007\u0000\u0000\u0000ji\u0001\u0000\u0000\u0000jk\u0001\u0000"+ + "\u0000\u0000km\u0001\u0000\u0000\u0000ln\u0007\u0002\u0000\u0000ml\u0001"+ + "\u0000\u0000\u0000no\u0001\u0000\u0000\u0000om\u0001\u0000\u0000\u0000"+ + "op\u0001\u0000\u0000\u0000p|\u0001\u0000\u0000\u0000qs\u0007\u0002\u0000"+ + "\u0000rq\u0001\u0000\u0000\u0000st\u0001\u0000\u0000\u0000tr\u0001\u0000"+ + 
"\u0000\u0000tu\u0001\u0000\u0000\u0000uw\u0001\u0000\u0000\u0000vx\u0007"+ + "\u0000\u0000\u0000wv\u0001\u0000\u0000\u0000wx\u0001\u0000\u0000\u0000"+ + "x|\u0001\u0000\u0000\u0000y|\u0007\u0003\u0000\u0000z|\u0005\u000f\u0000"+ + "\u0000{j\u0001\u0000\u0000\u0000{r\u0001\u0000\u0000\u0000{y\u0001\u0000"+ + "\u0000\u0000{z\u0001\u0000\u0000\u0000|\u0017\u0001\u0000\u0000\u0000"+ + "}\u007f\u0005\u000e\u0000\u0000~}\u0001\u0000\u0000\u0000\u007f\u0080"+ + "\u0001\u0000\u0000\u0000\u0080~\u0001\u0000\u0000\u0000\u0080\u0081\u0001"+ + "\u0000\u0000\u0000\u0081\u0085\u0001\u0000\u0000\u0000\u0082\u0085\u0005"+ + "\u000f\u0000\u0000\u0083\u0085\u0005\u0010\u0000\u0000\u0084~\u0001\u0000"+ + "\u0000\u0000\u0084\u0082\u0001\u0000\u0000\u0000\u0084\u0083\u0001\u0000"+ + "\u0000\u0000\u0085\u0019\u0001\u0000\u0000\u0000\u0010\u001b#*4>MP`gj"+ + "otw{\u0080\u0084"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java index 55fa21f0e899d..67253e4364190 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java @@ -51,12 +51,6 @@ interface KqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx); - /** - * Visit a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitExpression(KqlBaseParser.ExpressionContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#nestedQuery}. * @param ctx the parse tree @@ -64,29 +58,23 @@ interface KqlBaseVisitor extends ParseTreeVisitor { */ T visitNestedQuery(KqlBaseParser.NestedQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); - /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}. + * Visit a parse tree produced by {@link KqlBaseParser#matchAllQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx); + T visitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldTermQuery}. + * Visit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx); + T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldName}. + * Visit a parse tree produced by {@link KqlBaseParser#rangeQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldName(KqlBaseParser.FieldNameContext ctx); + T visitRangeQuery(KqlBaseParser.RangeQueryContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#rangeQueryValue}. * @param ctx the parse tree @@ -94,33 +82,33 @@ interface KqlBaseVisitor extends ParseTreeVisitor { */ T visitRangeQueryValue(KqlBaseParser.RangeQueryValueContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#termQueryValue}. 
+ * Visit a parse tree produced by {@link KqlBaseParser#existsQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx); + T visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#groupingTermExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx); + T visitFieldQuery(KqlBaseParser.FieldQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldLessQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx); + T visitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#quotedStringExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldQueryValue}. * @param ctx the parse tree * @return the visitor result */ - T visitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx); + T visitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#wildcardExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldName}. * @param ctx the parse tree * @return the visitor result */ - T visitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx); + T visitFieldName(KqlBaseParser.FieldNameContext ctx); } diff --git a/x-pack/plugin/kql/src/test/resources/supported-queries b/x-pack/plugin/kql/src/test/resources/supported-queries index d750f16149112..d9378cf9041c2 100644 --- a/x-pack/plugin/kql/src/test/resources/supported-queries +++ b/x-pack/plugin/kql/src/test/resources/supported-queries @@ -68,6 +68,15 @@ foo_field:foo AND (foo_field:foo bar OR foo bar) foo_field:foo AND (foo_field:foo bar OR foo bar) foo_field:foo OR (foo_field:foo bar OR foo bar) +foo:AND +foo:OR +foo:NOT +foo AND +foo OR +AND foo +OR foo +NOT + // Nested queries nested_field: { NOT foo } nested_field: { NOT foo bar } diff --git a/x-pack/plugin/kql/src/test/resources/unsupported-queries b/x-pack/plugin/kql/src/test/resources/unsupported-queries index 545b03576b331..97a26f16db141 100644 --- a/x-pack/plugin/kql/src/test/resources/unsupported-queries +++ b/x-pack/plugin/kql/src/test/resources/unsupported-queries @@ -16,14 +16,6 @@ NOT (foo_field:foo AND) foo_field:foo bar foo_field: "foo bar foo_field: foo bar" - -// Invalid boolean queries -foo AND -AND foo -foo OR -OR foo -NOT foo: - // Can't nest grouping terms parentheses foo_field:(foo (bar)) From 999274c003cc165c2634cbf94b9bad354239e22d Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 12:43:28 +0200 Subject: [PATCH 149/202] Cleanup HotThreadsIT (example of test cleanup) (#115601) Just a quick example of how to save quite a few lines of code and make a test easier to reason about. 
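In essence, the cleanup swaps hand-rolled CountDownLatch/AtomicBoolean bookkeeping for a future that is inspected once, after the concurrent work finishes. Below is a minimal, self-contained sketch of that same pattern; it uses plain CompletableFuture rather than the ActionFuture and assertResponse helpers in the actual change, and the class, method, and asyncCall names are illustrative only, not code from this repository:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.function.Consumer;

    public class LatchVsFuture {

        // Stand-in for an async API that reports its result to a callback.
        static void asyncCall(Consumer<String> callback) {
            new Thread(() -> callback.accept("ok")).start();
        }

        // Before: latch and error-flag bookkeeping obscures the real assertions.
        static void callbackStyle() throws InterruptedException {
            CountDownLatch latch = new CountDownLatch(1);
            AtomicBoolean hasErrors = new AtomicBoolean(false);
            asyncCall(result -> {
                if ("ok".equals(result) == false) {
                    hasErrors.set(true);
                }
                latch.countDown();
            });
            latch.await();
            if (hasErrors.get()) {
                throw new AssertionError("async call failed");
            }
        }

        // After: start the call, interleave other work, then assert on the result once.
        static void futureStyle() throws Exception {
            CompletableFuture<String> future = new CompletableFuture<>();
            asyncCall(future::complete);
            while (future.isDone() == false) {
                Thread.onSpinWait(); // other work runs here, like the searches in the test below
            }
            if ("ok".equals(future.get()) == false) {
                throw new AssertionError("async call failed");
            }
        }

        public static void main(String[] args) throws Exception {
            callbackStyle();
            futureStyle();
        }
    }

The future carries both the response and any failure, so the error flag, the explicit countDown calls, and the manual fail() in onFailure all disappear.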
--- .../action/admin/HotThreadsIT.java | 54 ++++++------------- 1 file changed, 15 insertions(+), 39 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 20c10c3d8c1f9..8c80cee58f46c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.Level; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; @@ -26,15 +26,14 @@ import org.hamcrest.Matcher; import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -44,11 +43,10 @@ public class HotThreadsIT extends ESIntegTestCase { - public void testHotThreadsDontFail() throws InterruptedException { + public void testHotThreadsDontFail() throws InterruptedException, ExecutionException { // This test just checks if nothing crashes or gets stuck etc. 
createIndex("test");
         final int iters = scaledRandomIntBetween(2, 20);
-        final AtomicBoolean hasErrors = new AtomicBoolean(false);
         for (int i = 0; i < iters; i++) {
             final NodesHotThreadsRequest request = new NodesHotThreadsRequest(
                 Strings.EMPTY_ARRAY,
@@ -67,36 +65,7 @@ public void testHotThreadsDontFail() throws InterruptedException {
                 randomBoolean()
             )
         );
-        final CountDownLatch latch = new CountDownLatch(1);
-        client().execute(TransportNodesHotThreadsAction.TYPE, request, new ActionListener<>() {
-            @Override
-            public void onResponse(NodesHotThreadsResponse nodeHotThreads) {
-                boolean success = false;
-                try {
-                    assertThat(nodeHotThreads, notNullValue());
-                    Map<String, NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();
-                    assertThat(nodeHotThreads.failures(), empty());
-                    assertThat(nodesMap.size(), equalTo(cluster().size()));
-                    for (NodeHotThreads ht : nodeHotThreads.getNodes()) {
-                        assertNotNull(ht.getHotThreads());
-                    }
-                    success = true;
-                } finally {
-                    if (success == false) {
-                        hasErrors.set(true);
-                    }
-                    latch.countDown();
-                }
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                logger.error("FAILED", e);
-                hasErrors.set(true);
-                latch.countDown();
-                fail();
-            }
-        });
+        final ActionFuture<NodesHotThreadsResponse> hotThreadsFuture = client().execute(TransportNodesHotThreadsAction.TYPE, request);

         indexRandom(
             true,
@@ -105,7 +74,7 @@ public void onFailure(Exception e) {
             prepareIndex("test").setId("3").setSource("field1", "value3")
         );
         ensureSearchable();
-        while (latch.getCount() > 0) {
+        while (hotThreadsFuture.isDone() == false) {
             assertHitCount(
                 prepareSearch().setQuery(matchAllQuery())
                     .setPostFilter(
@@ -115,8 +84,15 @@ public void onFailure(Exception e) {
                 3L
             );
         }
-        safeAwait(latch);
-        assertThat(hasErrors.get(), is(false));
+        assertResponse(hotThreadsFuture, nodeHotThreads -> {
+            assertThat(nodeHotThreads, notNullValue());
+            Map<String, NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();
+            assertThat(nodeHotThreads.failures(), empty());
+            assertThat(nodesMap.size(), equalTo(cluster().size()));
+            for (NodeHotThreads ht : nodeHotThreads.getNodes()) {
+                assertNotNull(ht.getHotThreads());
+            }
+        });
     }
 }

From e3523c159106255a96b8c00339f6c565b69c266a Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Fri, 25 Oct 2024 13:01:41 +0200
Subject: [PATCH 150/202] [DOCS] Fix link syntax in connectors-API-tutorial.asciidoc (#115635)

---
 docs/reference/connector/docs/connectors-API-tutorial.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/connector/docs/connectors-API-tutorial.asciidoc b/docs/reference/connector/docs/connectors-API-tutorial.asciidoc
index 5275f82de1b1f..4118c564e4759 100644
--- a/docs/reference/connector/docs/connectors-API-tutorial.asciidoc
+++ b/docs/reference/connector/docs/connectors-API-tutorial.asciidoc
@@ -367,7 +367,7 @@ Refer to the individual <<connectors-references,connector references>> for these con
 ====
 We're using a self-managed connector in this tutorial.
 To use these APIs with an Elastic managed connector, there's some extra setup for API keys.
-Refer to native-connectors-manage-API-keys for details.
+Refer to <<native-connectors-manage-API-keys>> for details.
 ====

 We're now ready to sync our PostgreSQL data to {es}.
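For background on this one-line docs change: an AsciiDoc cross-reference must be written as `<<anchor-id>>` or `<<anchor-id,display text>>`. A bare anchor name such as native-connectors-manage-API-keys renders as literal text rather than a link, which is the broken link syntax this patch corrects.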
From 6e0bdbec0ade4af2b5d130aee6bf9e76a64f0e19 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 25 Oct 2024 13:38:35 +0200 Subject: [PATCH 151/202] Fixed flaky test after PR that disallows functions to return TEXT (#115633) * Fixed flaky test after PR that disallows functions to return TEXT * Also ignore TEXT/KEYWORD combinations because they are now valid * Unmute the test --- muted-tests.yml | 3 --- .../elasticsearch/xpack/esql/analysis/AnalyzerTests.java | 8 +++++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 4869b669f6220..5c94c0aff60b6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,9 +282,6 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 -- class: org.elasticsearch.xpack.esql.analysis.AnalyzerTests - method: testMvAppendValidation - issue: https://github.com/elastic/elasticsearch/issues/115636 # Examples: # diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index c18f55a651408..b86935dcd03da 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -56,6 +56,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -1879,6 +1880,11 @@ public void testMvAppendValidation() { Supplier supplier = () -> randomInt(fields.length - 1); int first = supplier.get(); int second = randomValueOtherThan(first, supplier); + Function noText = (type) -> type.equals("text") ? 
"keyword" : type; + assumeTrue( + "Ignore tests with TEXT and KEYWORD combinations because they are now valid", + noText.apply(fields[first][0]).equals(noText.apply(fields[second][0])) == false + ); String signature = "mv_append(" + fields[first][0] + ", " + fields[second][0] + ")"; verifyUnsupported( @@ -1886,7 +1892,7 @@ public void testMvAppendValidation() { "second argument of [" + signature + "] must be [" - + fields[first][1] + + noText.apply(fields[first][1]) + "], found value [" + fields[second][0] + "] type [" From aabbc840a56e70b1d6b0f1221e3aaf9c4a54c08e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 23:22:55 +1100 Subject: [PATCH 152/202] Mute org.elasticsearch.xpack.search.CrossClusterAsyncSearchIT testCCSClusterDetailsWhereAllShardsSkippedInCanMatch #115652 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5c94c0aff60b6..f10c214be26bf 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 +- class: org.elasticsearch.xpack.search.CrossClusterAsyncSearchIT + method: testCCSClusterDetailsWhereAllShardsSkippedInCanMatch + issue: https://github.com/elastic/elasticsearch/issues/115652 # Examples: # From 9adbebb123c875765624ca1fe85f4aeb118287fb Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Fri, 25 Oct 2024 09:16:54 -0400 Subject: [PATCH 153/202] [ML] Fix streaming IT (#115543) Fix #113430 --- muted-tests.yml | 3 --- .../org/elasticsearch/xpack/inference/InferenceCrudIT.java | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index f10c214be26bf..20879ed327781 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -164,9 +164,6 @@ tests: - class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT method: test {categorize.Categorize} issue: https://github.com/elastic/elasticsearch/issues/113428 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testSupportedStream - issue: https://github.com/elastic/elasticsearch/issues/113430 - class: org.elasticsearch.integration.KibanaUserRoleIntegTests method: testFieldMappings issue: https://github.com/elastic/elasticsearch/issues/113592 diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 37de2caadb475..53c82219e2f12 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -307,8 +307,7 @@ public void testSupportedStream() throws Exception { assertEquals(modelId, singleModel.get("inference_id")); assertEquals(TaskType.COMPLETION.toString(), singleModel.get("task_type")); - var input = IntStream.range(1, randomInt(10)).mapToObj(i -> randomAlphaOfLength(10)).toList(); - + var input = IntStream.range(1, 2 + randomInt(8)).mapToObj(i -> randomAlphaOfLength(10)).toList(); try { var events = streamInferOnMockService(modelId, TaskType.COMPLETION, input); From 6cec96cc1e208e50518949edf3b1840ddd012dd1 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Christoph=20B=C3=BCscher?=
Date: Fri, 25 Oct 2024 15:44:59 +0200
Subject: [PATCH 154/202] Fix TimeSeriesRateAggregatorTests file leak (#115278)

With Lucene 10, IndexWriter requires a parent document field in order to
use index sorting with document blocks. This led to various
IllegalArgumentExceptions and file leaks in this test, which are fixed by
adapting the corresponding location in the test setup.
---
 .../search/aggregations/AggregatorTestCase.java   |  2 ++
 .../rate/TimeSeriesRateAggregatorTests.java       | 15 ++++++++-------
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java
index 5f64d123c1bed..d6709b00b4dbb 100644
--- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java
@@ -81,6 +81,7 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.cache.query.DisabledQueryCache;
 import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy;
+import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.fielddata.FieldDataContext;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
@@ -749,6 +750,7 @@ protected void tes
                 new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false),
                 new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true)
             );
+            config.setParentField(Engine.ROOT_DOC_FIELD_NAME);
             config.setIndexSort(sort);
         }
         RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config);
diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java
index 753ce8283afca..3c7a18de536bc 100644
--- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java
+++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java
@@ -42,6 +42,7 @@ import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.startsWith;

 public class TimeSeriesRateAggregatorTests extends AggregatorTestCase {

@@ -155,14 +156,14 @@ public void testNestedWithinAutoDateHistogram() throws IOException {
         AggTestConfig aggTestConfig = new AggTestConfig(tsBuilder, timeStampField(), counterField("counter_field"))
             .withSplitLeavesIntoSeperateAggregators(false);
-        expectThrows(IllegalArgumentException.class, () -> testCase(iw -> {
-            for (Document document : docs(2000, "1", 15, 37, 60, /*reset*/ 14)) {
-                iw.addDocument(document);
-            }
-            for (Document document : docs(2000, "2", 74, 150, /*reset*/ 50, 90, /*reset*/ 40)) {
-                iw.addDocument(document);
-            }
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testCase(iw -> {
+            iw.addDocuments(docs(2000, "1", 15, 37, 60, /*reset*/ 14));
+            iw.addDocuments(docs(2000, "2", 74, 150, /*reset*/ 50, 90, /*reset*/ 40));
         }, verifier, aggTestConfig));
+        assertThat(
+            e.getMessage(),
+            startsWith("Wrapping a time-series rate aggregation within a DeferableBucketAggregator is not supported.")
+        );
     }

     private List<Document> docs(long startTimestamp, String dim, long... values) throws IOException {

From d4ac705d57ff19685703738883c595392547e399 Mon Sep 17 00:00:00 2001
From: John Wagster
Date: Fri, 25 Oct 2024 09:26:51 -0500
Subject: [PATCH 155/202] [CI] MixedClusterClientYamlTestSuiteIT test
 {p0=range/20_synthetic_source/Date range} failing - Removed old `Date range`
 test because it's no longer validating useful code (#114057)

Unmuting the test and removing the BWC test to get mixedClusterTest working.
---
 muted-tests.yml                            |   3 -
 .../test/range/20_synthetic_source.yml     | 134 ------------------
 2 files changed, 137 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index 20879ed327781..70f29016d8475 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -191,9 +191,6 @@ tests:
 - class: org.elasticsearch.threadpool.SimpleThreadPoolIT
   method: testThreadPoolMetrics
   issue: https://github.com/elastic/elasticsearch/issues/108320
-- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
-  method: test {p0=range/20_synthetic_source/Date range}
-  issue: https://github.com/elastic/elasticsearch/issues/113874
 - class: org.elasticsearch.kibana.KibanaThreadPoolIT
   method: testBlockedThreadPoolsRejectUserRequests
   issue: https://github.com/elastic/elasticsearch/issues/113939
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml
index cc92b52e0887a..de20f82f8ba2f 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/20_synthetic_source.yml
@@ -525,140 +525,6 @@ setup:
         _source:
           ip_range: { "gte": "2001:db8::", "lte": null }

----
-"Date range":
-  - skip:
-      cluster_features: ["mapper.range.date_range_indexing_fix"]
-      reason: "tests prior to rounding fixes in 8.16.0 that caused non-intuitive indexing and query because ranges were assumed to always index with 0's as the default such as when time is missing 00:00:00.000 time was assumed but for lte indexing and query missing time should be 23:59:59.999 as per docs here: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html"
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "1"
-        body: { "date_range" : { "gte": "2017-09-01", "lte": "2017-09-05" } }
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "2"
-        body: { "date_range" : { "gt": "2017-09-01", "lte": "2017-09-03" } }
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "3"
-        body: { "date_range" : [ { "gte": "2017-09-04", "lt": "2017-09-05" } ] }
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "4"
-        body: { "date_range" : [ { "gt": "2017-09-04", "lt": "2017-09-08" }, { "gt": "2017-09-04", "lt": "2017-09-07" } ] }
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "5"
-        body: { "date_range" : { "gte": 1504224000000, "lte": 1504569600000 } }
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "6"
-        body: { "date_range" : { "gte": "2017-09-01T10:20:30.123Z", "lte": "2017-09-05T03:04:05.789Z" } }
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "7"
-        body: { "date_range" : null }
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "8"
-        body: { "date_range": { "gte": null, "lte": "2017-09-05" } }
-
-  - do:
-      index:
-        index: synthetic_source_test
-        id: "9"
-        body: { "date_range": { "gte": "2017-09-05" } }
-
-  - do:
-      indices.refresh: {}
-
-  - do:
-      get:
-        index: synthetic_source_test
index: synthetic_source_test - id: "1" - - match: - _source: - date_range: { "gte": "2017-09-01T00:00:00.000Z", "lte": "2017-09-05T00:00:00.000Z" } - - - do: - get: - index: synthetic_source_test - id: "2" - - match: - _source: - date_range: { "gte": "2017-09-01T00:00:00.001Z", "lte": "2017-09-03T00:00:00.000Z" } - - - do: - get: - index: synthetic_source_test - id: "3" - - match: - _source: - date_range: { "gte": "2017-09-04T00:00:00.000Z", "lte": "2017-09-04T23:59:59.999Z" } - - - do: - get: - index: synthetic_source_test - id: "4" - - match: - _source: - date_range: [ { "gte": "2017-09-04T00:00:00.001Z", "lte": "2017-09-06T23:59:59.999Z" }, { "gte": "2017-09-04T00:00:00.001Z", "lte": "2017-09-07T23:59:59.999Z" } ] - - - do: - get: - index: synthetic_source_test - id: "5" - - match: - _source: - date_range: { "gte": "2017-09-01T00:00:00.000Z", "lte": "2017-09-05T00:00:00.000Z" } - - - do: - get: - index: synthetic_source_test - id: "6" - - match: - _source: - date_range: { "gte": "2017-09-01T10:20:30.123Z", "lte": "2017-09-05T03:04:05.789Z" } - - - do: - get: - index: synthetic_source_test - id: "7" - - match: - _source: {} - - - do: - get: - index: synthetic_source_test - id: "8" - - match: - _source: - date_range: { "gte": null, "lte": "2017-09-05T00:00:00.000Z" } - - - do: - get: - index: synthetic_source_test - id: "9" - - match: - _source: - date_range: { "gte": "2017-09-05T00:00:00.000Z", "lte": null } - --- "Date range Rounding Fixes": - requires: From 0b94bb8a75076213f7ac7029d21a57ca2ebf93a1 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 25 Oct 2024 16:51:02 +0200 Subject: [PATCH 156/202] Slightly more generous assertions for Cartesian tests (#115658) --- .../elasticsearch/lucene/spatial/CentroidCalculatorTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java index 8216d092bd683..caf4494986f6d 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CentroidCalculatorTests.java @@ -428,7 +428,7 @@ private Matcher matchDouble(double value) { // Most data (notably geo data) has values within bounds, and an absolute delta makes more sense. double delta = (value > 1e28 || value < -1e28) ? Math.abs(value / 1e6) : (value > 1e20 || value < -1e20) ? Math.abs(value / 1e10) - : (value > 1e9 || value < -1e9) ? Math.abs(value / 1e15) + : (value > 1e8 || value < -1e8) ? 
Math.abs(value / 1e15) : DELTA; return closeTo(value, delta); } From bb4a444edef301f263210209636ee46fb9d32d80 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 25 Oct 2024 17:17:30 +0200 Subject: [PATCH 157/202] [Gradle] Fix packaging tests after removing cloud docker image (#115654) --- .../InternalDistributionDownloadPlugin.java | 3 --- ...kerCloudElasticsearchDistributionType.java | 27 ------------------- ...nternalElasticsearchDistributionTypes.java | 2 -- .../internal/test/DistroTestPlugin.java | 2 -- 4 files changed, 34 deletions(-) delete mode 100644 build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerCloudElasticsearchDistributionType.java diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index 19309fe2da8a3..0bf4bcb33c23b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -172,9 +172,6 @@ private static String distributionProjectName(ElasticsearchDistribution distribu if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_IRONBANK) { return projectName + "ironbank-docker" + archString + "-export"; } - if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_CLOUD) { - return projectName + "cloud-docker" + archString + "-export"; - } if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS) { return projectName + "cloud-ess-docker" + archString + "-export"; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerCloudElasticsearchDistributionType.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerCloudElasticsearchDistributionType.java deleted file mode 100644 index eb522dbcad5e2..0000000000000 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerCloudElasticsearchDistributionType.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.gradle.internal.distribution; - -import org.elasticsearch.gradle.ElasticsearchDistributionType; - -public class DockerCloudElasticsearchDistributionType implements ElasticsearchDistributionType { - - DockerCloudElasticsearchDistributionType() {} - - @Override - public String getName() { - return "dockerCloud"; - } - - @Override - public boolean isDocker() { - return true; - } -} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java index ba0e76b3f5b99..8f0951da86b88 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java @@ -19,7 +19,6 @@ public class InternalElasticsearchDistributionTypes { public static ElasticsearchDistributionType DOCKER = new DockerElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_UBI = new DockerUbiElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_IRONBANK = new DockerIronBankElasticsearchDistributionType(); - public static ElasticsearchDistributionType DOCKER_CLOUD = new DockerCloudElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_CLOUD_ESS = new DockerCloudEssElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_WOLFI = new DockerWolfiElasticsearchDistributionType(); @@ -29,7 +28,6 @@ public class InternalElasticsearchDistributionTypes { DOCKER, DOCKER_UBI, DOCKER_IRONBANK, - DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI ); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index 77ab9557eac33..8e7884888b63b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -49,7 +49,6 @@ import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.ALL_INTERNAL; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DEB; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER; -import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_CLOUD; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_IRONBANK; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_UBI; @@ -149,7 +148,6 @@ private static Map> lifecycleTask lifecyleTasks.put(DOCKER, project.getTasks().register(taskPrefix + ".docker")); lifecyleTasks.put(DOCKER_UBI, project.getTasks().register(taskPrefix + ".docker-ubi")); lifecyleTasks.put(DOCKER_IRONBANK, project.getTasks().register(taskPrefix + ".docker-ironbank")); - lifecyleTasks.put(DOCKER_CLOUD, project.getTasks().register(taskPrefix + ".docker-cloud")); 
lifecyleTasks.put(DOCKER_CLOUD_ESS, project.getTasks().register(taskPrefix + ".docker-cloud-ess"));
         lifecyleTasks.put(DOCKER_WOLFI, project.getTasks().register(taskPrefix + ".docker-wolfi"));
         lifecyleTasks.put(ARCHIVE, project.getTasks().register(taskPrefix + ".archives"));

From e82e6af50517e35400d5438a3932a5b6b478b8d5 Mon Sep 17 00:00:00 2001
From: István Zoltán Szabó
Date: Fri, 25 Oct 2024 17:35:48 +0200
Subject: [PATCH 158/202] [DOCS] Documents configurable chunking (#115300)

Co-authored-by: David Kyle
---
 .../inference/inference-apis.asciidoc         | 62 ++++++++++++++++++-
 .../inference/inference-shared.asciidoc       | 34 +++++++++-
 .../service-alibabacloud-ai-search.asciidoc   | 21 ++++++-
 .../inference/service-amazon-bedrock.asciidoc | 20 ++++++
 .../inference/service-anthropic.asciidoc      | 20 ++++++
 .../service-azure-ai-studio.asciidoc          | 20 ++++++
 .../inference/service-azure-openai.asciidoc   | 20 ++++++
 .../inference/service-cohere.asciidoc         | 20 ++++++
 .../inference/service-elasticsearch.asciidoc  | 20 ++++++
 .../inference/service-elser.asciidoc          | 20 ++++++
 .../service-google-ai-studio.asciidoc         | 20 ++++++
 .../service-google-vertex-ai.asciidoc         | 20 ++++++
 .../inference/service-hugging-face.asciidoc   | 20 ++++++
 .../inference/service-mistral.asciidoc        | 20 ++++++
 .../inference/service-openai.asciidoc         | 20 ++++++
 15 files changed, 354 insertions(+), 3 deletions(-)

diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc
index 1206cb02ba89a..38afc7c416f18 100644
--- a/docs/reference/inference/inference-apis.asciidoc
+++ b/docs/reference/inference/inference-apis.asciidoc
@@ -35,7 +35,6 @@ Elastic –, then create an {infer} endpoint by the <>.
 Now use <> to perform <> on your data.
 
-
 [discrete]
 [[default-enpoints]]
 === Default {infer} endpoints
@@ -53,6 +52,67 @@ For these models, the minimum number of allocations is `0`.
 If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes.
 
+[discrete]
+[[infer-chunking-config]]
+=== Configuring chunking
+
+{infer-cap} endpoints have a limit on the amount of text they can process at once, determined by the model's input capacity.
+Chunking is the process of splitting the input text into pieces that remain within these limits.
+It occurs when ingesting documents into <>.
+Chunking also helps produce sections that are digestible for humans.
+Returning a long document in search results is less useful than providing the most relevant chunk of text.
+
+Each chunk will include the text subpassage and the corresponding embedding generated from it.
+
+By default, documents are split into sentences and grouped in sections up to 250 words with 1 sentence overlap so that each chunk shares a sentence with the previous chunk.
+Overlapping ensures continuity and prevents vital contextual information in the input text from being lost by a hard break.
+
+{es} uses the https://unicode-org.github.io/icu-docs/[ICU4J] library to detect word and sentence boundaries for chunking.
+https://unicode-org.github.io/icu/userguide/boundaryanalysis/#word-boundary[Word boundaries] are identified by following a series of rules, not just the presence of a whitespace character.
+For written languages that do not use whitespace, such as Chinese or Japanese, dictionary lookups are used to detect word boundaries.
+
+
+[discrete]
+==== Chunking strategies
+
+Two strategies are available for chunking: `sentence` and `word`.
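+
+For example, a hypothetical endpoint using the `word` strategy (described below) might be configured with a 200-word window and the maximum allowed overlap of 100 words; the endpoint name here is illustrative:
+
+[source,console]
+------------------------------------------------------------
+PUT _inference/sparse_embedding/word_chunk_example
+{
+  "service": "elasticsearch",
+  "service_settings": {
+    "num_allocations": 1,
+    "num_threads": 1
+  },
+  "chunking_settings": {
+    "strategy": "word",
+    "max_chunk_size": 200,
+    "overlap": 100
+  }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]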
+
+The `sentence` strategy splits the input text at sentence boundaries.
+Each chunk contains one or more complete sentences ensuring that the integrity of sentence-level context is preserved, except when a sentence causes a chunk to exceed a word count of `max_chunk_size`, in which case it will be split across chunks.
+The `sentence_overlap` option defines the number of sentences from the previous chunk to include in the current chunk which is either `0` or `1`.
+
+The `word` strategy splits the input text on individual words up to the `max_chunk_size` limit.
+The `overlap` option is the number of words from the previous chunk to include in the current chunk.
+
+The default chunking strategy is `sentence`.
+
+NOTE: The default chunking strategy for {infer} endpoints created before 8.16 is `word`.
+
+
+[discrete]
+==== Example of configuring the chunking behavior
+
+The following example creates an {infer} endpoint with the `elasticsearch` service that deploys the ELSER model by default and configures the chunking behavior.
+
+[source,console]
+------------------------------------------------------------
+PUT _inference/sparse_embedding/small_chunk_size
+{
+  "service": "elasticsearch",
+  "service_settings": {
+    "num_allocations": 1,
+    "num_threads": 1
+  },
+  "chunking_settings": {
+    "strategy": "sentence",
+    "max_chunk_size": 100,
+    "sentence_overlap": 0
+  }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+
 include::delete-inference.asciidoc[]
 include::get-inference.asciidoc[]
 include::post-inference.asciidoc[]
diff --git a/docs/reference/inference/inference-shared.asciidoc b/docs/reference/inference/inference-shared.asciidoc
index 2eafa3434e89e..da497c6581e5d 100644
--- a/docs/reference/inference/inference-shared.asciidoc
+++ b/docs/reference/inference/inference-shared.asciidoc
@@ -31,4 +31,36 @@ end::task-settings[]
 
 tag::task-type[]
 The type of the {infer} task that the model will perform.
-end::task-type[]
\ No newline at end of file
+end::task-type[]
+
+tag::chunking-settings[]
+Chunking configuration object.
+Refer to <> to learn more about chunking.
+end::chunking-settings[]
+
+tag::chunking-settings-max-chunking-size[]
+Specifies the maximum size of a chunk in words.
+Defaults to `250`.
+This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).
+end::chunking-settings-max-chunking-size[]
+
+tag::chunking-settings-overlap[]
+Only for `word` chunking strategy.
+Specifies the number of overlapping words for chunks.
+Defaults to `100`.
+This value cannot be higher than half of `max_chunking_size`.
+end::chunking-settings-overlap[]
+
+tag::chunking-settings-sentence-overlap[]
+Only for `sentence` chunking strategy.
+Specifies the number of overlapping sentences for chunks.
+It can be either `1` or `0`.
+Defaults to `1`.
+end::chunking-settings-sentence-overlap[]
+
+tag::chunking-settings-strategy[]
+Specifies the chunking strategy.
+It could be either `sentence` or `word`.
+end::chunking-settings-strategy[] + + diff --git a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc index 0607b56b528ea..c3ff40a39cd86 100644 --- a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc +++ b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc @@ -34,6 +34,26 @@ Available task types: [[infer-service-alibabacloud-ai-search-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, @@ -108,7 +128,6 @@ To modify this, set the `requests_per_minute` setting of this object in your ser include::inference-shared.asciidoc[tag=request-per-minute-example] -- - `task_settings`:: (Optional, object) include::inference-shared.asciidoc[tag=task-settings] diff --git a/docs/reference/inference/service-amazon-bedrock.asciidoc b/docs/reference/inference/service-amazon-bedrock.asciidoc index dbffd5c26fbcc..761777e32f8e0 100644 --- a/docs/reference/inference/service-amazon-bedrock.asciidoc +++ b/docs/reference/inference/service-amazon-bedrock.asciidoc @@ -32,6 +32,26 @@ Available task types: [[infer-service-amazon-bedrock-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-anthropic.asciidoc b/docs/reference/inference/service-anthropic.asciidoc index 41419db7a6069..7fb3d1d5bea34 100644 --- a/docs/reference/inference/service-anthropic.asciidoc +++ b/docs/reference/inference/service-anthropic.asciidoc @@ -32,6 +32,26 @@ Available task types: [[infer-service-anthropic-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. 
In this case, diff --git a/docs/reference/inference/service-azure-ai-studio.asciidoc b/docs/reference/inference/service-azure-ai-studio.asciidoc index 0d711a0d6171f..dd13a3e59aae5 100644 --- a/docs/reference/inference/service-azure-ai-studio.asciidoc +++ b/docs/reference/inference/service-azure-ai-studio.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-azure-ai-studio-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-azure-openai.asciidoc b/docs/reference/inference/service-azure-openai.asciidoc index 6f03c5966d9e6..b134e2b687f6c 100644 --- a/docs/reference/inference/service-azure-openai.asciidoc +++ b/docs/reference/inference/service-azure-openai.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-azure-openai-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-cohere.asciidoc b/docs/reference/inference/service-cohere.asciidoc index 84eae6e880617..1a815e3c45f36 100644 --- a/docs/reference/inference/service-cohere.asciidoc +++ b/docs/reference/inference/service-cohere.asciidoc @@ -34,6 +34,26 @@ Available task types: [[infer-service-cohere-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. 
In this case, diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index 259779a12134d..0103b425faefe 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -36,6 +36,26 @@ Available task types: [[infer-service-elasticsearch-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 521fab0375584..273d743e47a4b 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -36,6 +36,26 @@ Available task types: [[infer-service-elser-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-google-ai-studio.asciidoc b/docs/reference/inference/service-google-ai-studio.asciidoc index 25aa89cd49110..738fce3d53e9b 100644 --- a/docs/reference/inference/service-google-ai-studio.asciidoc +++ b/docs/reference/inference/service-google-ai-studio.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-google-ai-studio-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. 
In this case, diff --git a/docs/reference/inference/service-google-vertex-ai.asciidoc b/docs/reference/inference/service-google-vertex-ai.asciidoc index 640553ab74626..34e14e05e072a 100644 --- a/docs/reference/inference/service-google-vertex-ai.asciidoc +++ b/docs/reference/inference/service-google-vertex-ai.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-google-vertex-ai-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-hugging-face.asciidoc b/docs/reference/inference/service-hugging-face.asciidoc index 177a15177d21f..6d8667351a6b4 100644 --- a/docs/reference/inference/service-hugging-face.asciidoc +++ b/docs/reference/inference/service-hugging-face.asciidoc @@ -32,6 +32,26 @@ Available task types: [[infer-service-hugging-face-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, diff --git a/docs/reference/inference/service-mistral.asciidoc b/docs/reference/inference/service-mistral.asciidoc index 077e610191705..244381d107161 100644 --- a/docs/reference/inference/service-mistral.asciidoc +++ b/docs/reference/inference/service-mistral.asciidoc @@ -32,6 +32,26 @@ Available task types: [[infer-service-mistral-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. 
In this case, diff --git a/docs/reference/inference/service-openai.asciidoc b/docs/reference/inference/service-openai.asciidoc index 075e76dc7d741..21643133553e1 100644 --- a/docs/reference/inference/service-openai.asciidoc +++ b/docs/reference/inference/service-openai.asciidoc @@ -33,6 +33,26 @@ Available task types: [[infer-service-openai-api-request-body]] ==== {api-request-body-title} +`chunking_settings`:: +(Optional, object) +include::inference-shared.asciidoc[tag=chunking-settings] + +`max_chunking_size`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size] + +`overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-overlap] + +`sentence_overlap`::: +(Optional, integer) +include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap] + +`strategy`::: +(Optional, string) +include::inference-shared.asciidoc[tag=chunking-settings-strategy] + `service`:: (Required, string) The type of service supported for the specified task type. In this case, From 3b5bd62467456db5eaefbf7cd72ce324eb7dbb49 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Fri, 25 Oct 2024 09:06:11 -0700 Subject: [PATCH 159/202] Add tests for license changes while using data streams (#115478) --- .../logsdb/DataStreamLicenceDowngradeIT.java | 489 ++++++++++++++++++ .../logsdb/DataStreamLicenseChangeIT.java | 107 ++++ .../logsdb/DataStreamLicenseUpgradeIT.java | 487 +++++++++++++++++ 3 files changed, 1083 insertions(+) create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenceDowngradeIT.java create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseChangeIT.java create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseUpgradeIT.java diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenceDowngradeIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenceDowngradeIT.java new file mode 100644 index 0000000000000..f004189098c43 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenceDowngradeIT.java @@ -0,0 +1,489 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.index.mapper.SourceFieldMapper; + +import java.io.IOException; +import java.util.List; + +public class DataStreamLicenceDowngradeIT extends DataStreamLicenseChangeIT { + @Override + protected void applyInitialLicense() throws IOException { + startTrial(); + } + + @Override + protected void licenseChange() throws IOException { + startBasic(); + } + + @Override + protected List cases() { + return List.of(new TestCase() { + @Override + public String dataStreamName() { + return "logs-test-regular"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + private static final String sourceModeOverride = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "SYNTHETIC" + } + } + } + }"""; + + @Override + public String dataStreamName() { + return "logs-test-explicit-synthetic"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + assertOK(createDataStream(client(), dataStreamName())); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public void rollover() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + rolloverDataStream(client(), dataStreamName()); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + private static final String sourceModeOverride = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "STORED" + } + } + } + }"""; + + @Override + public String dataStreamName() { + return "logs-test-explicit-stored"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + assertOK(createDataStream(client(), dataStreamName())); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public void rollover() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + rolloverDataStream(client(), dataStreamName()); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-regular"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + 
"template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"] + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-regular-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-regular"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-regular-component"] + } + """; + + putTemplate(client(), "tsdb-test-regular-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-synthetic"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "mapping.source.mode": "SYNTHETIC" + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-synthetic-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-synthetic"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-synthetic-component"] + } + """; + + putTemplate(client(), "tsdb-test-synthetic-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-stored"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "mapping.source.mode": "STORED" + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-stored-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-stored"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-stored-component"] + } + """; + + putTemplate(client(), "tsdb-test-stored-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + + new TestCase() { + @Override + public String dataStreamName() { + 
return "standard"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var template = """ + { + "index_patterns": ["standard"], + "priority": 100, + "data_stream": {}, + "composed_of": [] + } + """; + + putTemplate(client(), "standard-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + new TestCase() { + @Override + public String dataStreamName() { + return "standard-synthetic"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "SYNTHETIC" + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "standard-synthetic-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["standard-synthetic"], + "priority": 100, + "data_stream": {}, + "composed_of": ["standard-synthetic-component"] + } + """; + + putTemplate(client(), "standard-synthetic-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + new TestCase() { + @Override + public String dataStreamName() { + return "standard-stored"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "STORED" + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "standard-stored-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["standard-stored"], + "priority": 100, + "data_stream": {}, + "composed_of": ["standard-stored-component"] + } + """; + + putTemplate(client(), "standard-stored-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + } + ); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseChangeIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseChangeIT.java new file mode 100644 index 0000000000000..b84c982766e4b --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseChangeIT.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.List; + +public abstract class DataStreamLicenseChangeIT extends LogsIndexModeRestTestIT { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .module("data-streams") + .module("x-pack-stack") + .setting("cluster.logsdb.enabled", "true") + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "basic") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + protected interface TestCase { + String dataStreamName(); + + void prepareDataStream() throws IOException; + + String indexMode(); + + SourceFieldMapper.Mode initialMode(); + + SourceFieldMapper.Mode finalMode(); + + void rollover() throws IOException; + } + + protected abstract void licenseChange() throws IOException; + + protected abstract void applyInitialLicense() throws IOException; + + protected abstract List cases(); + + public void testLicenseChange() throws IOException { + applyInitialLicense(); + + for (var testCase : cases()) { + testCase.prepareDataStream(); + + var indexMode = (String) getSetting(client(), getDataStreamBackingIndex(client(), testCase.dataStreamName(), 0), "index.mode"); + assertEquals(testCase.indexMode(), indexMode); + + var sourceMode = (String) getSetting( + client(), + getDataStreamBackingIndex(client(), testCase.dataStreamName(), 0), + "index.mapping.source.mode" + ); + assertEquals(testCase.initialMode().toString(), sourceMode); + } + + licenseChange(); + + for (var testCase : cases()) { + testCase.rollover(); + + var indexMode = (String) getSetting(client(), getDataStreamBackingIndex(client(), testCase.dataStreamName(), 1), "index.mode"); + assertEquals(testCase.indexMode(), indexMode); + + var sourceMode = (String) getSetting( + client(), + getDataStreamBackingIndex(client(), testCase.dataStreamName(), 1), + "index.mapping.source.mode" + ); + assertEquals(testCase.finalMode().toString(), sourceMode); + } + } + + protected static void startBasic() throws IOException { + Request startTrial = new Request("POST", "/_license/start_basic"); + startTrial.addParameter("acknowledge", "true"); + assertOK(client().performRequest(startTrial)); + } + + protected static void startTrial() throws IOException { + Request startTrial = new Request("POST", "/_license/start_trial"); + startTrial.addParameter("acknowledge", "true"); + assertOK(client().performRequest(startTrial)); + } + + protected static Response removeComponentTemplate(final RestClient client, final String componentTemplate) throws IOException { + final Request request = new Request("DELETE", "/_component_template/" + componentTemplate); + return client.performRequest(request); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseUpgradeIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseUpgradeIT.java new 
file mode 100644 index 0000000000000..bce43ca046523 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/DataStreamLicenseUpgradeIT.java @@ -0,0 +1,487 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.index.mapper.SourceFieldMapper; + +import java.io.IOException; +import java.util.List; + +public class DataStreamLicenseUpgradeIT extends DataStreamLicenseChangeIT { + @Override + protected void applyInitialLicense() {} + + @Override + protected void licenseChange() throws IOException { + startTrial(); + } + + @Override + protected List cases() { + return List.of(new TestCase() { + @Override + public String dataStreamName() { + return "logs-test-regular"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, new TestCase() { + private static final String sourceModeOverride = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "SYNTHETIC" + } + } + } + }"""; + + @Override + public String dataStreamName() { + return "logs-test-explicit-synthetic"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + assertOK(createDataStream(client(), dataStreamName())); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public void rollover() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + rolloverDataStream(client(), dataStreamName()); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, new TestCase() { + private static final String sourceModeOverride = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "STORED" + } + } + } + }"""; + + @Override + public String dataStreamName() { + return "logs-test-explicit-stored"; + } + + @Override + public String indexMode() { + return "logsdb"; + } + + @Override + public void prepareDataStream() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + assertOK(createDataStream(client(), dataStreamName())); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public void rollover() throws IOException { + assertOK(putComponentTemplate(client(), "logs@custom", sourceModeOverride)); + rolloverDataStream(client(), dataStreamName()); + assertOK(removeComponentTemplate(client(), "logs@custom")); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return 
SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-regular"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"] + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-regular-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-regular"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-regular-component"] + } + """; + + putTemplate(client(), "tsdb-test-regular-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-synthetic"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "mapping.source.mode": "SYNTHETIC" + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-synthetic-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-synthetic"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-synthetic-component"] + } + """; + + putTemplate(client(), "tsdb-test-synthetic-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, new TestCase() { + @Override + public String dataStreamName() { + return "tsdb-test-stored"; + } + + @Override + public String indexMode() { + return "time_series"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "mapping.source.mode": "STORED" + } + }, + "mappings": { + "properties": { + "dim": { + "type": "keyword", + "time_series_dimension": true + } + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "tsdb-test-stored-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["tsdb-test-stored"], + "priority": 100, + "data_stream": {}, + "composed_of": ["tsdb-test-stored-component"] + } + """; + + putTemplate(client(), "tsdb-test-stored-template", template); + assertOK(createDataStream(client(), 
dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + + new TestCase() { + @Override + public String dataStreamName() { + return "standard"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var template = """ + { + "index_patterns": ["standard"], + "priority": 100, + "data_stream": {}, + "composed_of": [] + } + """; + + putTemplate(client(), "standard-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + }, + new TestCase() { + @Override + public String dataStreamName() { + return "standard-synthetic"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "SYNTHETIC" + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "standard-synthetic-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["standard-synthetic"], + "priority": 100, + "data_stream": {}, + "composed_of": ["standard-synthetic-component"] + } + """; + + putTemplate(client(), "standard-synthetic-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.SYNTHETIC; + } + }, + new TestCase() { + @Override + public String dataStreamName() { + return "standard-stored"; + } + + @Override + public String indexMode() { + return "standard"; + } + + @Override + public void prepareDataStream() throws IOException { + var componentTemplate = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "STORED" + } + } + } + } + """; + assertOK(putComponentTemplate(client(), "standard-stored-component", componentTemplate)); + + var template = """ + { + "index_patterns": ["standard-stored"], + "priority": 100, + "data_stream": {}, + "composed_of": ["standard-stored-component"] + } + """; + + putTemplate(client(), "standard-stored-template", template); + assertOK(createDataStream(client(), dataStreamName())); + } + + @Override + public void rollover() throws IOException { + rolloverDataStream(client(), dataStreamName()); + } + + @Override + public SourceFieldMapper.Mode initialMode() { + return SourceFieldMapper.Mode.STORED; + } + + @Override + public SourceFieldMapper.Mode finalMode() { + return SourceFieldMapper.Mode.STORED; + } + } + ); + } +} From 29d1d9e6e034df4a40f5357e03c30d0cccb51afc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Fri, 25 Oct 2024 19:13:34 +0200 Subject: [PATCH 160/202] Implement 
string parsing for the KQL parser. (#115662) --- .../xpack/kql/parser/ParserUtils.java | 254 ++++++++++++++++ .../xpack/kql/parser/ParserUtilsTests.java | 280 ++++++++++++++++++ 2 files changed, 534 insertions(+) create mode 100644 x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/ParserUtils.java create mode 100644 x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/ParserUtilsTests.java diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/ParserUtils.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/ParserUtils.java new file mode 100644 index 0000000000000..f996a953ea7f7 --- /dev/null +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/ParserUtils.java @@ -0,0 +1,254 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.apache.logging.log4j.util.Strings; +import org.apache.lucene.queryparser.classic.QueryParser; + +import java.util.ArrayList; +import java.util.List; + +/** + * Utility class for parsing and processing KQL expressions. + * Provides methods for type-safe parsing, text extraction, and string escaping/unescaping. + */ +public final class ParserUtils { + + private static final String UNQUOTED_LITERAL_TERM_DELIMITER = " "; + private static final char ESCAPE_CHAR = '\\'; + private static final char QUOTE_CHAR = '"'; + private static final char WILDCARD_CHAR = '*'; + + private ParserUtils() { + throw new UnsupportedOperationException("No need to instantiate this class"); + } + + /** + * Performs type-safe parsing using the provided visitor. + * + * @param visitor The visitor to use to do the parsing + * @param ctx The parser tree context to visit + * @param type The expected return type class + * @return The parsed result, casted to the expected type + */ + @SuppressWarnings("unchecked") + public static T typedParsing(ParseTreeVisitor visitor, ParserRuleContext ctx, Class type) { + Object result = ctx.accept(visitor); + + if (type.isInstance(result)) { + return (T) result; + } + + throw new KqlParsingException( + "Invalid query '{}'[{}] given; expected {} but found {}", + ctx.start.getLine(), + ctx.start.getCharPositionInLine(), + ctx.getText(), + ctx.getClass().getSimpleName(), + type.getSimpleName(), + (result != null ? result.getClass().getSimpleName() : "null") + ); + } + + /** + * Extracts text from a parser tree context by joining all terminal nodes with a space delimiter. + * + * @param ctx The parser tree context + * + * @return The extracted text + */ + public static String extractText(ParserRuleContext ctx) { + return String.join(UNQUOTED_LITERAL_TERM_DELIMITER, extractTextTokens(ctx)); + } + + /** + * Checks if the given context contains any unescaped wildcard characters. 
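+     * For instance, {@code foo*} contains an unescaped wildcard, while {@code foo\*} does not,
+     * because the backslash escapes the star.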
+     *
+     * @param ctx The tree context to check
+     * @return true if wildcards are present, false otherwise
+     */
+    public static boolean hasWildcard(ParserRuleContext ctx) {
+        return ctx.children.stream().anyMatch(childNode -> {
+            if (childNode instanceof TerminalNode terminalNode) {
+                Token token = terminalNode.getSymbol();
+                return switch (token.getType()) {
+                    case KqlBaseParser.WILDCARD -> true;
+                    case KqlBaseParser.UNQUOTED_LITERAL -> token.getText().matches("[^\\\\]*[*].*");
+                    default -> false;
+                };
+            }
+
+            return false;
+        });
+    }
+
+    /**
+     * Escapes special characters in a query string for use in Lucene queries.
+     *
+     * @param queryText The query text to escape
+     * @param preserveWildcards If true, does not escape wildcard characters (*)
+     * @return The escaped query string
+     */
+    public static String escapeLuceneQueryString(String queryText, boolean preserveWildcards) {
+        if (preserveWildcards) {
+            StringBuilder escapedQuery = new StringBuilder(queryText.length());
+            StringBuilder subpart = new StringBuilder(queryText.length());
+
+            for (char currentChar : queryText.toCharArray()) {
+                if (currentChar == WILDCARD_CHAR) {
+                    escapedQuery.append(QueryParser.escape(subpart.toString())).append(currentChar);
+                    subpart.setLength(0);
+                } else {
+                    subpart.append(currentChar);
+                }
+            }
+
+            return escapedQuery.append(QueryParser.escape(subpart.toString())).toString();
+        }
+
+        return QueryParser.escape(queryText);
+    }
+
+    private static List<String> extractTextTokens(ParserRuleContext ctx) {
+        assert ctx.children != null;
+        List<String> textTokens = new ArrayList<>(ctx.children.size());
+
+        for (ParseTree currentNode : ctx.children) {
+            if (currentNode instanceof TerminalNode terminalNode) {
+                textTokens.add(extractText(terminalNode));
+            } else {
+                throw new KqlParsingException("Unable to extract text from ctx", ctx.start.getLine(), ctx.start.getCharPositionInLine());
+            }
+        }
+
+        return textTokens;
+    }
+
+    private static String extractText(TerminalNode node) {
+        if (node.getSymbol().getType() == KqlBaseParser.QUOTED_STRING) {
+            return unescapeQuotedString(node);
+        } else if (node.getSymbol().getType() == KqlBaseParser.UNQUOTED_LITERAL) {
+            return unescapeUnquotedLiteral(node);
+        }
+
+        return node.getText();
+    }
+
+    private static String unescapeQuotedString(TerminalNode ctx) {
+        String inputText = ctx.getText();
+
+        assert inputText.length() >= 2 && inputText.charAt(0) == QUOTE_CHAR && inputText.charAt(inputText.length() - 1) == QUOTE_CHAR;
+        StringBuilder sb = new StringBuilder();
+
+        for (int i = 1; i < inputText.length() - 1;) {
+            char currentChar = inputText.charAt(i++);
+            if (currentChar == ESCAPE_CHAR && i + 1 < inputText.length()) {
+                currentChar = inputText.charAt(i++);
+                switch (currentChar) {
+                    case 't' -> sb.append('\t');
+                    case 'n' -> sb.append('\n');
+                    case 'r' -> sb.append('\r');
+                    case 'u' -> i = handleUnicodeSequence(ctx, sb, inputText, i);
+                    case QUOTE_CHAR -> sb.append('\"');
+                    case ESCAPE_CHAR -> sb.append(ESCAPE_CHAR);
+                    default -> sb.append(ESCAPE_CHAR).append(currentChar);
+                }
+            } else {
+                sb.append(currentChar);
+            }
+        }
+
+        return sb.toString();
+    }
+
+    private static String unescapeUnquotedLiteral(TerminalNode ctx) {
+        String inputText = ctx.getText();
+
+        if (inputText == null || inputText.isEmpty()) {
+            return inputText;
+        }
+        StringBuilder sb = new StringBuilder(inputText.length());
+
+        for (int i = 0; i < inputText.length();) {
+            char currentChar = inputText.charAt(i++);
+            if (currentChar == ESCAPE_CHAR && i < inputText.length()) {
+                if (isEscapedKeywordSequence(inputText, i)) {
+                    String sequence = handleKeywordSequence(inputText, i);
+                    sb.append(sequence);
+                    i += sequence.length();
+                } else {
+                    currentChar = inputText.charAt(i++);
+                    switch (currentChar) {
+                        case 't' -> sb.append('\t');
+                        case 'n' -> sb.append('\n');
+                        case 'r' -> sb.append('\r');
+                        case 'u' -> i = handleUnicodeSequence(ctx, sb, inputText, i);
+                        case QUOTE_CHAR -> sb.append('\"');
+                        case ESCAPE_CHAR -> sb.append(ESCAPE_CHAR);
+                        case '(', ')', ':', '<', '>', '*', '{', '}' -> sb.append(currentChar);
+                        default -> sb.append(ESCAPE_CHAR).append(currentChar);
+                    }
+                }
+            } else {
+                sb.append(currentChar);
+            }
+        }
+
+        return sb.toString();
+    }
+
+    private static boolean isEscapedKeywordSequence(String input, int startIndex) {
+        if (startIndex + 1 >= input.length()) {
+            return false;
+        }
+        String remaining = Strings.toRootLowerCase(input.substring(startIndex));
+        return remaining.startsWith("and") || remaining.startsWith("or") || remaining.startsWith("not");
+    }
+
+    private static String handleKeywordSequence(String input, int startIndex) {
+        String remaining = input.substring(startIndex);
+        if (Strings.toRootLowerCase(remaining).startsWith("and")) return remaining.substring(0, 3);
+        if (Strings.toRootLowerCase(remaining).startsWith("or")) return remaining.substring(0, 2);
+        if (Strings.toRootLowerCase(remaining).startsWith("not")) return remaining.substring(0, 3);
+        return "";
+    }
+
+    private static int handleUnicodeSequence(TerminalNode ctx, StringBuilder sb, String text, int startIdx) {
+        int endIdx = startIdx + 4;
+        String hex = text.substring(startIdx, endIdx);
+
+        try {
+            int code = Integer.parseInt(hex, 16);
+
+            if (code >= 0xD800 && code <= 0xDFFF) {
+                // U+D800—U+DFFF can only be used as surrogate pairs and are not valid character codes.
+                throw new KqlParsingException(
+                    "Invalid unicode character code, [{}] is a surrogate code",
+                    ctx.getSymbol().getLine(),
+                    ctx.getSymbol().getCharPositionInLine() + startIdx,
+                    hex
+                );
+            }
+            sb.append(String.valueOf(Character.toChars(code)));
+        } catch (IllegalArgumentException e) {
+            throw new KqlParsingException(
+                "Invalid unicode character code [{}]",
+                ctx.getSymbol().getLine(),
+                ctx.getSymbol().getCharPositionInLine() + startIdx,
+                hex
+            );
+        }
+
+        return endIdx;
+    }
+}
diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/ParserUtilsTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/ParserUtilsTests.java
new file mode 100644
index 0000000000000..05474bcedd4c8
--- /dev/null
+++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/ParserUtilsTests.java
@@ -0,0 +1,280 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.kql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.antlr.v4.runtime.tree.TerminalNodeImpl; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.kql.parser.KqlBaseParser.QUOTED_STRING; +import static org.elasticsearch.xpack.kql.parser.KqlBaseParser.UNQUOTED_LITERAL; +import static org.elasticsearch.xpack.kql.parser.KqlBaseParser.WILDCARD; +import static org.elasticsearch.xpack.kql.parser.ParserUtils.escapeLuceneQueryString; +import static org.elasticsearch.xpack.kql.parser.ParserUtils.extractText; +import static org.elasticsearch.xpack.kql.parser.ParserUtils.hasWildcard; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ParserUtilsTests extends ESTestCase { + + public void testExtractTestWithQuotedString() { + // General case + assertThat(extractText(parserRuleContext(quotedStringNode("foo"))), equalTo("foo")); + + // Empty string + assertThat(extractText(parserRuleContext(quotedStringNode(""))), equalTo("")); + + // Whitespaces are preserved + assertThat(extractText(parserRuleContext(quotedStringNode(" foo bar "))), equalTo(" foo bar ")); + + // Quoted string does not need escaping for KQL keywords (and, or, ...) + assertThat(extractText(parserRuleContext(quotedStringNode("not foo and bar or baz"))), equalTo("not foo and bar or baz")); + + // Quoted string does not need escaping for KQL special chars (e.g: '{', ':', ...) 
+ assertThat(extractText(parserRuleContext(quotedStringNode("foo*:'\u3000{(})"))), equalTo("foo*:'\u3000{(})")); + + // Escaped characters handling + assertThat(extractText(parserRuleContext(quotedStringNode("\\\\"))), equalTo("\\")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\\\bar"))), equalTo("foo\\bar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\\\"))), equalTo("foo\\")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\\\foo"))), equalTo("\\foo")); + + assertThat(extractText(parserRuleContext(quotedStringNode("\\\""))), equalTo("\"")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\\"bar"))), equalTo("foo\"bar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\\""))), equalTo("foo\"")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\\"foo"))), equalTo("\"foo")); + + assertThat(extractText(parserRuleContext(quotedStringNode("\\t"))), equalTo("\t")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\tbar"))), equalTo("foo\tbar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\t"))), equalTo("foo\t")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\tfoo"))), equalTo("\tfoo")); + + assertThat(extractText(parserRuleContext(quotedStringNode("\\n"))), equalTo("\n")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\nbar"))), equalTo("foo\nbar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\n"))), equalTo("foo\n")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\nfoo"))), equalTo("\nfoo")); + + assertThat(extractText(parserRuleContext(quotedStringNode("\\r"))), equalTo("\r")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\rbar"))), equalTo("foo\rbar")); + assertThat(extractText(parserRuleContext(quotedStringNode("foo\\r"))), equalTo("foo\r")); + assertThat(extractText(parserRuleContext(quotedStringNode("\\rfoo"))), equalTo("\rfoo")); + + // Unicode characters handling (\u0041 is 'A') + assertThat(extractText(parserRuleContext(quotedStringNode(format("\\u0041")))), equalTo("A")); + assertThat(extractText(parserRuleContext(quotedStringNode(format("foo\\u0041bar")))), equalTo("fooAbar")); + assertThat(extractText(parserRuleContext(quotedStringNode(format("foo\\u0041")))), equalTo("fooA")); + assertThat(extractText(parserRuleContext(quotedStringNode(format("\\u0041foo")))), equalTo("Afoo")); + } + + public void testExtractTestWithUnquotedLiteral() { + // General case + assertThat(extractText(parserRuleContext(literalNode("foo"))), equalTo("foo")); + + // KQL keywords unescaping + assertThat(extractText(parserRuleContext(literalNode("\\not foo \\and bar \\or baz"))), equalTo("not foo and bar or baz")); + assertThat( + extractText(parserRuleContext(literalNode("\\\\not foo \\\\and bar \\\\or baz"))), + equalTo("\\not foo \\and bar \\or baz") + ); + + // Escaped characters handling + assertThat(extractText(parserRuleContext(literalNode("\\\\"))), equalTo("\\")); + assertThat(extractText(parserRuleContext(literalNode("foo\\\\bar"))), equalTo("foo\\bar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\\\"))), equalTo("foo\\")); + assertThat(extractText(parserRuleContext(literalNode("\\\\foo"))), equalTo("\\foo")); + + assertThat(extractText(parserRuleContext(literalNode("\\\""))), equalTo("\"")); + assertThat(extractText(parserRuleContext(literalNode("foo\\\"bar"))), equalTo("foo\"bar")); + 
assertThat(extractText(parserRuleContext(literalNode("foo\\\""))), equalTo("foo\"")); + assertThat(extractText(parserRuleContext(literalNode("\\\"foo"))), equalTo("\"foo")); + + assertThat(extractText(parserRuleContext(literalNode("\\t"))), equalTo("\t")); + assertThat(extractText(parserRuleContext(literalNode("foo\\tbar"))), equalTo("foo\tbar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\t"))), equalTo("foo\t")); + assertThat(extractText(parserRuleContext(literalNode("\\tfoo"))), equalTo("\tfoo")); + + assertThat(extractText(parserRuleContext(literalNode("\\n"))), equalTo("\n")); + assertThat(extractText(parserRuleContext(literalNode("foo\\nbar"))), equalTo("foo\nbar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\n"))), equalTo("foo\n")); + assertThat(extractText(parserRuleContext(literalNode("\\nfoo"))), equalTo("\nfoo")); + + assertThat(extractText(parserRuleContext(literalNode("\\r"))), equalTo("\r")); + assertThat(extractText(parserRuleContext(literalNode("foo\\rbar"))), equalTo("foo\rbar")); + assertThat(extractText(parserRuleContext(literalNode("foo\\r"))), equalTo("foo\r")); + assertThat(extractText(parserRuleContext(literalNode("\\rfoo"))), equalTo("\rfoo")); + + for (String escapedChar : List.of("(", ")", ":", "<", ">", "*", "{", "}")) { + assertThat(extractText(parserRuleContext(literalNode(format("\\%s", escapedChar)))), equalTo(escapedChar)); + assertThat( + extractText(parserRuleContext(literalNode(format("foo\\%sbar", escapedChar)))), + equalTo(format("foo%sbar", escapedChar)) + ); + assertThat(extractText(parserRuleContext(literalNode(format("foo\\%s", escapedChar)))), equalTo(format("foo%s", escapedChar))); + assertThat(extractText(parserRuleContext(literalNode(format("\\%sfoo", escapedChar)))), equalTo(format("%sfoo", escapedChar))); + } + + // Unicode characters handling (\u0041 is 'A') + assertThat(extractText(parserRuleContext(literalNode(format("\\u0041")))), equalTo("A")); + assertThat(extractText(parserRuleContext(literalNode(format("foo\\u0041bar")))), equalTo("fooAbar")); + assertThat(extractText(parserRuleContext(literalNode(format("foo\\u0041")))), equalTo("fooA")); + assertThat(extractText(parserRuleContext(literalNode(format("\\u0041foo")))), equalTo("Afoo")); + } + + public void testHasWildcard() { + // No children + assertFalse(hasWildcard(parserRuleContext(List.of()))); + + // Lone wildcard + assertTrue(hasWildcard(parserRuleContext(wildcardNode()))); + assertTrue(hasWildcard(parserRuleContext(randomTextNodeListWithNode(wildcardNode())))); + + // All children are literals + assertFalse(hasWildcard(parserRuleContext(randomList(1, randomIntBetween(1, 100), ParserUtilsTests::randomLiteralNode)))); + + // Quoted string + assertFalse(hasWildcard(parserRuleContext(randomQuotedStringNode()))); + + // Literal node containing the wildcard character + assertTrue(hasWildcard(parserRuleContext(literalNode("f*oo")))); + assertTrue(hasWildcard(parserRuleContext(literalNode("*foo")))); + assertTrue(hasWildcard(parserRuleContext(literalNode("foo*")))); + + // Literal node containing the wildcard characters (escaped) + assertFalse(hasWildcard(parserRuleContext(literalNode("f\\*oo")))); + assertFalse(hasWildcard(parserRuleContext(literalNode("\\*foo")))); + assertFalse(hasWildcard(parserRuleContext(literalNode("foo\\*")))); + } + + public void testUnquotedLiteralInvalidUnicodeCodeParsing() { + { + // Invalid unicode digit (G) + ParserRuleContext ctx = parserRuleContext(literalNode("\\u0G41")); + KqlParsingException e = 
assertThrows(KqlParsingException.class, () -> extractText(ctx)); + assertThat(e.getMessage(), equalTo("line 0:3: Invalid unicode character code [0G41]")); + } + + { + // U+D800—U+DFFF can only be used as surrogate pairs and are not valid character codes. + ParserRuleContext ctx = parserRuleContext(literalNode("\\uD900")); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> extractText(ctx)); + assertThat(e.getMessage(), equalTo("line 0:3: Invalid unicode character code, [D900] is a surrogate code")); + } + } + + public void testQuotedStringInvalidUnicodeCodeParsing() { + { + // Invalid unicode digit (G) + ParserRuleContext ctx = parserRuleContext(quotedStringNode("\\u0G41")); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> extractText(ctx)); + assertThat(e.getMessage(), equalTo("line 0:4: Invalid unicode character code [0G41]")); + } + + { + // U+D800—U+DFFF can only be used as surrogate pairs and are not valid character codes. + ParserRuleContext ctx = parserRuleContext(quotedStringNode("\\uD900")); + KqlParsingException e = assertThrows(KqlParsingException.class, () -> extractText(ctx)); + assertThat(e.getMessage(), equalTo("line 0:4: Invalid unicode character code, [D900] is a surrogate code")); + } + } + + public void testEscapeLuceneQueryString() { + // Quotes + assertThat(escapeLuceneQueryString("\"The Pink Panther\"", randomBoolean()), equalTo("\\\"The Pink Panther\\\"")); + + // Escape chars + assertThat(escapeLuceneQueryString("The Pink \\ Panther", randomBoolean()), equalTo("The Pink \\\\ Panther")); + + // Field operations + assertThat(escapeLuceneQueryString("title:Do it right", randomBoolean()), equalTo("title\\:Do it right")); + assertThat(escapeLuceneQueryString("title:(pink panther)", randomBoolean()), equalTo("title\\:\\(pink panther\\)")); + assertThat(escapeLuceneQueryString("title:-pink", randomBoolean()), equalTo("title\\:\\-pink")); + assertThat(escapeLuceneQueryString("title:+pink", randomBoolean()), equalTo("title\\:\\+pink")); + assertThat(escapeLuceneQueryString("title:pink~", randomBoolean()), equalTo("title\\:pink\\~")); + assertThat(escapeLuceneQueryString("title:pink~3.5", randomBoolean()), equalTo("title\\:pink\\~3.5")); + assertThat(escapeLuceneQueryString("title:pink panther^4", randomBoolean()), equalTo("title\\:pink panther\\^4")); + assertThat(escapeLuceneQueryString("rating:[0 TO 5]", randomBoolean()), equalTo("rating\\:\\[0 TO 5\\]")); + assertThat(escapeLuceneQueryString("rating:{0 TO 5}", randomBoolean()), equalTo("rating\\:\\{0 TO 5\\}")); + + // Boolean operators + assertThat(escapeLuceneQueryString("foo || bar", randomBoolean()), equalTo("foo \\|\\| bar")); + assertThat(escapeLuceneQueryString("foo && bar", randomBoolean()), equalTo("foo \\&\\& bar")); + assertThat(escapeLuceneQueryString("!foo", randomBoolean()), equalTo("\\!foo")); + + // Wildcards: + assertThat(escapeLuceneQueryString("te?t", randomBoolean()), equalTo("te\\?t")); + assertThat(escapeLuceneQueryString("foo*", true), equalTo("foo*")); + assertThat(escapeLuceneQueryString("*foo", true), equalTo("*foo")); + assertThat(escapeLuceneQueryString("foo * bar", true), equalTo("foo * bar")); + assertThat(escapeLuceneQueryString("foo*", false), equalTo("foo\\*")); + } + + private static ParserRuleContext parserRuleContext(ParseTree child) { + return parserRuleContext(List.of(child)); + } + + private static ParserRuleContext parserRuleContext(List children) { + ParserRuleContext ctx = new ParserRuleContext(null, randomInt()); + ctx.children = 
children; + return ctx; + } + + private static TerminalNode terminalNode(int type, String text) { + Token symbol = mock(Token.class); + when(symbol.getType()).thenReturn(type); + when(symbol.getText()).thenReturn(text); + when(symbol.getLine()).thenReturn(0); + when(symbol.getCharPositionInLine()).thenReturn(0); + return new TerminalNodeImpl(symbol); + } + + private static List randomTextNodeListWithNode(TerminalNode node) { + List nodes = new ArrayList<>( + Stream.concat(Stream.generate(ParserUtilsTests::randomTextNode).limit(100), Stream.of(node)).toList() + ); + Collections.shuffle(nodes, random()); + return nodes; + } + + private static TerminalNode randomTextNode() { + return switch (randomInt() % 3) { + case 0 -> wildcardNode(); + case 1 -> randomQuotedStringNode(); + default -> randomLiteralNode(); + }; + } + + private static TerminalNode quotedStringNode(String quotedStringText) { + return terminalNode(QUOTED_STRING, "\"" + quotedStringText + "\""); + } + + private static TerminalNode randomQuotedStringNode() { + return quotedStringNode(randomIdentifier()); + } + + private static TerminalNode literalNode(String literalText) { + return terminalNode(UNQUOTED_LITERAL, literalText); + } + + private static TerminalNode randomLiteralNode() { + return terminalNode(UNQUOTED_LITERAL, randomIdentifier()); + } + + private static TerminalNode wildcardNode() { + return terminalNode(WILDCARD, "*"); + } +} From 3294c679eb9b2fee0d6aca529da3fe2e2745db18 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Fri, 25 Oct 2024 13:57:43 -0400 Subject: [PATCH 161/202] Fix test mute 115605 (#115659) * Unmuting test issue #115605 * fixing --------- Co-authored-by: Elastic Machine --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 70f29016d8475..abd483c1bc67e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -270,9 +270,6 @@ tests: - class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} issue: https://github.com/elastic/elasticsearch/issues/115600 -- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT - method: test {yaml=indices.create/10_basic/Create lookup index} - issue: https://github.com/elastic/elasticsearch/issues/115605 - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 From 679ef122091e825401993530e38dcabe26f375bf Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 05:31:10 +1100 Subject: [PATCH 162/202] Mute org.elasticsearch.index.get.GetResultTests testToAndFromXContent #115688 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index abd483c1bc67e..f92795c18e2d2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -276,6 +276,9 @@ tests: - class: org.elasticsearch.xpack.search.CrossClusterAsyncSearchIT method: testCCSClusterDetailsWhereAllShardsSkippedInCanMatch issue: https://github.com/elastic/elasticsearch/issues/115652 +- class: org.elasticsearch.index.get.GetResultTests + method: testToAndFromXContent + issue: https://github.com/elastic/elasticsearch/issues/115688 # Examples: # From 8240f409f943b1f2f865a4704213cdb79b047881 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 05:31:46 +1100 Subject: [PATCH 163/202] 
Mute org.elasticsearch.action.update.UpdateResponseTests testToAndFromXContent #115689 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f92795c18e2d2..91644c9af70ca 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,6 +279,9 @@ tests: - class: org.elasticsearch.index.get.GetResultTests method: testToAndFromXContent issue: https://github.com/elastic/elasticsearch/issues/115688 +- class: org.elasticsearch.action.update.UpdateResponseTests + method: testToAndFromXContent + issue: https://github.com/elastic/elasticsearch/issues/115689 # Examples: # From 68316f7d17da89a11695e62561b4b0e099c0c3ff Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Fri, 25 Oct 2024 20:52:06 +0200 Subject: [PATCH 164/202] Remove metering from ingest service to occur afterwards when parsing the final document (#114895) --- ...eteringParserDecoratorWithPipelinesIT.java | 137 ------------------ .../XContentMeteringParserDecoratorIT.java | 11 +- .../org/elasticsearch/TransportVersions.java | 1 + .../action/index/IndexRequest.java | 81 +++-------- .../action/update/UpdateHelper.java | 17 +-- .../index/mapper/DocumentParser.java | 2 +- .../index/mapper/ParsedDocument.java | 21 +-- .../index/mapper/SourceToParse.java | 2 +- .../elasticsearch/ingest/IngestService.java | 16 +- .../elasticsearch/node/NodeConstruction.java | 9 +- .../internal/DocumentParsingProvider.java | 4 +- .../XContentMeteringParserDecorator.java | 8 +- .../bulk/TransportShardBulkActionTests.java | 32 ++-- .../ingest/ReservedPipelineActionTests.java | 2 - .../action/update/UpdateRequestTests.java | 5 +- .../index/IndexingSlowLogTests.java | 12 +- .../index/engine/InternalEngineTests.java | 4 +- .../index/shard/RefreshListenersTests.java | 4 +- .../index/translog/TranslogTests.java | 4 +- .../ingest/IngestServiceTests.java | 77 ---------- .../ingest/SimulateIngestServiceTests.java | 2 - .../snapshots/SnapshotResiliencyTests.java | 3 +- .../index/engine/EngineTestCase.java | 3 +- ...sportGetTrainedModelsStatsActionTests.java | 2 - .../authz/AuthorizationServiceTests.java | 3 +- 25 files changed, 87 insertions(+), 375 deletions(-) delete mode 100644 modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java deleted file mode 100644 index 3547b3f9910ad..0000000000000 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.plugins.internal; - -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.ingest.common.IngestCommonPlugin; -import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.FilterXContentParserWrapper; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; - -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) -public class XContentMeteringParserDecoratorWithPipelinesIT extends ESIntegTestCase { - - private static String TEST_INDEX_NAME = "test-index-name"; - // the assertions are done in plugin which is static and will be created by ES server. - // hence a static flag to make sure it is indeed used - public static volatile boolean hasWrappedParser; - public static AtomicLong providedFixedSize = new AtomicLong(); - - public void testDocumentIsReportedWithPipelines() throws Exception { - hasWrappedParser = false; - // pipeline adding fields, changing destination is not affecting reporting - putJsonPipeline("pipeline", """ - { - "processors": [ - { - "set": { - "field": "my-text-field", - "value": "xxxx" - } - }, - { - "set": { - "field": "my-boolean-field", - "value": true - } - } - ] - } - """); - - client().index( - new IndexRequest(TEST_INDEX_NAME).setPipeline("pipeline") - .id("1") - .source(jsonBuilder().startObject().field("test", "I am sam i am").endObject()) - ).actionGet(); - assertBusy(() -> { - // ingest node has used an observer that was counting #map operations - // and passed that info to newFixedSize observer in TransportShardBulkAction - assertTrue(hasWrappedParser); - assertThat(providedFixedSize.get(), equalTo(1L)); - }); - } - - @Override - protected Collection> nodePlugins() { - return List.of(TestDocumentParsingProviderPlugin.class, IngestCommonPlugin.class); - } - - public static class TestDocumentParsingProviderPlugin extends Plugin implements DocumentParsingProviderPlugin, IngestPlugin { - - public TestDocumentParsingProviderPlugin() {} - - @Override - public DocumentParsingProvider getDocumentParsingProvider() { - // returns a static instance, because we want to assert that the wrapping is called only once - return new DocumentParsingProvider() { - @Override - public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { - if (request instanceof IndexRequest indexRequest && indexRequest.getNormalisedBytesParsed() > 0) { - long normalisedBytesParsed = indexRequest.getNormalisedBytesParsed(); - providedFixedSize.set(normalisedBytesParsed); - return new TestXContentMeteringParserDecorator(normalisedBytesParsed); - } - return new TestXContentMeteringParserDecorator(0L); - } - - @Override - public DocumentSizeReporter newDocumentSizeReporter( - String indexName, - MapperService mapperService, - DocumentSizeAccumulator documentSizeAccumulator - ) { - return DocumentSizeReporter.EMPTY_INSTANCE; - } - }; - } - } - - public static class TestXContentMeteringParserDecorator implements XContentMeteringParserDecorator { - long mapCounter = 0; - - 
public TestXContentMeteringParserDecorator(long mapCounter) { - this.mapCounter = mapCounter; - } - - @Override - public XContentParser decorate(XContentParser xContentParser) { - hasWrappedParser = true; - return new FilterXContentParserWrapper(xContentParser) { - - @Override - public Map map() throws IOException { - mapCounter++; - return super.map(); - } - }; - } - - @Override - public ParsedDocument.DocumentSize meteredDocumentSize() { - return new ParsedDocument.DocumentSize(mapCounter, 0); - } - } - -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java index f11c145f71f23..f70667b91aec8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.plugins.internal; -import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; @@ -126,7 +125,7 @@ public TestDocumentParsingProviderPlugin() {} public DocumentParsingProvider getDocumentParsingProvider() { return new DocumentParsingProvider() { @Override - public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { + public XContentMeteringParserDecorator newMeteringParserDecorator(IndexRequest request) { return new TestXContentMeteringParserDecorator(0L); } @@ -152,8 +151,8 @@ public TestDocumentSizeReporter(String indexName) { @Override public void onIndexingCompleted(ParsedDocument parsedDocument) { - long delta = parsedDocument.getNormalizedSize().ingestedBytes(); - if (delta > 0) { + long delta = parsedDocument.getNormalizedSize(); + if (delta > XContentMeteringParserDecorator.UNKNOWN_SIZE) { COUNTER.addAndGet(delta); } assertThat(indexName, equalTo(TEST_INDEX_NAME)); @@ -181,8 +180,8 @@ public Token nextToken() throws IOException { } @Override - public ParsedDocument.DocumentSize meteredDocumentSize() { - return new ParsedDocument.DocumentSize(counter, counter); + public long meteredDocumentSize() { + return counter; } } } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 25bb792d827a9..3986ea4b97254 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -182,6 +182,7 @@ static TransportVersion def(int id) { public static final TransportVersion SIMULATE_MAPPING_ADDITION = def(8_777_00_0); public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); public static final TransportVersion INDEX_MODE_LOOKUP = def(8_779_00_0); + public static final TransportVersion INDEX_REQUEST_REMOVE_METERING = def(8_780_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index b98f5d87ee232..d0785a60dd0f5 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -146,9 +146,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement * rawTimestamp field is used on the coordinate node, it doesn't need to be serialised. */ private Object rawTimestamp; - private long normalisedBytesParsed = -1; - private boolean originatesFromUpdateByScript; - private boolean originatesFromUpdateByDoc; public IndexRequest(StreamInput in) throws IOException { this(null, in); @@ -183,7 +180,7 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio dynamicTemplates = in.readMap(StreamInput::readString); if (in.getTransportVersion().onOrAfter(PIPELINES_HAVE_RUN_FIELD_ADDED) && in.getTransportVersion().before(TransportVersions.V_8_13_0)) { - in.readBoolean(); + in.readBoolean(); // obsolete, prior to tracking normalisedBytesParsed } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.listExecutedPipelines = in.readBoolean(); @@ -196,21 +193,20 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { requireDataStream = in.readBoolean(); - normalisedBytesParsed = in.readZLong(); } else { requireDataStream = false; } - if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { - originatesFromUpdateByScript = in.readBoolean(); - } else { - originatesFromUpdateByScript = false; - } - - if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { - originatesFromUpdateByDoc = in.readBoolean(); - } else { - originatesFromUpdateByDoc = false; + if (in.getTransportVersion().before(TransportVersions.INDEX_REQUEST_REMOVE_METERING)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { + in.readZLong(); // obsolete normalisedBytesParsed + } + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + in.readBoolean(); // obsolete originatesFromUpdateByScript + } + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { + in.readBoolean(); // obsolete originatesFromUpdateByDoc + } } } @@ -759,7 +755,7 @@ private void writeBody(StreamOutput out) throws IOException { out.writeMap(dynamicTemplates, StreamOutput::writeString); if (out.getTransportVersion().onOrAfter(PIPELINES_HAVE_RUN_FIELD_ADDED) && out.getTransportVersion().before(TransportVersions.V_8_13_0)) { - out.writeBoolean(normalisedBytesParsed != -1L); + out.writeBoolean(false); // obsolete, prior to tracking normalisedBytesParsed } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeBoolean(listExecutedPipelines); @@ -770,15 +766,18 @@ private void writeBody(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeBoolean(requireDataStream); - out.writeZLong(normalisedBytesParsed); - } - - if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { - out.writeBoolean(originatesFromUpdateByScript); } - if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { - 
out.writeBoolean(originatesFromUpdateByDoc); + if (out.getTransportVersion().before(TransportVersions.INDEX_REQUEST_REMOVE_METERING)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { + out.writeZLong(-1); // obsolete normalisedBytesParsed + } + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + out.writeBoolean(false); // obsolete originatesFromUpdateByScript + } + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { + out.writeBoolean(false); // obsolete originatesFromUpdateByDoc + } } } @@ -928,24 +927,6 @@ public void setRawTimestamp(Object rawTimestamp) { this.rawTimestamp = rawTimestamp; } - /** - * Returns a number of bytes observed when parsing a document in earlier stages of ingestion (like update/ingest service) - * Defaults to -1 when a document size was not observed in earlier stages. - * @return a number of bytes observed - */ - public long getNormalisedBytesParsed() { - return normalisedBytesParsed; - } - - /** - * Sets number of bytes observed by a DocumentSizeObserver - * @return an index request - */ - public IndexRequest setNormalisedBytesParsed(long normalisedBytesParsed) { - this.normalisedBytesParsed = normalisedBytesParsed; - return this; - } - /** * Adds the pipeline to the list of executed pipelines, if listExecutedPipelines is true * @@ -976,22 +957,4 @@ public List getExecutedPipelines() { return Collections.unmodifiableList(executedPipelines); } } - - public IndexRequest setOriginatesFromUpdateByScript(boolean originatesFromUpdateByScript) { - this.originatesFromUpdateByScript = originatesFromUpdateByScript; - return this; - } - - public boolean originatesFromUpdateByScript() { - return originatesFromUpdateByScript; - } - - public boolean originatesFromUpdateByDoc() { - return originatesFromUpdateByDoc; - } - - public IndexRequest setOriginatesFromUpdateByDoc(boolean originatesFromUpdateByDoc) { - this.originatesFromUpdateByDoc = originatesFromUpdateByDoc; - return this; - } } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 212b99ca140d3..d32e102b2e18b 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -28,8 +28,7 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; +import org.elasticsearch.plugins.internal.XContentParserDecorator; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.UpdateCtxMap; @@ -51,11 +50,9 @@ public class UpdateHelper { private static final Logger logger = LogManager.getLogger(UpdateHelper.class); private final ScriptService scriptService; - private final DocumentParsingProvider documentParsingProvider; - public UpdateHelper(ScriptService scriptService, DocumentParsingProvider documentParsingProvider) { + public UpdateHelper(ScriptService scriptService) { this.scriptService = scriptService; - this.documentParsingProvider = documentParsingProvider; } /** @@ -183,14 +180,13 @@ static String calculateRouting(GetResult getResult, @Nullable IndexRequest updat Result 
prepareUpdateIndexRequest(IndexShard indexShard, UpdateRequest request, GetResult getResult, boolean detectNoop) { final IndexRequest currentRequest = request.doc(); final String routing = calculateRouting(getResult, currentRequest); - final XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator(request); final Tuple> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); final XContentType updateSourceContentType = sourceAndContent.v1(); final Map updatedSourceAsMap = sourceAndContent.v2(); final boolean noop = XContentHelper.update( updatedSourceAsMap, - currentRequest.sourceAsMap(meteringParserDecorator), + currentRequest.sourceAsMap(XContentParserDecorator.NOOP), detectNoop ) == false; @@ -228,9 +224,7 @@ Result prepareUpdateIndexRequest(IndexShard indexShard, UpdateRequest request, G .setIfPrimaryTerm(getResult.getPrimaryTerm()) .waitForActiveShards(request.waitForActiveShards()) .timeout(request.timeout()) - .setRefreshPolicy(request.getRefreshPolicy()) - .setOriginatesFromUpdateByDoc(true); - finalIndexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes()); + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(finalIndexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); } } @@ -272,8 +266,7 @@ Result prepareUpdateScriptRequest(IndexShard indexShard, UpdateRequest request, .setIfPrimaryTerm(getResult.getPrimaryTerm()) .waitForActiveShards(request.waitForActiveShards()) .timeout(request.timeout()) - .setRefreshPolicy(request.getRefreshPolicy()) - .setOriginatesFromUpdateByScript(true); + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(indexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); } case DELETE -> { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 1ed0a117ddd89..bde9b0fb8a4ab 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -80,7 +80,7 @@ public ParsedDocument parseDocument(SourceToParse source, MappingLookup mappingL final RootDocumentParserContext context; final XContentType xContentType = source.getXContentType(); - XContentMeteringParserDecorator meteringParserDecorator = source.getDocumentSizeObserver(); + XContentMeteringParserDecorator meteringParserDecorator = source.getMeteringParserDecorator(); try ( XContentParser parser = meteringParserDecorator.decorate( XContentHelper.createParser(parserConfiguration, source.source(), xContentType) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index b1d882f04de54..f2ddf38fe4357 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.xcontent.XContentType; import java.util.Collections; @@ -24,6 +25,7 @@ * The result of parsing a document. 
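+ * <p>A {@link #getNormalizedSize()} of {@code XContentMeteringParserDecorator.UNKNOWN_SIZE}
+ * indicates that the document's size was not metered while parsing.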
*/ public class ParsedDocument { + private final Field version; private final String id; @@ -33,7 +35,7 @@ public class ParsedDocument { private final List documents; - private final DocumentSize normalizedSize; + private final long normalizedSize; private BytesReference source; private XContentType xContentType; @@ -61,7 +63,7 @@ public static ParsedDocument noopTombstone(String reason) { new BytesArray("{}"), XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); } @@ -86,7 +88,7 @@ public static ParsedDocument deleteTombstone(String id) { new BytesArray("{}"), XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); } @@ -99,7 +101,7 @@ public ParsedDocument( BytesReference source, XContentType xContentType, Mapping dynamicMappingsUpdate, - DocumentSize normalizedSize + long normalizedSize ) { this.version = version; this.seqID = seqID; @@ -178,16 +180,7 @@ public String documentDescription() { return "id"; } - public DocumentSize getNormalizedSize() { + public long getNormalizedSize() { return normalizedSize; } - - /** - * Normalized ingested and stored size of a document. - * @param ingestedBytes ingest size of the document - * @param storedBytes stored retained size of the document - */ - public record DocumentSize(long ingestedBytes, long storedBytes) { - public static final DocumentSize UNKNOWN = new DocumentSize(-1, -1); - } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java index a8cb03c223833..879e0fe785df2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java @@ -91,7 +91,7 @@ public XContentType getXContentType() { return this.xContentType; } - public XContentMeteringParserDecorator getDocumentSizeObserver() { + public XContentMeteringParserDecorator getMeteringParserDecorator() { return meteringParserDecorator; } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 99ff44a3cd135..b5ac54b018e46 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -68,8 +68,6 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.node.ReportingService; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.plugins.internal.XContentParserDecorator; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.Scheduler; @@ -121,7 +119,6 @@ public class IngestService implements ClusterStateApplier, ReportingService taskQueue; private final ClusterService clusterService; private final ScriptService scriptService; - private final DocumentParsingProvider documentParsingProvider; private final Map processorFactories; // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there. // We know of all the processor factories when a node with all its plugin have been initialized. 
Also some @@ -204,12 +201,10 @@ public IngestService( List ingestPlugins, Client client, MatcherWatchdog matcherWatchdog, - DocumentParsingProvider documentParsingProvider, FailureStoreMetrics failureStoreMetrics ) { this.clusterService = clusterService; this.scriptService = scriptService; - this.documentParsingProvider = documentParsingProvider; this.processorFactories = processorFactories( ingestPlugins, new Processor.Parameters( @@ -238,7 +233,6 @@ public IngestService( IngestService(IngestService ingestService) { this.clusterService = ingestService.clusterService; this.scriptService = ingestService.scriptService; - this.documentParsingProvider = ingestService.documentParsingProvider; this.processorFactories = ingestService.processorFactories; this.threadPool = ingestService.threadPool; this.taskQueue = ingestService.taskQueue; @@ -776,10 +770,7 @@ protected void doRun() { } final int slot = i; final Releasable ref = refs.acquire(); - final XContentMeteringParserDecorator meteringParserDecorator = documentParsingProvider.newMeteringParserDecorator( - indexRequest - ); - final IngestDocument ingestDocument = newIngestDocument(indexRequest, meteringParserDecorator); + final IngestDocument ingestDocument = newIngestDocument(indexRequest); final org.elasticsearch.script.Metadata originalDocumentMetadata = ingestDocument.getMetadata().clone(); // the document listener gives us three-way logic: a document can fail processing (1), or it can // be successfully processed. a successfully processed document can be kept (2) or dropped (3). @@ -820,7 +811,6 @@ public void onFailure(Exception e) { ); executePipelines(pipelines, indexRequest, ingestDocument, resolveFailureStore, documentListener); - indexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes()); assert actionRequest.index() != null; i++; @@ -1159,14 +1149,14 @@ static String getProcessorName(Processor processor) { /** * Builds a new ingest document from the passed-in index request. 
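+     * <p>The source map is parsed with {@code XContentParserDecorator.NOOP}: metering no longer
+     * happens here, and document size is accounted for later, when the final document is parsed.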
*/ - private static IngestDocument newIngestDocument(final IndexRequest request, XContentParserDecorator parserDecorator) { + private static IngestDocument newIngestDocument(final IndexRequest request) { return new IngestDocument( request.index(), request.id(), request.version(), request.routing(), request.versionType(), - request.sourceAsMap(parserDecorator) + request.sourceAsMap(XContentParserDecorator.NOOP) ); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 784e02059823b..0a88a202ac8d3 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -285,7 +285,7 @@ static NodeConstruction prepareConstruction( ScriptService scriptService = constructor.createScriptService(settingsModule, threadPool, serviceProvider); - constructor.createUpdateHelper(documentParsingProvider, scriptService); + constructor.createUpdateHelper(scriptService); constructor.construct( threadPool, @@ -643,10 +643,10 @@ private DataStreamGlobalRetentionSettings createDataStreamServicesAndGlobalReten return dataStreamGlobalRetentionSettings; } - private UpdateHelper createUpdateHelper(DocumentParsingProvider documentParsingProvider, ScriptService scriptService) { - UpdateHelper updateHelper = new UpdateHelper(scriptService, documentParsingProvider); + private UpdateHelper createUpdateHelper(ScriptService scriptService) { + UpdateHelper updateHelper = new UpdateHelper(scriptService); - modules.add(b -> { b.bind(UpdateHelper.class).toInstance(new UpdateHelper(scriptService, documentParsingProvider)); }); + modules.add(b -> b.bind(UpdateHelper.class).toInstance(updateHelper)); return updateHelper; } @@ -701,7 +701,6 @@ private void construct( pluginsService.filterPlugins(IngestPlugin.class).toList(), client, IngestService.createGrokThreadWatchdog(environment, threadPool), - documentParsingProvider, failureStoreMetrics ); diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java index e1613caf9deac..9df7fd4c3bd43 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/DocumentParsingProvider.java @@ -9,7 +9,7 @@ package org.elasticsearch.plugins.internal; -import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.index.mapper.MapperService; /** @@ -40,7 +40,7 @@ default DocumentSizeAccumulator createDocumentSizeAccumulator() { /** * @return an observer */ - default XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { + default XContentMeteringParserDecorator newMeteringParserDecorator(IndexRequest request) { return XContentMeteringParserDecorator.NOOP; } } diff --git a/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java b/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java index e3b4415edcc01..6ccdac19acb91 100644 --- a/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java +++ b/server/src/main/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecorator.java @@ -9,17 +9,17 @@ package org.elasticsearch.plugins.internal; -import 
org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.xcontent.XContentParser; public interface XContentMeteringParserDecorator extends XContentParserDecorator { + long UNKNOWN_SIZE = -1; /** * a default noop implementation */ XContentMeteringParserDecorator NOOP = new XContentMeteringParserDecorator() { @Override - public DocumentSize meteredDocumentSize() { - return DocumentSize.UNKNOWN; + public long meteredDocumentSize() { + return UNKNOWN_SIZE; } @Override @@ -28,5 +28,5 @@ public XContentParser decorate(XContentParser xContentParser) { } }; - DocumentSize meteredDocumentSize(); + long meteredDocumentSize(); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 35ef892da59a2..b389e33993b9b 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -49,11 +49,11 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.plugins.internal.DocumentParsingProvider; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; -import org.mockito.ArgumentCaptor; import org.mockito.MockingDetails; import org.mockito.Mockito; import org.mockito.stubbing.Stubbing; @@ -114,13 +114,18 @@ public void testExecuteBulkIndexRequest() throws Exception { BulkItemRequest[] items = new BulkItemRequest[1]; boolean create = randomBoolean(); - DocWriteRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE).create(create); + IndexRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE).create(create); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); items[0] = primaryRequest; BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); randomlySetIgnoredPrimaryResponse(primaryRequest); + DocumentParsingProvider documentParsingProvider = mock(); + XContentMeteringParserDecorator parserDecorator = mock(); + when(documentParsingProvider.newMeteringParserDecorator(any())).thenReturn(parserDecorator); + when(parserDecorator.decorate(any())).then(i -> i.getArgument(0)); + BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest( context, @@ -129,7 +134,7 @@ public void testExecuteBulkIndexRequest() throws Exception { new NoopMappingUpdatePerformer(), (listener, mappingVersion) -> {}, ASSERTING_DONE_LISTENER, - DocumentParsingProvider.EMPTY_INSTANCE + documentParsingProvider ); assertFalse(context.hasMoreOperationsToExecute()); @@ -185,6 +190,8 @@ public void testExecuteBulkIndexRequest() throws Exception { assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT)); assertThat(replicaRequest, equalTo(primaryRequest)); + verify(documentParsingProvider).newMeteringParserDecorator(any()); + verify(parserDecorator).decorate(any()); // Assert that the document count is still 1 assertDocCount(shard, 1); @@ -600,9 +607,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { .retryOnConflict(retries); BulkItemRequest 
primaryRequest = new BulkItemRequest(0, writeRequest); - IndexRequest updateResponse = new IndexRequest("index").id("id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value") - .setNormalisedBytesParsed(0);// let's pretend this was modified by a script + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); DocumentParsingProvider documentParsingProvider = mock(DocumentParsingProvider.class); Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); @@ -655,11 +660,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT)); - // we have set 0 value on normalisedBytesParsed on the IndexRequest, like it happens with updates by script. - ArgumentCaptor argument = ArgumentCaptor.forClass(IndexRequest.class); - verify(documentParsingProvider, times(retries + 1)).newMeteringParserDecorator(argument.capture()); - IndexRequest value = argument.getValue(); - assertThat(value.getNormalisedBytesParsed(), equalTo(0L)); + verify(documentParsingProvider, times(retries + 1)).newMeteringParserDecorator(any()); } @SuppressWarnings("unchecked") @@ -668,9 +669,7 @@ public void testUpdateRequestWithSuccess() throws Exception { DocWriteRequest writeRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); - IndexRequest updateResponse = new IndexRequest("index").id("id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value") - .setNormalisedBytesParsed(100L); + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); DocumentParsingProvider documentParsingProvider = mock(DocumentParsingProvider.class); boolean created = randomBoolean(); @@ -721,10 +720,7 @@ public void testUpdateRequestWithSuccess() throws Exception { assertThat(response.status(), equalTo(created ? 
RestStatus.CREATED : RestStatus.OK)); assertThat(response.getSeqNo(), equalTo(13L)); - ArgumentCaptor argument = ArgumentCaptor.forClass(IndexRequest.class); - verify(documentParsingProvider, times(1)).newMeteringParserDecorator(argument.capture()); - IndexRequest value = argument.getValue(); - assertThat(value.getNormalisedBytesParsed(), equalTo(100L)); + verify(documentParsingProvider).newMeteringParserDecorator(updateResponse); } public void testUpdateWithDelete() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index 9729b653ae3d2..331f754d437a7 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.ProcessorInfo; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.reservedstate.service.ReservedClusterStateService; @@ -94,7 +93,6 @@ public void setup() { Collections.singletonList(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); Map factories = ingestService.getProcessorFactories(); diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index d8960bd902ac5..0cc2dcf38e8ff 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptEngine; @@ -121,7 +120,7 @@ public void setUp() throws Exception { final MockScriptEngine engine = new MockScriptEngine("mock", scripts, Collections.emptyMap()); Map engines = Collections.singletonMap(engine.getType(), engine); ScriptService scriptService = new ScriptService(baseSettings, engines, ScriptModule.CORE_CONTEXTS, () -> 1L); - updateHelper = new UpdateHelper(scriptService, DocumentParsingProvider.EMPTY_INSTANCE); + updateHelper = new UpdateHelper(scriptService); } @SuppressWarnings("unchecked") @@ -594,7 +593,7 @@ public void testNoopDetection() throws Exception { try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}"))) { request = new UpdateRequest("test", "1").fromXContent(parser); } - UpdateHelper updateHelper = new UpdateHelper(mock(ScriptService.class), DocumentParsingProvider.EMPTY_INSTANCE); + UpdateHelper updateHelper = new UpdateHelper(mock(ScriptService.class)); UpdateHelper.Result result = updateHelper.prepareUpdateIndexRequest(indexShard, request, getResult, true); assertThat(result.action(), instanceOf(UpdateResponse.class)); diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java 
b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 753602e73a30a..c626be7983c46 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -28,10 +28,10 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentType; @@ -217,7 +217,7 @@ public void testSlowLogMessageHasJsonFields() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] @@ -246,7 +246,7 @@ public void testSlowLogMessageHasAdditionalFields() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] @@ -276,7 +276,7 @@ public void testEmptyRoutingField() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Index index = new Index("foo", "123"); @@ -295,7 +295,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] @@ -327,7 +327,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); final XContentParseException e = expectThrows( diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 21aefd893de70..bba1fa338559f 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -109,7 +109,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; @@ -132,6 +131,7 @@ import org.elasticsearch.index.translog.TranslogOperationsUtils; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -5522,7 +5522,7 @@ public void testSeqNoGenerator() throws IOException { source, XContentType.JSON, 
null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); final Engine.Index index = new Engine.Index( diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index ccf0bbebcc354..9e7f5fbbce1a3 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.RetentionLeases; @@ -54,6 +53,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -567,7 +567,7 @@ private Engine.IndexResult index(String id, String testFieldValue) throws IOExce source, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Engine.Index index = new Engine.Index(uid, engine.config().getPrimaryTermSupplier().getAsLong(), doc); return engine.index(index); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index d0cabd609158b..97f49df41d099 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -56,7 +56,6 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.ParsedDocument.DocumentSize; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.LocalCheckpointTracker; @@ -64,6 +63,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog.Location; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.TransportVersionUtils; @@ -3395,7 +3395,7 @@ public void testTranslogOpSerialization() throws Exception { B_1, XContentType.JSON, null, - DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); Engine.Index eIndex = new Engine.Index( diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index d83fdbd5dd46b..b3ddc313eaf3a 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -54,10 +54,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; -import 
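// ---- editorial example (not part of the original patch) ----
// Every ParsedDocument construction site in these tests now passes the long sentinel
// XContentMeteringParserDecorator.UNKNOWN_SIZE instead of DocumentSize.UNKNOWN.
// Consumers would treat the sentinel as "no measurement"; a hedged sketch (the
// reporting hook is hypothetical, not an API from this patch):
long size = parserDecorator.meteredDocumentSize();
if (size != XContentMeteringParserDecorator.UNKNOWN_SIZE) {
    reportNormalisedBytesParsed(size); // hypothetical metrics callback, for illustration only
}
// ---- end editorial example ----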
org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; -import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptModule; @@ -68,7 +65,6 @@ import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.cbor.CborXContent; import org.junit.Before; @@ -157,7 +153,6 @@ public void testIngestPlugin() { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); Map factories = ingestService.getProcessorFactories(); @@ -178,7 +173,6 @@ public void testIngestPluginDuplicate() { List.of(DUMMY_PLUGIN, DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ) ); @@ -196,7 +190,6 @@ public void testExecuteIndexPipelineDoesNotExist() { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); final IndexRequest indexRequest = new IndexRequest("_index").id("_id") @@ -1194,66 +1187,6 @@ public void testExecuteBulkPipelineDoesNotExist() { verify(completionHandler, times(1)).accept(Thread.currentThread(), null); } - public void testExecuteBulkRequestCallsDocumentSizeObserver() { - /* - * This test makes sure that for both insert and upsert requests, when we call executeBulkRequest DocumentSizeObserver is - * called using a non-null index name. - */ - AtomicInteger wrappedObserverWasUsed = new AtomicInteger(0); - AtomicInteger parsedValueWasUsed = new AtomicInteger(0); - DocumentParsingProvider documentParsingProvider = new DocumentParsingProvider() { - @Override - public XContentMeteringParserDecorator newMeteringParserDecorator(DocWriteRequest request) { - return new XContentMeteringParserDecorator() { - @Override - public ParsedDocument.DocumentSize meteredDocumentSize() { - parsedValueWasUsed.incrementAndGet(); - return new ParsedDocument.DocumentSize(0, 0); - } - - @Override - public XContentParser decorate(XContentParser xContentParser) { - wrappedObserverWasUsed.incrementAndGet(); - return xContentParser; - } - }; - } - }; - IngestService ingestService = createWithProcessors( - Map.of("mock", (factories, tag, description, config) -> mockCompoundProcessor()), - documentParsingProvider - ); - - PutPipelineRequest putRequest = putJsonPipelineRequest("_id", "{\"processors\": [{\"mock\" : {}}]}"); - ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty - ClusterState previousClusterState = clusterState; - clusterState = executePut(putRequest, clusterState); - ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - - BulkRequest bulkRequest = new BulkRequest(); - UpdateRequest updateRequest = new UpdateRequest("_index", "_id1").upsert("{}", "{}"); - updateRequest.upsertRequest().setPipeline("_id"); - bulkRequest.add(updateRequest); - IndexRequest indexRequest = new IndexRequest("_index").id("_id1").source(Map.of()).setPipeline("_id1"); - bulkRequest.add(indexRequest); - @SuppressWarnings("unchecked") - BiConsumer failureHandler = mock(BiConsumer.class); - @SuppressWarnings("unchecked") - final BiConsumer completionHandler = 
mock(BiConsumer.class); - ingestService.executeBulkRequest( - bulkRequest.numberOfActions(), - bulkRequest.requests(), - indexReq -> {}, - (s) -> false, - (slot, targetIndex, e) -> fail("Should not be redirecting failures"), - failureHandler, - completionHandler, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); - assertThat(wrappedObserverWasUsed.get(), equalTo(2)); - assertThat(parsedValueWasUsed.get(), equalTo(2)); - } - public void testExecuteSuccess() { IngestService ingestService = createWithProcessors( Map.of("mock", (factories, tag, description, config) -> mockCompoundProcessor()) @@ -2271,7 +2204,6 @@ public Map getProcessors(Processor.Parameters paramet List.of(testPlugin), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); ingestService.addIngestClusterStateListener(ingestClusterStateListener); @@ -2611,7 +2543,6 @@ private void testUpdatingPipeline(String pipelineString) throws Exception { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, clusterState)); @@ -2921,13 +2852,6 @@ private static IngestService createWithProcessors() { } private static IngestService createWithProcessors(Map processors) { - return createWithProcessors(processors, DocumentParsingProvider.EMPTY_INSTANCE); - } - - private static IngestService createWithProcessors( - Map processors, - DocumentParsingProvider documentParsingProvider - ) { Client client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); @@ -2946,7 +2870,6 @@ public Map getProcessors(final Processor.Parameters p }), client, null, - documentParsingProvider, FailureStoreMetrics.NOOP ); if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java index 94b3607bd7608..e8115e7266176 100644 --- a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -132,7 +131,6 @@ public Map getProcessors(final Processor.Parameters p List.of(ingestPlugin), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index c46d98fe1cd8b..e0363d84ea4d2 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -2405,7 +2405,6 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { Collections.emptyList(), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ), mockFeatureService, @@ -2425,7 +2424,7 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { threadPool, shardStateAction, mappingUpdatedAction, - new UpdateHelper(scriptService, 
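// ---- editorial note (not part of the original patch) ----
// A consequence of moving metering out of the update path is that UpdateHelper and
// IngestService drop their DocumentParsingProvider constructor argument, so test
// setup shrinks, as the surrounding hunks show, from
//   new UpdateHelper(scriptService, DocumentParsingProvider.EMPTY_INSTANCE)
// to
//   new UpdateHelper(scriptService)
// ---- end editorial note ----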
DocumentParsingProvider.EMPTY_INSTANCE), + new UpdateHelper(scriptService), actionFilters, indexingMemoryLimits, EmptySystemIndices.INSTANCE, diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 4713adf6cf01d..87c566d543d0f 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -100,6 +100,7 @@ import org.elasticsearch.index.translog.TranslogDeletionPolicy; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -428,7 +429,7 @@ protected static ParsedDocument testParsedDocument( source, XContentType.JSON, mappingUpdate, - ParsedDocument.DocumentSize.UNKNOWN + XContentMeteringParserDecorator.UNKNOWN_SIZE ); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index 7e88cad88dcec..bb973bf4359e8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; @@ -139,7 +138,6 @@ public void setUpVariables() { Collections.singletonList(SKINNY_INGEST_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE, FailureStoreMetrics.NOOP ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 5710b031494bf..c2e9a92e45353 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -115,7 +115,6 @@ import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.plugins.internal.DocumentParsingProvider; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -1580,7 +1579,7 @@ public void testDenialErrorMessagesForBulkIngest() throws Exception { TransportShardBulkAction.performOnPrimary( request, indexShard, - new UpdateHelper(mock(ScriptService.class), DocumentParsingProvider.EMPTY_INSTANCE), + new UpdateHelper(mock(ScriptService.class)), System::currentTimeMillis, mappingUpdater, waitForMappingUpdate, From 5e98251bdab337beb218892d30529a7290f4e5a3 Mon 
Sep 17 00:00:00 2001 From: Mike Pellegrini Date: Fri, 25 Oct 2024 14:57:46 -0400 Subject: [PATCH 165/202] Remove "Use ELSER By Default For Semantic Text" Changelog Entry (#115686) --- docs/changelog/113563.yaml | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 docs/changelog/113563.yaml diff --git a/docs/changelog/113563.yaml b/docs/changelog/113563.yaml deleted file mode 100644 index 48484ead99d77..0000000000000 --- a/docs/changelog/113563.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113563 -summary: Use ELSER By Default For Semantic Text -area: Mapping -type: enhancement -issues: [] From ca193bb923bf2a42df06dd39c8ad50068842879a Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 25 Oct 2024 14:56:13 -0500 Subject: [PATCH 166/202] Adding additional checks for IPInfo results (#115481) --- .../geoip/IpinfoIpDataLookupsTests.java | 113 ++++++++++++++++-- 1 file changed, 106 insertions(+), 7 deletions(-) diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookupsTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookupsTests.java index d0cdc5a3e1b5e..11aa123824d18 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookupsTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookupsTests.java @@ -14,8 +14,10 @@ import com.maxmind.db.Reader; import org.apache.lucene.util.Constants; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; import org.junit.After; @@ -113,7 +115,10 @@ public void testAsnFree() { entry("asn", 16625L), entry("network", "23.32.184.0/21"), entry("domain", "akamai.com") - ) + ), + Map.ofEntries(entry("name", "organization_name"), entry("asn", "asn"), entry("network", "network"), entry("domain", "domain")), + Set.of("ip"), + Set.of() ); } @@ -133,7 +138,17 @@ public void testAsnStandard() { entry("domain", "tpx.com"), entry("type", "hosting"), entry("country_iso_code", "US") - ) + ), + Map.ofEntries( + entry("name", "organization_name"), + entry("asn", "asn"), + entry("network", "network"), + entry("domain", "domain"), + entry("country", "country_iso_code"), + entry("type", "type") + ), + Set.of("ip"), + Set.of() ); } @@ -188,7 +203,16 @@ public void testCountryFree() { entry("country_iso_code", "IE"), entry("continent_name", "Europe"), entry("continent_code", "EU") - ) + ), + Map.ofEntries( + entry("continent_name", "continent_name"), + entry("continent", "continent_code"), + entry("country", "country_iso_code"), + entry("country_name", "country_name"), + entry("type", "type") + ), + Set.of("ip"), + Set.of("network") ); } @@ -208,7 +232,18 @@ public void testGeolocationStandard() { entry("timezone", "Europe/London"), entry("postal_code", "E1W"), entry("location", Map.of("lat", 51.50853, "lon", -0.12574)) - ) + ), + Map.ofEntries( + entry("country", "country_iso_code"), + entry("region", "region_name"), + entry("city", "city_name"), + entry("timezone", "timezone"), + entry("postal_code", "postal_code"), + entry("lat", "location"), + entry("lng", "location") + ), + Set.of("ip", "location"), + Set.of("geoname_id", "region_code") ); } @@ -266,7 +301,16 @@ public void testPrivacyDetectionStandard() { entry("relay", false), entry("tor", false), entry("vpn", true) - ) + ), + Map.ofEntries( 
+ entry("hosting", "hosting"), + entry("proxy", "proxy"), + entry("relay", "relay"), + entry("tor", "tor"), + entry("vpn", "vpn") + ), + Set.of("ip"), + Set.of("network", "service") ); } @@ -286,7 +330,17 @@ public void testPrivacyDetectionStandardNonEmptyService() { entry("relay", false), entry("tor", false), entry("vpn", true) - ) + ), + Map.ofEntries( + entry("hosting", "hosting"), + entry("proxy", "proxy"), + entry("service", "service"), + entry("relay", "relay"), + entry("tor", "tor"), + entry("vpn", "vpn") + ), + Set.of("ip"), + Set.of("network") ); } @@ -438,7 +492,15 @@ private static File pathToFile(Path databasePath) { return databasePath.toFile(); } - private void assertExpectedLookupResults(String databaseName, String ip, IpDataLookup lookup, Map expected) { + private void assertExpectedLookupResults( + String databaseName, + String ip, + IpDataLookup lookup, + Map expected, + Map keyMappings, + Set knownAdditionalKeys, + Set knownMissingKeys + ) { try (DatabaseReaderLazyLoader loader = loader(databaseName)) { Map actual = lookup.getData(loader, ip); assertThat( @@ -449,6 +511,7 @@ private void assertExpectedLookupResults(String databaseName, String ip, IpDataL for (Map.Entry entry : expected.entrySet()) { assertThat("Unexpected value for key [" + entry.getKey() + "]", actual.get(entry.getKey()), equalTo(entry.getValue())); } + assertActualResultsMatchReader(actual, databaseName, ip, keyMappings, knownAdditionalKeys, knownMissingKeys); } catch (AssertionError e) { fail(e, "Assert failed for database [%s] with address [%s]", databaseName, ip); } catch (Exception e) { @@ -456,6 +519,42 @@ private void assertExpectedLookupResults(String databaseName, String ip, IpDataL } } + private void assertActualResultsMatchReader( + Map actual, + String databaseName, + String ip, + Map keyMappings, + Set knownAdditionalKeys, + Set knownMissingKeys + ) throws IOException { + Path databasePath = tmpDir.resolve(databaseName); + try (Reader reader = new Reader(pathToFile(databasePath))) { + @SuppressWarnings("unchecked") + Map data = reader.get(InetAddresses.forString(ip), Map.class); + for (String key : data.keySet()) { + if (keyMappings.containsKey(key)) { + assertTrue( + Strings.format( + "The reader returned key [%s] that is expected to map to key [%s], but [%s] did not appear in the " + + "actual data", + key, + keyMappings.get(key), + keyMappings.get(key) + ), + actual.containsKey(keyMappings.get(key)) + ); + } else if (knownMissingKeys.contains(key) == false) { + fail(null, "The reader returned unexpected key [%s]", key); + } + } + for (String key : actual.keySet()) { + if (keyMappings.containsValue(key) == false && knownAdditionalKeys.contains(key) == false) { + fail(null, "Unexpected key [%s] in results", key); + } + } + } + } + private DatabaseReaderLazyLoader loader(final String databaseName) { Path path = tmpDir.resolve(databaseName); copyDatabase("ipinfo/" + databaseName, path); // the ipinfo databases are prefixed on the test classpath From 1ed7ff50a9874491648926ebfe754204298bb939 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 07:22:31 +1100 Subject: [PATCH 167/202] Mute org.elasticsearch.xpack.shutdown.NodeShutdownIT testStalledShardMigrationProperlyDetected #115697 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 91644c9af70ca..be2c8d03c3931 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: 
org.elasticsearch.action.update.UpdateResponseTests method: testToAndFromXContent issue: https://github.com/elastic/elasticsearch/issues/115689 +- class: org.elasticsearch.xpack.shutdown.NodeShutdownIT + method: testStalledShardMigrationProperlyDetected + issue: https://github.com/elastic/elasticsearch/issues/115697 # Examples: # From 9fffd2962ed8e6e53ba4ab60b5bcf54f2d73aeaa Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Fri, 25 Oct 2024 13:25:46 -0700 Subject: [PATCH 168/202] Add challenge tests for logsdb that utilize stored source (#115606) --- .../xpack/logsdb/qa/DataGenerationHelper.java | 1 + ...ndexedIntoStandardModeChallengeRestIT.java | 30 ------------ ...ndexedIntoStoredSourceChallengeRestIT.java | 48 ++++++++++++++++++ ...bVersusReindexedLogsDbChallengeRestIT.java | 30 ------------ ...VersusLogsStoredSourceChallengeRestIT.java | 22 +++++++++ ...bVersusReindexedLogsDbChallengeRestIT.java | 49 +++++++++++++++++++ 6 files changed, 120 insertions(+), 60 deletions(-) create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedIntoStoredSourceChallengeRestIT.java create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsStoredSourceChallengeRestIT.java create mode 100644 x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT.java diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/DataGenerationHelper.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/DataGenerationHelper.java index c03e8aea9c2ac..8a5bb8d12cd3d 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/DataGenerationHelper.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/DataGenerationHelper.java @@ -89,6 +89,7 @@ void standardMapping(XContentBuilder builder) throws IOException { } void logsDbSettings(Settings.Builder builder) { + builder.put("index.mode", "logsdb"); if (keepArraySource) { builder.put(Mapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING.getKey(), "arrays"); } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT.java index 0329f7723a108..d9abdc2cde446 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT.java @@ -7,17 +7,10 @@ package org.elasticsearch.xpack.logsdb.qa; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.List; -import java.util.Locale; - -import static org.hamcrest.Matchers.equalTo; /** * This test compares behavior of a logsdb data stream and a standard index mode data stream @@ -52,27 +45,4 @@ public void baselineMappings(XContentBuilder builder) throws IOException { public void contenderMappings(XContentBuilder builder) throws IOException { 
dataGenerationHelper.standardMapping(builder); } - - @Override - public Response indexContenderDocuments(CheckedSupplier, IOException> documentsSupplier) throws IOException { - var reindexRequest = new Request("POST", "/_reindex?refresh=true"); - reindexRequest.setJsonEntity(String.format(Locale.ROOT, """ - { - "source": { - "index": "%s" - }, - "dest": { - "index": "%s", - "op_type": "create" - } - } - """, getBaselineDataStreamName(), getContenderDataStreamName())); - var response = client.performRequest(reindexRequest); - assertOK(response); - - var body = entityAsMap(response); - assertThat("encountered failures when performing reindex:\n " + body, body.get("failures"), equalTo(List.of())); - - return response; - } } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedIntoStoredSourceChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedIntoStoredSourceChallengeRestIT.java new file mode 100644 index 0000000000000..776a6faf7fa07 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedIntoStoredSourceChallengeRestIT.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb.qa; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * This test compares behavior of a standard mode data stream and a logsdb data stream using stored source. + * There should be no differences between such two data streams. 
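// ---- editorial example (not part of the original patch) ----
// The indexContenderDocuments override deleted above was duplicated across the two
// reindex tests; it presumably now lives once in the shared ReindexChallengeRestIT
// base class. Sketch of the reindex call it issues, reconstructed from the removed
// hunk:
Request reindexRequest = new Request("POST", "/_reindex?refresh=true");
reindexRequest.setJsonEntity(String.format(Locale.ROOT, """
    {
      "source": { "index": "%s" },
      "dest": { "index": "%s", "op_type": "create" }
    }
    """, getBaselineDataStreamName(), getContenderDataStreamName()));
Response response = client.performRequest(reindexRequest);
assertOK(response);
// ---- end editorial example ----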
+ */ +public class LogsDbVersusReindexedIntoStoredSourceChallengeRestIT extends ReindexChallengeRestIT { + public String getBaselineDataStreamName() { + return "logs-apache-baseline"; + } + + public String getContenderDataStreamName() { + return "logs-apache-reindexed"; + } + + @Override + public void baselineSettings(Settings.Builder builder) { + dataGenerationHelper.logsDbSettings(builder); + } + + @Override + public void contenderSettings(Settings.Builder builder) { + dataGenerationHelper.logsDbSettings(builder); + builder.put("index.mapping.source.mode", "stored"); + } + + @Override + public void baselineMappings(XContentBuilder builder) throws IOException { + dataGenerationHelper.logsDbMapping(builder); + } + + @Override + public void contenderMappings(XContentBuilder builder) throws IOException { + dataGenerationHelper.logsDbMapping(builder); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedLogsDbChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedLogsDbChallengeRestIT.java index 1c425cf30907b..8b00c647b5dd0 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedLogsDbChallengeRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/LogsDbVersusReindexedLogsDbChallengeRestIT.java @@ -7,17 +7,10 @@ package org.elasticsearch.xpack.logsdb.qa; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.List; -import java.util.Locale; - -import static org.hamcrest.Matchers.equalTo; /** * This test compares behavior of a logsdb data stream and a data stream containing @@ -52,27 +45,4 @@ public void baselineMappings(XContentBuilder builder) throws IOException { public void contenderMappings(XContentBuilder builder) throws IOException { dataGenerationHelper.logsDbMapping(builder); } - - @Override - public Response indexContenderDocuments(CheckedSupplier, IOException> documentsSupplier) throws IOException { - var reindexRequest = new Request("POST", "/_reindex?refresh=true"); - reindexRequest.setJsonEntity(String.format(Locale.ROOT, """ - { - "source": { - "index": "%s" - }, - "dest": { - "index": "%s", - "op_type": "create" - } - } - """, getBaselineDataStreamName(), getContenderDataStreamName())); - var response = client.performRequest(reindexRequest); - assertOK(response); - - var body = entityAsMap(response); - assertThat("encountered failures when performing reindex:\n " + body, body.get("failures"), equalTo(List.of())); - - return response; - } } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsStoredSourceChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsStoredSourceChallengeRestIT.java new file mode 100644 index 0000000000000..2f018b7dc0b38 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsStoredSourceChallengeRestIT.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb.qa; + +import org.elasticsearch.common.settings.Settings; + +/** + * This test compares behavior of a standard mode data stream and a logsdb data stream using stored source. + * There should be no differences between such two data streams. + */ +public class StandardVersusLogsStoredSourceChallengeRestIT extends StandardVersusLogsIndexModeRandomDataChallengeRestIT { + @Override + public void contenderSettings(Settings.Builder builder) { + super.contenderSettings(builder); + builder.put("index.mapping.source.mode", "stored"); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT.java new file mode 100644 index 0000000000000..a0672daafb243 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.logsdb.qa; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * This test compares behavior of a logsdb data stream using stored source and a logsdb data stream + * containing data reindexed from initial data stream. + * There should be no differences between such two data streams. 
+ */ +public class StoredSourceLogsDbVersusReindexedLogsDbChallengeRestIT extends ReindexChallengeRestIT { + public String getBaselineDataStreamName() { + return "logs-apache-baseline"; + } + + public String getContenderDataStreamName() { + return "logs-apache-reindexed"; + } + + @Override + public void baselineSettings(Settings.Builder builder) { + dataGenerationHelper.logsDbSettings(builder); + builder.put("index.mapping.source.mode", "stored"); + } + + @Override + public void contenderSettings(Settings.Builder builder) { + dataGenerationHelper.logsDbSettings(builder); + } + + @Override + public void baselineMappings(XContentBuilder builder) throws IOException { + dataGenerationHelper.logsDbMapping(builder); + } + + @Override + public void contenderMappings(XContentBuilder builder) throws IOException { + dataGenerationHelper.logsDbMapping(builder); + } +} From 38a19bfa2e4918db8ee64e8ca9a2c2c11ef82051 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 07:32:02 +1100 Subject: [PATCH 169/202] Mute org.elasticsearch.index.get.GetResultTests testToAndFromXContentEmbedded #115657 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index be2c8d03c3931..7675bcc4f2a28 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testStalledShardMigrationProperlyDetected issue: https://github.com/elastic/elasticsearch/issues/115697 +- class: org.elasticsearch.index.get.GetResultTests + method: testToAndFromXContentEmbedded + issue: https://github.com/elastic/elasticsearch/issues/115657 # Examples: # From 83578872d949879e93675c9e6b037072b0209ba4 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 25 Oct 2024 15:33:54 -0500 Subject: [PATCH 170/202] Fixing DatabaseNodeServiceIT testNonGzippedDatabase and testGzippedDatabase race condition (#115463) Co-authored-by: Joe Gallo --- .../ingest/geoip/DatabaseNodeServiceIT.java | 28 +++++++++++++------ muted-tests.yml | 6 ---- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceIT.java index 786f091e0c024..7331afdbf585a 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceIT.java @@ -46,15 +46,21 @@ public class DatabaseNodeServiceIT extends AbstractGeoIpIT { public void testNonGzippedDatabase() throws Exception { String databaseType = "GeoLite2-Country"; String databaseFileName = databaseType + ".mmdb"; - // making the dabase name unique so we know we're not using another one: + // making the database name unique so we know we're not using another one: String databaseName = randomAlphaOfLength(20) + "-" + databaseFileName; byte[] mmdbBytes = getBytesForFile(databaseFileName); final DatabaseNodeService databaseNodeService = internalCluster().getInstance(DatabaseNodeService.class); assertNull(databaseNodeService.getDatabase(databaseName)); int numChunks = indexData(databaseName, mmdbBytes); - retrieveDatabase(databaseNodeService, databaseName, mmdbBytes, numChunks); - assertBusy(() -> assertNotNull(databaseNodeService.getDatabase(databaseName))); - 
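// ---- editorial example (not part of the original patch) ----
// The fix above widens the assertBusy scope: retrieval and all assertions are retried
// together, so a rare concurrent checkDatabases() cleanup only forces another attempt
// instead of failing the test. assertBusy is roughly equivalent to this hand-rolled
// loop (simplified; the real helper also sleeps with backoff between attempts):
AssertionError last = null;
for (int attempt = 0; attempt < 100; attempt++) {
    try {
        retrieveDatabase(databaseNodeService, databaseName, mmdbBytes, numChunks);
        assertNotNull(databaseNodeService.getDatabase(databaseName));
        assertValidDatabase(databaseNodeService, databaseName, databaseType);
        last = null;
        break;
    } catch (AssertionError e) {
        last = e; // remember the failure and retry
    }
}
if (last != null) throw last;
// ---- end editorial example ----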
assertValidDatabase(databaseNodeService, databaseName, databaseType); + /* + * If DatabaseNodeService::checkDatabases runs it will sometimes (rarely) remove the database we are using in this test while we + * are trying to assert things about it. So if it does then we 'just' try again. + */ + assertBusy(() -> { + retrieveDatabase(databaseNodeService, databaseName, mmdbBytes, numChunks); + assertNotNull(databaseNodeService.getDatabase(databaseName)); + assertValidDatabase(databaseNodeService, databaseName, databaseType); + }); } /* @@ -64,16 +70,22 @@ public void testNonGzippedDatabase() throws Exception { public void testGzippedDatabase() throws Exception { String databaseType = "GeoLite2-Country"; String databaseFileName = databaseType + ".mmdb"; - // making the dabase name unique so we know we're not using another one: + // making the database name unique so we know we're not using another one: String databaseName = randomAlphaOfLength(20) + "-" + databaseFileName; byte[] mmdbBytes = getBytesForFile(databaseFileName); byte[] gzipBytes = gzipFileBytes(databaseName, mmdbBytes); final DatabaseNodeService databaseNodeService = internalCluster().getInstance(DatabaseNodeService.class); assertNull(databaseNodeService.getDatabase(databaseName)); int numChunks = indexData(databaseName, gzipBytes); - retrieveDatabase(databaseNodeService, databaseName, gzipBytes, numChunks); - assertBusy(() -> assertNotNull(databaseNodeService.getDatabase(databaseName))); - assertValidDatabase(databaseNodeService, databaseName, databaseType); + /* + * If DatabaseNodeService::checkDatabases runs it will sometimes (rarely) remove the database we are using in this test while we + * are trying to assert things about it. So if it does then we 'just' try again. + */ + assertBusy(() -> { + retrieveDatabase(databaseNodeService, databaseName, gzipBytes, numChunks); + assertNotNull(databaseNodeService.getDatabase(databaseName)); + assertValidDatabase(databaseNodeService, databaseName, databaseType); + }); } /* diff --git a/muted-tests.yml b/muted-tests.yml index 7675bcc4f2a28..fad1304d73059 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -182,12 +182,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113722 -- class: org.elasticsearch.ingest.geoip.DatabaseNodeServiceIT - method: testNonGzippedDatabase - issue: https://github.com/elastic/elasticsearch/issues/113821 -- class: org.elasticsearch.ingest.geoip.DatabaseNodeServiceIT - method: testGzippedDatabase - issue: https://github.com/elastic/elasticsearch/issues/113752 - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 From d9c776468dca054aab44512def67e51a116e1a20 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 25 Oct 2024 14:28:35 -0700 Subject: [PATCH 171/202] Enable preview features on native modules during IntelliJ import (#115698) --- .../src/main/groovy/elasticsearch.ide.gradle | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index d3209ff27ce06..67878181a005d 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -122,6 +122,36 @@ if (providers.systemProperty('idea.active').getOrNull() 
== 'true') { .findAll { it != null } } + // force IntelliJ to generate *.iml files for each imported module + tasks.register("enableExternalConfiguration") { + group = 'ide' + description = 'Enable per-module *.iml files' + + doLast { + modifyXml('.idea/misc.xml') {xml -> + def externalStorageConfig = xml.component.find { it.'@name' == 'ExternalStorageConfigurationManager' } + if (externalStorageConfig) { + xml.remove(externalStorageConfig) + } + } + } + } + + // modifies the idea module config to enable preview features on 'elasticsearch-native' module + tasks.register("enablePreviewFeatures") { + group = 'ide' + description = 'Enables preview features on native library module' + dependsOn tasks.named("enableExternalConfiguration") + + doLast { + ['main', 'test'].each { sourceSet -> + modifyXml(".idea/modules/libs/native/elasticsearch.libs.elasticsearch-native.${sourceSet}.iml") { xml -> + xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = 'JDK_21_PREVIEW' + } + } + } + } + tasks.register('buildDependencyArtifacts') { group = 'ide' description = 'Builds artifacts needed as dependency for IDE modules' @@ -149,7 +179,10 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { testRunner = 'choose_per_test' } taskTriggers { - afterSync tasks.named('configureIdeCheckstyle'), tasks.named('configureIdeaGradleJvm'), tasks.named('buildDependencyArtifacts') + afterSync tasks.named('configureIdeCheckstyle'), + tasks.named('configureIdeaGradleJvm'), + tasks.named('buildDependencyArtifacts'), + tasks.named('enablePreviewFeatures') } encodings { encoding = 'UTF-8' From d887d8e045bde0a5c8c1165dd05942c96c9c048e Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 08:52:15 +1100 Subject: [PATCH 172/202] Mute org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT testGeoShapeGeoHash #115664 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index fad1304d73059..c0c716f2e26cf 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.index.get.GetResultTests method: testToAndFromXContentEmbedded issue: https://github.com/elastic/elasticsearch/issues/115657 +- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT + method: testGeoShapeGeoHash + issue: https://github.com/elastic/elasticsearch/issues/115664 # Examples: # From 9b951cd92ed1fb7bb242d286bef6c5ba72dfc730 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 26 Oct 2024 16:47:29 +1100 Subject: [PATCH 173/202] Mute org.elasticsearch.xpack.inference.InferenceCrudIT testSupportedStream #113430 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c0c716f2e26cf..97a4864e57f8a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT method: testGeoShapeGeoHash issue: https://github.com/elastic/elasticsearch/issues/115664 +- class: org.elasticsearch.xpack.inference.InferenceCrudIT + method: testSupportedStream + issue: https://github.com/elastic/elasticsearch/issues/113430 # Examples: # From 2b3d41ac2771180e4c034bb5ecd565ea30fa1f87 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 27 Oct 2024 00:51:56 
+1100 Subject: [PATCH 174/202] Mute org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT testGeoShapeGeoTile #115717 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 97a4864e57f8a..3a59af6234038 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -288,6 +288,9 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testSupportedStream issue: https://github.com/elastic/elasticsearch/issues/113430 +- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT + method: testGeoShapeGeoTile + issue: https://github.com/elastic/elasticsearch/issues/115717 # Examples: # From 2f2ddad00492fcac8fbfc272607a8db91d279385 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sat, 26 Oct 2024 06:55:49 -0700 Subject: [PATCH 175/202] Improve error message for unparseable numeric settings (#115609) When a numeric setting is too large or too small such that it can't be parsed at all, the error message is the same as for garbage values. This commit improves the error message in these cases to be the same as for normal bounds checks. closes #115080 --- .../common/settings/Setting.java | 58 ++++++++++++++++--- .../common/settings/SettingTests.java | 34 +++++++++++ 2 files changed, 84 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index a0b6e665042d0..aec9c108d898d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -34,6 +34,7 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.math.BigInteger; import java.time.Instant; import java.util.Arrays; import java.util.Collections; @@ -1485,27 +1486,68 @@ public static int parseInt(String s, int minValue, int maxValue, String key) { } public static int parseInt(String s, int minValue, int maxValue, String key, boolean isFiltered) { - int value = Integer.parseInt(s); + int value; + try { + value = Integer.parseInt(s); + } catch (NumberFormatException e) { + // check if value is a number or garbage + try { + var bi = new BigInteger(s); + // it's a number, so check which bound it is outside + if (bi.compareTo(BigInteger.valueOf(minValue)) < 0) { + throw newNumericBoundsException(s, key, isFiltered, ">=", minValue); + } else { + throw newNumericBoundsException(s, key, isFiltered, "<=", maxValue); + } + } catch (NumberFormatException e2) { + throw e; // it's garbage, use the original exception + } + } if (value < minValue) { - String err = "Failed to parse value" + (isFiltered ? "" : " [" + s + "]") + " for setting [" + key + "] must be >= " + minValue; - throw new IllegalArgumentException(err); + throw newNumericBoundsException(s, key, isFiltered, ">=", minValue); } if (value > maxValue) { - String err = "Failed to parse value" + (isFiltered ? 
"" : " [" + s + "]") + " for setting [" + key + "] must be <= " + maxValue; - throw new IllegalArgumentException(err); + throw newNumericBoundsException(s, key, isFiltered, "<=", maxValue); } return value; } static long parseLong(String s, long minValue, String key, boolean isFiltered) { - long value = Long.parseLong(s); + long value; + try { + value = Long.parseLong(s); + } catch (NumberFormatException e) { + // check if value is a number or garbage + try { + var bi = new BigInteger(s); + // it's a number, so check which bound it is outside + if (bi.compareTo(BigInteger.valueOf(minValue)) < 0) { + throw newNumericBoundsException(s, key, isFiltered, ">=", minValue); + } else { + throw newNumericBoundsException(s, key, isFiltered, "<=", Long.MAX_VALUE); + } + } catch (NumberFormatException e2) { + throw e; // it's garbage, use the original exception + } + } if (value < minValue) { - String err = "Failed to parse value" + (isFiltered ? "" : " [" + s + "]") + " for setting [" + key + "] must be >= " + minValue; - throw new IllegalArgumentException(err); + throw newNumericBoundsException(s, key, isFiltered, ">=", minValue); } return value; } + private static IllegalArgumentException newNumericBoundsException(String s, String key, boolean isFiltered, String type, long bound) { + String err = "Failed to parse value" + + (isFiltered ? "" : " [" + s + "]") + + " for setting [" + + key + + "] must be " + + type + + " " + + bound; + throw new IllegalArgumentException(err); + } + public static Setting intSetting(String key, int defaultValue, Property... properties) { return intSetting(key, defaultValue, Integer.MIN_VALUE, properties); } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index ba78ea5cf08a6..75f5045c5fbb6 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -1522,4 +1522,38 @@ public void testDeprecationPropertyValidation() { () -> Setting.boolSetting("a.bool.setting", true, Property.DeprecatedWarning, Property.IndexSettingDeprecatedInV7AndRemovedInV8) ); } + + public void testIntSettingBounds() { + Setting setting = Setting.intSetting("int.setting", 0, Integer.MIN_VALUE, Integer.MAX_VALUE); + var e = expectThrows( + IllegalArgumentException.class, + () -> setting.get(Settings.builder().put("int.setting", "2147483648").build()) + ); + assertThat(e.getMessage(), equalTo("Failed to parse value [2147483648] for setting [int.setting] must be <= 2147483647")); + var e2 = expectThrows( + IllegalArgumentException.class, + () -> setting.get(Settings.builder().put("int.setting", "-2147483649").build()) + ); + assertThat(e2.getMessage(), equalTo("Failed to parse value [-2147483649] for setting [int.setting] must be >= -2147483648")); + } + + public void testLongSettingBounds() { + Setting setting = Setting.longSetting("long.setting", 0, Long.MIN_VALUE); + var e = expectThrows( + IllegalArgumentException.class, + () -> setting.get(Settings.builder().put("long.setting", "9223372036854775808").build()) + ); + assertThat( + e.getMessage(), + equalTo("Failed to parse value [9223372036854775808] for setting [long.setting] must be <= 9223372036854775807") + ); + var e2 = expectThrows( + IllegalArgumentException.class, + () -> setting.get(Settings.builder().put("long.setting", "-9223372036854775809").build()) + ); + assertThat( + e2.getMessage(), + equalTo("Failed to parse value 
[-9223372036854775809] for setting [long.setting] must be >= -9223372036854775808") + ); + } } From 06cdd11193a5c551ce75edd9713a52d389144f4c Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 28 Oct 2024 10:50:11 +1100 Subject: [PATCH 176/202] Retry on 403 for S3 put in certain environments (#115486) This PR configures a new retry condition for s3 client so that it retries on 403 for operations such as PUT in certain environments. Note that 403 is already retried for GET due to S3RetryingInputStream. Resolves: ES-9321 --- .../s3/S3BlobStoreRepositoryMetricsTests.java | 48 +++++++++++++++++++ .../repositories/s3/S3Service.java | 32 ++++++++++++- .../s3/AwsS3ServiceImplTests.java | 4 +- .../s3/S3ClientSettingsTests.java | 14 ++++-- .../repositories/s3/S3ServiceTests.java | 33 ++++++++++--- 5 files changed, 118 insertions(+), 13 deletions(-) diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index 21f42bf9eb99c..b1c5d707220af 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.PluginsService; @@ -53,11 +54,13 @@ import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_THROTTLES_TOTAL; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_UNSUCCESSFUL_OPERATIONS_TOTAL; import static org.elasticsearch.repositories.s3.S3RepositoriesMetrics.METRIC_DELETE_RETRIES_HISTOGRAM; +import static org.elasticsearch.rest.RestStatus.FORBIDDEN; import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.REQUESTED_RANGE_NOT_SATISFIED; import static org.elasticsearch.rest.RestStatus.SERVICE_UNAVAILABLE; import static org.elasticsearch.rest.RestStatus.TOO_MANY_REQUESTS; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -320,6 +323,51 @@ public void testRetrySnapshotDeleteMetricsWhenRetriesExhausted() { assertThat(longHistogramMeasurement.get(0).getLong(), equalTo(3L)); } + public void testPutDoesNotRetryOn403InStateful() { + final Settings settings = internalCluster().getInstance(Settings.class); + assertThat(DiscoveryNode.isStateless(settings), equalTo(false)); + + final String repository = createRepository(randomRepositoryName()); + final String dataNodeName = internalCluster().getNodeNameThat(DiscoveryNode::canContainData); + final TestTelemetryPlugin plugin = getPlugin(dataNodeName); + // Exclude snapshot related purpose to avoid trigger assertions for cross-checking purpose and blob names + final OperationPurpose purpose = randomFrom( + OperationPurpose.REPOSITORY_ANALYSIS, + OperationPurpose.CLUSTER_STATE, + OperationPurpose.INDICES, + OperationPurpose.TRANSLOG 
+ ); + final BlobContainer blobContainer = getBlobContainer(dataNodeName, repository); + final String blobName = randomIdentifier(); + + plugin.resetMeter(); + addErrorStatus(new S3ErrorResponse(FORBIDDEN, Strings.format(""" + + + InvalidAccessKeyId + The AWS Access Key Id you provided does not exist in our records. + %s + """, randomUUID()))); + + final var exception = expectThrows(IOException.class, () -> { + if (randomBoolean()) { + blobContainer.writeBlob(purpose, blobName, new BytesArray("blob"), randomBoolean()); + } else { + blobContainer.writeMetadataBlob( + purpose, + blobName, + randomBoolean(), + randomBoolean(), + outputStream -> outputStream.write("blob".getBytes()) + ); + } + }); + assertThat(exception.getCause().getMessage(), containsString("InvalidAccessKeyId")); + + assertThat(getLongCounterValue(plugin, METRIC_REQUESTS_TOTAL, Operation.PUT_OBJECT), equalTo(1L)); + assertThat(getLongCounterValue(plugin, METRIC_EXCEPTIONS_TOTAL, Operation.PUT_OBJECT), equalTo(1L)); + } + private void addErrorStatus(RestStatus... statuses) { errorResponseQueue.addAll(Arrays.stream(statuses).map(S3ErrorResponse::new).toList()); } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 9042234de6f50..36eb1d61e21d7 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories.s3; +import com.amazonaws.AmazonServiceException; import com.amazonaws.ClientConfiguration; import com.amazonaws.SDKGlobalConfiguration; import com.amazonaws.auth.AWSCredentials; @@ -20,6 +21,8 @@ import com.amazonaws.auth.STSAssumeRoleWithWebIdentitySessionCredentialsProvider; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.http.IdleConnectionReaper; +import com.amazonaws.retry.PredefinedRetryPolicies; +import com.amazonaws.retry.RetryPolicy; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.Constants; @@ -27,6 +30,7 @@ import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient; import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; +import org.apache.http.HttpStatus; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -193,7 +197,10 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings) { final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); builder.withCredentials(buildCredentials(LOGGER, clientSettings, webIdentityTokenCredentialsProvider)); - builder.withClientConfiguration(buildConfiguration(clientSettings)); + final ClientConfiguration clientConfiguration = buildConfiguration(clientSettings, isStateless); + assert (isStateless == false && clientConfiguration.getRetryPolicy() == PredefinedRetryPolicies.DEFAULT) + || (isStateless && clientConfiguration.getRetryPolicy() == RETRYABLE_403_RETRY_POLICY) : "invalid retry policy configuration"; + builder.withClientConfiguration(clientConfiguration); String endpoint = Strings.hasLength(clientSettings.endpoint) ? 
clientSettings.endpoint : Constants.S3_HOSTNAME; if ((endpoint.startsWith("http://") || endpoint.startsWith("https://")) == false) { @@ -223,7 +230,7 @@ protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettin } // pkg private for tests - static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { + static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings, boolean isStateless) { final ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. @@ -248,6 +255,10 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { clientConfiguration.setUseThrottleRetries(clientSettings.throttleRetries); clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis); + if (isStateless) { + clientConfiguration.setRetryPolicy(RETRYABLE_403_RETRY_POLICY); + } + return clientConfiguration; } @@ -504,4 +515,21 @@ interface SystemEnvironment { interface JvmEnvironment { String getProperty(String key, String defaultValue); } + + static final RetryPolicy RETRYABLE_403_RETRY_POLICY = RetryPolicy.builder() + .withRetryCondition((originalRequest, exception, retriesAttempted) -> { + if (PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION.shouldRetry(originalRequest, exception, retriesAttempted)) { + return true; + } + if (exception instanceof AmazonServiceException ase) { + return ase.getStatusCode() == HttpStatus.SC_FORBIDDEN && "InvalidAccessKeyId".equals(ase.getErrorCode()); + } + return false; + }) + .withBackoffStrategy(PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY) + .withMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY) + .withHonorMaxErrorRetryInClientConfig(true) + .withHonorDefaultMaxErrorRetryInRetryMode(true) + .withHonorDefaultBackoffStrategyInRetryMode(true) + .build(); } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 0aac0ba898f97..43f606135291d 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -17,6 +17,7 @@ import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; +import com.amazonaws.retry.PredefinedRetryPolicies; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Supplier; @@ -211,7 +212,7 @@ private void launchAWSConfigurationTest( ) { final S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); - final ClientConfiguration configuration = S3Service.buildConfiguration(clientSettings); + final ClientConfiguration configuration = S3Service.buildConfiguration(clientSettings, false); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -222,6 +223,7 @@ private void launchAWSConfigurationTest( assertThat(configuration.getMaxErrorRetry(), is(expectedMaxRetries)); assertThat(configuration.useThrottledRetries(), is(expectedUseThrottleRetries)); assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout)); + assertThat(configuration.getRetryPolicy(), 
is(PredefinedRetryPolicies.DEFAULT)); } public void testEndpointSetting() { diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index ddc7a1851c663..288ac1bb3c534 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -194,9 +194,9 @@ public void testSignerOverrideCanBeSet() { ); assertThat(settings.get("default").region, is("")); assertThat(settings.get("other").signerOverride, is(signerOverride)); - ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default")); + ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default"), false); assertThat(defaultConfiguration.getSignerOverride(), nullValue()); - ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other")); + ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other"), false); assertThat(configuration.getSignerOverride(), is(signerOverride)); } @@ -207,12 +207,18 @@ public void testMaxConnectionsCanBeSet() { ); assertThat(settings.get("default").maxConnections, is(ClientConfiguration.DEFAULT_MAX_CONNECTIONS)); assertThat(settings.get("other").maxConnections, is(maxConnections)); - ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default")); + ClientConfiguration defaultConfiguration = S3Service.buildConfiguration(settings.get("default"), false); assertThat(defaultConfiguration.getMaxConnections(), is(ClientConfiguration.DEFAULT_MAX_CONNECTIONS)); - ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other")); + ClientConfiguration configuration = S3Service.buildConfiguration(settings.get("other"), false); assertThat(configuration.getMaxConnections(), is(maxConnections)); // the default appears in the docs so let's make sure it doesn't change: assertEquals(50, ClientConfiguration.DEFAULT_MAX_CONNECTIONS); } + + public void testStatelessDefaultRetryPolicy() { + final var s3ClientSettings = S3ClientSettings.load(Settings.EMPTY).get("default"); + final var clientConfiguration = S3Service.buildConfiguration(s3ClientSettings, true); + assertThat(clientConfiguration.getRetryPolicy(), is(S3Service.RETRYABLE_403_RETRY_POLICY)); + } } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java index 7bfaf56127fc7..afe1bb1a03c76 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java @@ -8,23 +8,23 @@ */ package org.elasticsearch.repositories.s3; +import com.amazonaws.AmazonWebServiceRequest; +import com.amazonaws.services.s3.model.AmazonS3Exception; + import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.watcher.ResourceWatcherService; -import org.mockito.Mockito; import java.io.IOException; +import static org.mockito.Mockito.mock; + public class S3ServiceTests extends ESTestCase { public void 
testCachedClientsAreReleased() throws IOException { - final S3Service s3Service = new S3Service( - Mockito.mock(Environment.class), - Settings.EMPTY, - Mockito.mock(ResourceWatcherService.class) - ); + final S3Service s3Service = new S3Service(mock(Environment.class), Settings.EMPTY, mock(ResourceWatcherService.class)); final Settings settings = Settings.builder().put("endpoint", "http://first").build(); final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); final RepositoryMetadata metadata2 = new RepositoryMetadata("second", "s3", settings); @@ -41,4 +41,25 @@ public void testCachedClientsAreReleased() throws IOException { final S3ClientSettings clientSettingsReloaded = s3Service.settings(metadata1); assertNotSame(clientSettings, clientSettingsReloaded); } + + public void testRetryOn403RetryPolicy() { + final AmazonS3Exception e = new AmazonS3Exception("error"); + e.setStatusCode(403); + e.setErrorCode("InvalidAccessKeyId"); + + // Retry on 403 invalid access key id + assertTrue( + S3Service.RETRYABLE_403_RETRY_POLICY.getRetryCondition().shouldRetry(mock(AmazonWebServiceRequest.class), e, between(0, 9)) + ); + + // Not retry if not 403 or not invalid access key id + if (randomBoolean()) { + e.setStatusCode(randomValueOtherThan(403, () -> between(0, 600))); + } else { + e.setErrorCode(randomAlphaOfLength(10)); + } + assertFalse( + S3Service.RETRYABLE_403_RETRY_POLICY.getRetryCondition().shouldRetry(mock(AmazonWebServiceRequest.class), e, between(0, 9)) + ); + } } From 5fb1e23f45f71bff1176f939b9f30f942f39cd96 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 28 Oct 2024 06:07:27 +0000 Subject: [PATCH 177/202] Clarify status of response to voting config API (#115714) These APIs return no body, just a status code. This commit clarifies that in the docs. Closes #115462 --- .../cluster/voting-exclusions.asciidoc | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/docs/reference/cluster/voting-exclusions.asciidoc b/docs/reference/cluster/voting-exclusions.asciidoc index e5b8544a16554..55587a7010f8f 100644 --- a/docs/reference/cluster/voting-exclusions.asciidoc +++ b/docs/reference/cluster/voting-exclusions.asciidoc @@ -7,7 +7,6 @@ Adds or removes master-eligible nodes from the <>. - [[voting-config-exclusions-api-request]] ==== {api-request-title} @@ -28,7 +27,7 @@ users can use this API. [[voting-config-exclusions-api-desc]] ==== {api-description-title} - + By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the <> automatically @@ -50,14 +49,19 @@ use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. -If the API fails, you can safely retry it. Only a successful response -guarantees that the node has been removed from the voting configuration and -will not be reinstated. +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status +code of `200 OK` guarantees that the node has been removed from the voting +configuration and will not be reinstated until the voting configuration +exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. +If the call to `POST /_cluster/voting_config_exclusions` fails or returns a +response with an HTTP status code other than `200 OK` then the node may not +have been removed from the voting configuration. 
In that case, you may safely
+retry the call.

 NOTE: Voting exclusions are required only when you remove at least half of the
 master-eligible nodes from a cluster in a short time period. They are not
-required when removing master-ineligible nodes or fewer than half of the
-master-eligible nodes.
+required when removing master-ineligible nodes or when removing fewer than half
+of the master-eligible nodes.

 For more information, see <>.

@@ -94,7 +98,7 @@ list. Defaults to `true`, meaning that all excluded nodes must be removed from
 the cluster before this API takes any action. If set to `false` then the
 voting configuration exclusions list is cleared even if some excluded nodes
 are still in the cluster. Only applies to the `DELETE` form of this API.
-
+
 [[voting-config-exclusions-api-example]]
 ==== {api-examples-title}

@@ -102,7 +106,7 @@ Adds nodes named `nodeName1` and `nodeName2` to the voting configuration
 exclusions list:

 [source,console]
---------------------------------------------------
+--------------------------------------------------
 POST /_cluster/voting_config_exclusions?node_names=nodeName1,nodeName2
 --------------------------------------------------

From 98cd34f3fde14cec9c5d5ac2507d4fdc55e89288 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Mon, 28 Oct 2024 08:34:48 +0100
Subject: [PATCH 178/202] Add more tsdb and logsdb rolling upgrade indexing
 tests. (#115639)

The main difference from other rolling upgrade tests is that these tests
index more data while performing the rolling upgrade, and that no rollover is
performed during the upgrade. For example, this makes it more likely for
merging to happen, which could uncover bwc bugs.

Note that currently both test suites start a trial license so that synthetic
source gets used.
---
 .../resources/checkstyle_suppressions.xml     |   2 +
 .../LogsIndexModeRollingUpgradeIT.java        |   4 +-
 .../LogsdbIndexingRollingUpgradeIT.java       | 253 ++++++++++++++++++
 .../org/elasticsearch/upgrades/TsdbIT.java    |   4 +-
 .../TsdbIndexingRollingUpgradeIT.java         | 187 +++++++++++++
 5 files changed, 446 insertions(+), 4 deletions(-)
 create mode 100644 qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java
 create mode 100644 qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java

diff --git a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml
index fd01993951959..5fdfebf6849e7 100644
--- a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml
+++ b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml
@@ -35,6 +35,8 @@
+
+

diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java
index ba79de4ab6cd1..8c369ebc9950d 100644
--- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java
+++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java
@@ -171,7 +171,7 @@ public void testLogsIndexing() throws IOException {
         }
     }

-    private static void enableLogsdbByDefault() throws IOException {
+    static void enableLogsdbByDefault() throws IOException {
         var request = new Request("PUT", "/_cluster/settings");
         request.setJsonEntity("""
             {
@@ -214,7 +214,7 @@ private static Request rolloverDataStream(final RestClient
client, final String } @SuppressWarnings("unchecked") - private static String getWriteBackingIndex(final RestClient client, final String dataStreamName, int backingIndex) throws IOException { + static String getWriteBackingIndex(final RestClient client, final String dataStreamName, int backingIndex) throws IOException { final Request request = new Request("GET", "_data_stream/" + dataStreamName); final List dataStreams = (List) entityAsMap(client.performRequest(request)).get("data_streams"); final Map dataStream = (Map) dataStreams.get(0); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java new file mode 100644 index 0000000000000..9bdc43543e331 --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.io.InputStream; +import java.time.Instant; +import java.util.Map; + +import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.enableLogsdbByDefault; +import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.getWriteBackingIndex; +import static org.elasticsearch.upgrades.TsdbIT.formatInstant; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; + +public class LogsdbIndexingRollingUpgradeIT extends AbstractRollingUpgradeTestCase { + + static String BULK_ITEM_TEMPLATE = + """ + {"@timestamp": "$now", "host.name": "$host", "method": "$method", "ip": "$ip", "message": "$message", "length": $length, "factor": $factor} + """; + + private static final String TEMPLATE = """ + { + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, + "method": { + "type": "keyword" + }, + "message": { + "type": "text" + }, + "ip": { + "type": "ip" + }, + "length": { + "type": "long" + }, + "factor": { + "type": "double" + } + } + } + }"""; + + public LogsdbIndexingRollingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + public void testIndexing() throws Exception { + String dataStreamName = "logs-bwc-test"; + if (isOldCluster()) { + startTrial(); + enableLogsdbByDefault(); + createTemplate(dataStreamName, "3", TEMPLATE); + + Instant startTime = Instant.now().minusSeconds(60 * 60); + bulkIndex(dataStreamName, 4, 1024, startTime); + + String firstBackingIndex = getWriteBackingIndex(client(), dataStreamName, 0); + var settings = 
(Map) getIndexSettingsWithDefaults(firstBackingIndex).get(firstBackingIndex); + assertThat(((Map) settings.get("settings")).get("index.mode"), equalTo("logsdb")); + assertThat(((Map) settings.get("defaults")).get("index.mapping.source.mode"), equalTo("SYNTHETIC")); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } else if (isMixedCluster()) { + Instant startTime = Instant.now().minusSeconds(60 * 30); + bulkIndex(dataStreamName, 4, 1024, startTime); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } else if (isUpgradedCluster()) { + ensureGreen(dataStreamName); + Instant startTime = Instant.now(); + bulkIndex(dataStreamName, 4, 1024, startTime); + search(dataStreamName); + query(dataStreamName); + + var forceMergeRequest = new Request("POST", "/" + dataStreamName + "/_forcemerge"); + forceMergeRequest.addParameter("max_num_segments", "1"); + assertOK(client().performRequest(forceMergeRequest)); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } + } + + static void createTemplate(String dataStreamName, String id, String template) throws IOException { + final String INDEX_TEMPLATE = """ + { + "index_patterns": ["$DATASTREAM"], + "template": $TEMPLATE, + "data_stream": { + } + }"""; + var putIndexTemplateRequest = new Request("POST", "/_index_template/" + id); + putIndexTemplateRequest.setJsonEntity(INDEX_TEMPLATE.replace("$TEMPLATE", template).replace("$DATASTREAM", dataStreamName)); + assertOK(client().performRequest(putIndexTemplateRequest)); + } + + static void bulkIndex(String dataStreamName, int numRequest, int numDocs, Instant startTime) throws Exception { + for (int i = 0; i < numRequest; i++) { + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + StringBuilder requestBody = new StringBuilder(); + for (int j = 0; j < numDocs; j++) { + String hostName = "host" + j % 50; // Not realistic, but makes asserting search / query response easier. 
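+                // Each bulk item below is two NDJSON lines, per the _bulk API format: a "create" action
+                // line followed by the document source, built by filling in the BULK_ITEM_TEMPLATE placeholders.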
+                String methodName = "method" + j % 5;
+                String ip = NetworkAddress.format(randomIp(true));
+                String message = randomAlphaOfLength(128);
+                long length = randomLong();
+                double factor = randomDouble();
+
+                requestBody.append("{\"create\": {}}");
+                requestBody.append('\n');
+                requestBody.append(
+                    BULK_ITEM_TEMPLATE.replace("$now", formatInstant(startTime))
+                        .replace("$host", hostName)
+                        .replace("$method", methodName)
+                        .replace("$ip", ip)
+                        .replace("$message", message)
+                        .replace("$length", Long.toString(length))
+                        .replace("$factor", Double.toString(factor))
+                );
+                requestBody.append('\n');
+
+                startTime = startTime.plusMillis(1);
+            }
+            bulkRequest.setJsonEntity(requestBody.toString());
+            bulkRequest.addParameter("refresh", "true");
+            var response = client().performRequest(bulkRequest);
+            assertOK(response);
+            var responseBody = entityAsMap(response);
+            assertThat("errors in response:\n " + responseBody, responseBody.get("errors"), equalTo(false));
+        }
+    }
+
+    void search(String dataStreamName) throws Exception {
+        var searchRequest = new Request("POST", "/" + dataStreamName + "/_search");
+        searchRequest.addParameter("pretty", "true");
+        searchRequest.setJsonEntity("""
+            {
+                "size": 0,
+                "aggs": {
+                    "host_name": {
+                        "terms": {
+                            "field": "host.name",
+                            "order": { "_key": "asc" }
+                        },
+                        "aggs": {
+                            "max_length": {
+                                "max": {
+                                    "field": "length"
+                                }
+                            },
+                            "max_factor": {
+                                "max": {
+                                    "field": "factor"
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            """);
+        var response = client().performRequest(searchRequest);
+        assertOK(response);
+        var responseBody = entityAsMap(response);
+
+        Integer totalCount = ObjectPath.evaluate(responseBody, "hits.total.value");
+        assertThat(totalCount, greaterThanOrEqualTo(4096));
+        String key = ObjectPath.evaluate(responseBody, "aggregations.host_name.buckets.0.key");
+        assertThat(key, equalTo("host0"));
+        Integer docCount = ObjectPath.evaluate(responseBody, "aggregations.host_name.buckets.0.doc_count");
+        assertThat(docCount, greaterThan(0));
+        Double maxLength = ObjectPath.evaluate(responseBody, "aggregations.host_name.buckets.0.max_length.value");
+        assertThat(maxLength, notNullValue());
+        Double maxFactor = ObjectPath.evaluate(responseBody, "aggregations.host_name.buckets.0.max_factor.value");
+        assertThat(maxFactor, notNullValue());
+    }
+
+    void query(String dataStreamName) throws Exception {
+        var queryRequest = new Request("POST", "/_query");
+        queryRequest.addParameter("pretty", "true");
+        queryRequest.setJsonEntity("""
+            {
+                "query": "FROM $ds | STATS max(length), max(factor) BY host.name | SORT host.name | LIMIT 5"
+            }
+            """.replace("$ds", dataStreamName));
+        var response = client().performRequest(queryRequest);
+        assertOK(response);
+        var responseBody = entityAsMap(response);
+
+        String column1 = ObjectPath.evaluate(responseBody, "columns.0.name");
+        String column2 = ObjectPath.evaluate(responseBody, "columns.1.name");
+        String column3 = ObjectPath.evaluate(responseBody, "columns.2.name");
+        assertThat(column1, equalTo("max(length)"));
+        assertThat(column2, equalTo("max(factor)"));
+        assertThat(column3, equalTo("host.name"));
+
+        String key = ObjectPath.evaluate(responseBody, "values.0.2");
+        assertThat(key, equalTo("host0"));
+        Long maxLength = ObjectPath.evaluate(responseBody, "values.0.0");
+        assertThat(maxLength, notNullValue());
+        Double maxFactor = ObjectPath.evaluate(responseBody, "values.0.1");
+        assertThat(maxFactor, notNullValue());
+    }
+
+    protected static void startTrial() throws IOException {
+        Request startTrial = new Request("POST", "/_license/start_trial");
startTrial.addParameter("acknowledge", "true"); + assertOK(client().performRequest(startTrial)); + } + + static Map getIndexSettingsWithDefaults(String index) throws IOException { + Request request = new Request("GET", "/" + index + "/_settings"); + request.addParameter("flat_settings", "true"); + request.addParameter("include_defaults", "true"); + Response response = client().performRequest(request); + try (InputStream is = response.getEntity().getContent()) { + return XContentHelper.convertToMap( + XContentType.fromMediaType(response.getEntity().getContentType().getValue()).xContent(), + is, + true + ); + } + } + +} diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java index 9e3030d510266..6744c84f29d0f 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java @@ -33,7 +33,7 @@ public TsdbIT(@Name("upgradedNodes") int upgradedNodes) { super(upgradedNodes); } - private static final String TEMPLATE = """ + static final String TEMPLATE = """ { "settings":{ "index": { @@ -289,7 +289,7 @@ private static void assertSearch(String dataStreamName, int expectedHitCount) th assertThat(ObjectPath.evaluate(responseBody, "hits.total.value"), equalTo(expectedHitCount)); } - private static String formatInstant(Instant instant) { + static String formatInstant(Instant instant) { return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java new file mode 100644 index 0000000000000..1ac919ea57001 --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.test.rest.ObjectPath; + +import java.time.Instant; +import java.util.Map; + +import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.getWriteBackingIndex; +import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.*; +import static org.elasticsearch.upgrades.TsdbIT.TEMPLATE; +import static org.elasticsearch.upgrades.TsdbIT.formatInstant; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; + +public class TsdbIndexingRollingUpgradeIT extends AbstractRollingUpgradeTestCase { + + static String BULK_ITEM_TEMPLATE = + """ + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "$name", "uid":"$uid", "ip": "$ip", "network": {"tx": $tx, "rx": $rx}}}} + """; + + public TsdbIndexingRollingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + public void testIndexing() throws Exception { + String dataStreamName = "k9s"; + if (isOldCluster()) { + startTrial(); + createTemplate(dataStreamName, "2", TEMPLATE); + + Instant startTime = Instant.now().minusSeconds(60 * 60); + bulkIndex(dataStreamName, 4, 1024, startTime); + + String firstBackingIndex = getWriteBackingIndex(client(), dataStreamName, 0); + var settings = (Map) getIndexSettingsWithDefaults(firstBackingIndex).get(firstBackingIndex); + assertThat(((Map) settings.get("settings")).get("index.mode"), equalTo("time_series")); + assertThat(((Map) settings.get("defaults")).get("index.mapping.source.mode"), equalTo("SYNTHETIC")); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } else if (isMixedCluster()) { + Instant startTime = Instant.now().minusSeconds(60 * 30); + bulkIndex(dataStreamName, 4, 1024, startTime); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } else if (isUpgradedCluster()) { + ensureGreen(dataStreamName); + Instant startTime = Instant.now(); + bulkIndex(dataStreamName, 4, 1024, startTime); + search(dataStreamName); + query(dataStreamName); + + var forceMergeRequest = new Request("POST", "/" + dataStreamName + "/_forcemerge"); + forceMergeRequest.addParameter("max_num_segments", "1"); + assertOK(client().performRequest(forceMergeRequest)); + + ensureGreen(dataStreamName); + search(dataStreamName); + query(dataStreamName); + } + } + + static void bulkIndex(String dataStreamName, int numRequest, int numDocs, Instant startTime) throws Exception { + for (int i = 0; i < numRequest; i++) { + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + StringBuilder requestBody = new StringBuilder(); + for (int j = 0; j < numDocs; j++) { + String podName = "pod" + j % 5; // Not realistic, but makes asserting search / query response easier. 
+ String podUid = randomUUID(); + String podIp = NetworkAddress.format(randomIp(true)); + long podTx = randomLong(); + long podRx = randomLong(); + + requestBody.append("{\"create\": {}}"); + requestBody.append('\n'); + requestBody.append( + BULK_ITEM_TEMPLATE.replace("$now", formatInstant(startTime)) + .replace("$name", podName) + .replace("$uid", podUid) + .replace("$ip", podIp) + .replace("$tx", Long.toString(podTx)) + .replace("$rx", Long.toString(podRx)) + ); + requestBody.append('\n'); + + startTime = startTime.plusMillis(1); + } + bulkRequest.setJsonEntity(requestBody.toString()); + bulkRequest.addParameter("refresh", "true"); + var response = client().performRequest(bulkRequest); + assertOK(response); + var responseBody = entityAsMap(response); + assertThat("errors in response:\n " + responseBody, responseBody.get("errors"), equalTo(false)); + } + } + + void search(String dataStreamName) throws Exception { + var searchRequest = new Request("POST", "/" + dataStreamName + "/_search"); + searchRequest.addParameter("pretty", "true"); + searchRequest.setJsonEntity(""" + { + "size": 0, + "aggs": { + "pod_name": { + "terms": { + "field": "k8s.pod.name", + "order": { "_key": "asc" } + }, + "aggs": { + "max_tx": { + "max": { + "field": "k8s.pod.network.tx" + } + }, + "max_rx": { + "max": { + "field": "k8s.pod.network.rx" + } + } + } + } + } + } + """); + var response = client().performRequest(searchRequest); + assertOK(response); + var responseBody = entityAsMap(response); + + Integer totalCount = ObjectPath.evaluate(responseBody, "hits.total.value"); + assertThat(totalCount, greaterThanOrEqualTo(4096)); + String key = ObjectPath.evaluate(responseBody, "aggregations.pod_name.buckets.0.key"); + assertThat(key, equalTo("pod0")); + Integer docCount = ObjectPath.evaluate(responseBody, "aggregations.pod_name.buckets.0.doc_count"); + assertThat(docCount, greaterThan(0)); + Double maxTx = ObjectPath.evaluate(responseBody, "aggregations.pod_name.buckets.0.max_tx.value"); + assertThat(maxTx, notNullValue()); + Double maxRx = ObjectPath.evaluate(responseBody, "aggregations.pod_name.buckets.0.max_rx.value"); + assertThat(maxRx, notNullValue()); + } + + void query(String dataStreamName) throws Exception { + var queryRequest = new Request("POST", "/_query"); + queryRequest.addParameter("pretty", "true"); + queryRequest.setJsonEntity(""" + { + "query": "FROM $ds | STATS max(k8s.pod.network.rx), max(k8s.pod.network.tx) BY k8s.pod.name | SORT k8s.pod.name | LIMIT 5" + } + """.replace("$ds", dataStreamName)); + var response = client().performRequest(queryRequest); + assertOK(response); + var responseBody = entityAsMap(response); + + String column1 = ObjectPath.evaluate(responseBody, "columns.0.name"); + String column2 = ObjectPath.evaluate(responseBody, "columns.1.name"); + String column3 = ObjectPath.evaluate(responseBody, "columns.2.name"); + assertThat(column1, equalTo("max(k8s.pod.network.rx)")); + assertThat(column2, equalTo("max(k8s.pod.network.tx)")); + assertThat(column3, equalTo("k8s.pod.name")); + + String key = ObjectPath.evaluate(responseBody, "values.0.2"); + assertThat(key, equalTo("pod0")); + Long maxRx = ObjectPath.evaluate(responseBody, "values.0.0"); + assertThat(maxRx, notNullValue()); + Long maxTx = ObjectPath.evaluate(responseBody, "values.0.1"); + assertThat(maxTx, notNullValue()); + } + +} From ef85d0a53f1f58a63359b63933fc1e147167d42f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 28 Oct 2024 09:31:19 +0100 Subject: [PATCH 179/202] Avoid double 
instrumentation via class annotation (#115398) --- .../impl/InstrumenterImpl.java | 96 ++++++++++++--- .../impl/InstrumenterTests.java | 112 +++++++++++++++--- 2 files changed, 177 insertions(+), 31 deletions(-) diff --git a/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java b/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java index 81c120ddcd6d1..7c2e1645ada83 100644 --- a/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java +++ b/distribution/tools/entitlement-agent/impl/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java @@ -15,8 +15,10 @@ import org.objectweb.asm.ClassReader; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.ClassWriter; +import org.objectweb.asm.FieldVisitor; import org.objectweb.asm.MethodVisitor; import org.objectweb.asm.Opcodes; +import org.objectweb.asm.RecordComponentVisitor; import org.objectweb.asm.Type; import java.io.IOException; @@ -73,7 +75,13 @@ public byte[] instrumentClass(String className, byte[] classfileBuffer) { } class EntitlementClassVisitor extends ClassVisitor { - final String className; + + private static final String ENTITLEMENT_ANNOTATION = "EntitlementInstrumented"; + + private final String className; + + private boolean isAnnotationPresent; + private boolean annotationNeeded = true; EntitlementClassVisitor(int api, ClassVisitor classVisitor, String className) { super(api, classVisitor); @@ -85,25 +93,85 @@ public void visit(int version, int access, String name, String signature, String super.visit(version, access, name + classNameSuffix, signature, superName, interfaces); } + @Override + public AnnotationVisitor visitAnnotation(String descriptor, boolean visible) { + if (visible && descriptor.equals(ENTITLEMENT_ANNOTATION)) { + isAnnotationPresent = true; + annotationNeeded = false; + } + return cv.visitAnnotation(descriptor, visible); + } + + @Override + public void visitNestMember(String nestMember) { + addClassAnnotationIfNeeded(); + super.visitNestMember(nestMember); + } + + @Override + public void visitPermittedSubclass(String permittedSubclass) { + addClassAnnotationIfNeeded(); + super.visitPermittedSubclass(permittedSubclass); + } + + @Override + public void visitInnerClass(String name, String outerName, String innerName, int access) { + addClassAnnotationIfNeeded(); + super.visitInnerClass(name, outerName, innerName, access); + } + + @Override + public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value) { + addClassAnnotationIfNeeded(); + return super.visitField(access, name, descriptor, signature, value); + } + + @Override + public RecordComponentVisitor visitRecordComponent(String name, String descriptor, String signature) { + addClassAnnotationIfNeeded(); + return super.visitRecordComponent(name, descriptor, signature); + } + @Override public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) { + addClassAnnotationIfNeeded(); var mv = super.visitMethod(access, name, descriptor, signature, exceptions); - boolean isStatic = (access & ACC_STATIC) != 0; - var key = new MethodKey( - className, - name, - Stream.of(Type.getArgumentTypes(descriptor)).map(Type::getInternalName).toList(), - isStatic - ); - var instrumentationMethod = 
instrumentationMethods.get(key); - if (instrumentationMethod != null) { - // LOGGER.debug("Will instrument method {}", key); - return new EntitlementMethodVisitor(Opcodes.ASM9, mv, isStatic, descriptor, instrumentationMethod); - } else { - // LOGGER.trace("Will not instrument method {}", key); + if (isAnnotationPresent == false) { + boolean isStatic = (access & ACC_STATIC) != 0; + var key = new MethodKey( + className, + name, + Stream.of(Type.getArgumentTypes(descriptor)).map(Type::getInternalName).toList(), + isStatic + ); + var instrumentationMethod = instrumentationMethods.get(key); + if (instrumentationMethod != null) { + // LOGGER.debug("Will instrument method {}", key); + return new EntitlementMethodVisitor(Opcodes.ASM9, mv, isStatic, descriptor, instrumentationMethod); + } else { + // LOGGER.trace("Will not instrument method {}", key); + } } return mv; } + + /** + * A class annotation can be added via visitAnnotation; we need to call visitAnnotation after all other visitAnnotation + * calls (in case one of them detects our annotation is already present), but before any other subsequent visit* method is called + * (up to visitMethod -- if no visitMethod is called, there is nothing to instrument). + * This includes visitNestMember, visitPermittedSubclass, visitInnerClass, visitField, visitRecordComponent and, of course, + * visitMethod (see {@link ClassVisitor} javadoc). + */ + private void addClassAnnotationIfNeeded() { + if (annotationNeeded) { + // logger.debug("Adding {} annotation", ENTITLEMENT_ANNOTATION); + AnnotationVisitor av = cv.visitAnnotation(ENTITLEMENT_ANNOTATION, true); + if (av != null) { + av.visitEnd(); + } + annotationNeeded = false; + } + } } static class EntitlementMethodVisitor extends MethodVisitor { diff --git a/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java b/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java index e807ecee4f103..f05c7ccae62e6 100644 --- a/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java +++ b/distribution/tools/entitlement-agent/impl/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java @@ -9,20 +9,24 @@ package org.elasticsearch.entitlement.instrumentation.impl; +import org.elasticsearch.common.Strings; import org.elasticsearch.entitlement.api.EntitlementChecks; import org.elasticsearch.entitlement.api.EntitlementProvider; import org.elasticsearch.entitlement.instrumentation.InstrumentationService; -import org.elasticsearch.entitlement.instrumentation.MethodKey; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import org.objectweb.asm.Type; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.util.Map; +import java.util.Arrays; +import java.util.stream.Collectors; import static org.elasticsearch.entitlement.instrumentation.impl.ASMUtils.bytecode2text; +import static org.elasticsearch.entitlement.instrumentation.impl.InstrumenterImpl.getClassFileInfo; +import static org.hamcrest.Matchers.is; /** * This tests {@link InstrumenterImpl} in isolation, without a java agent. 
@@ -60,6 +64,10 @@ public static class ClassToInstrument implements Testable { public static void systemExit(int status) { assertEquals(123, status); } + + public static void anotherSystemExit(int status) { + assertEquals(123, status); + } } static final class TestException extends RuntimeException {} @@ -76,8 +84,11 @@ public static class TestEntitlementManager implements EntitlementChecks { */ volatile boolean isActive; + int checkSystemExitCallCount = 0; + @Override public void checkSystemExit(Class callerClass, int status) { + checkSystemExitCallCount++; assertSame(InstrumenterTests.class, callerClass); assertEquals(123, status); throwIfActive(); @@ -90,18 +101,11 @@ private void throwIfActive() { } } - public void test() throws Exception { - // This test doesn't replace ClassToInstrument in-place but instead loads a separate - // class ClassToInstrument_NEW that contains the instrumentation. Because of this, - // we need to configure the Transformer to use a MethodKey and instrumentationMethod - // with slightly different signatures (using the common interface Testable) which - // is not what would happen when it's run by the agent. - - MethodKey k1 = instrumentationService.methodKeyForTarget(ClassToInstrument.class.getMethod("systemExit", int.class)); - Method v1 = EntitlementChecks.class.getMethod("checkSystemExit", Class.class, int.class); - var instrumenter = new InstrumenterImpl("_NEW", Map.of(k1, v1)); + public void testClassIsInstrumented() throws Exception { + var classToInstrument = ClassToInstrument.class; + var instrumenter = createInstrumenter(classToInstrument, "systemExit"); - byte[] newBytecode = instrumenter.instrumentClassFile(ClassToInstrument.class).bytecodes(); + byte[] newBytecode = instrumenter.instrumentClassFile(classToInstrument).bytecodes(); if (logger.isTraceEnabled()) { logger.trace("Bytecode after instrumentation:\n{}", bytecode2text(newBytecode)); @@ -112,22 +116,96 @@ public void test() throws Exception { newBytecode ); + getTestChecks().isActive = false; + // Before checking is active, nothing should throw - callStaticSystemExit(newClass, 123); + callStaticMethod(newClass, "systemExit", 123); getTestChecks().isActive = true; // After checking is activated, everything should throw - assertThrows(TestException.class, () -> callStaticSystemExit(newClass, 123)); + assertThrows(TestException.class, () -> callStaticMethod(newClass, "systemExit", 123)); + } + + public void testClassIsNotInstrumentedTwice() throws Exception { + var classToInstrument = ClassToInstrument.class; + var instrumenter = createInstrumenter(classToInstrument, "systemExit"); + + InstrumenterImpl.ClassFileInfo initial = getClassFileInfo(classToInstrument); + var internalClassName = Type.getInternalName(classToInstrument); + + byte[] instrumentedBytecode = instrumenter.instrumentClass(internalClassName, initial.bytecodes()); + byte[] instrumentedTwiceBytecode = instrumenter.instrumentClass(internalClassName, instrumentedBytecode); + + logger.trace(() -> Strings.format("Bytecode after 1st instrumentation:\n%s", bytecode2text(instrumentedBytecode))); + logger.trace(() -> Strings.format("Bytecode after 2nd instrumentation:\n%s", bytecode2text(instrumentedTwiceBytecode))); + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + ClassToInstrument.class.getName() + "_NEW_NEW", + instrumentedTwiceBytecode + ); + + getTestChecks().isActive = true; + getTestChecks().checkSystemExitCallCount = 0; + + assertThrows(TestException.class, () -> 
callStaticMethod(newClass, "systemExit", 123)); + assertThat(getTestChecks().checkSystemExitCallCount, is(1)); + } + + public void testClassAllMethodsAreInstrumentedFirstPass() throws Exception { + var classToInstrument = ClassToInstrument.class; + var instrumenter = createInstrumenter(classToInstrument, "systemExit", "anotherSystemExit"); + + InstrumenterImpl.ClassFileInfo initial = getClassFileInfo(classToInstrument); + var internalClassName = Type.getInternalName(classToInstrument); + + byte[] instrumentedBytecode = instrumenter.instrumentClass(internalClassName, initial.bytecodes()); + byte[] instrumentedTwiceBytecode = instrumenter.instrumentClass(internalClassName, instrumentedBytecode); + + logger.trace(() -> Strings.format("Bytecode after 1st instrumentation:\n%s", bytecode2text(instrumentedBytecode))); + logger.trace(() -> Strings.format("Bytecode after 2nd instrumentation:\n%s", bytecode2text(instrumentedTwiceBytecode))); + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + ClassToInstrument.class.getName() + "_NEW_NEW", + instrumentedTwiceBytecode + ); + + getTestChecks().isActive = true; + getTestChecks().checkSystemExitCallCount = 0; + + assertThrows(TestException.class, () -> callStaticMethod(newClass, "systemExit", 123)); + assertThat(getTestChecks().checkSystemExitCallCount, is(1)); + + assertThrows(TestException.class, () -> callStaticMethod(newClass, "anotherSystemExit", 123)); + assertThat(getTestChecks().checkSystemExitCallCount, is(2)); + } + + /** This test doesn't replace ClassToInstrument in-place but instead loads a separate + * class ClassToInstrument_NEW that contains the instrumentation. Because of this, + * we need to configure the Transformer to use a MethodKey and instrumentationMethod + * with slightly different signatures (using the common interface Testable) which + * is not what would happen when it's run by the agent. + */ + private InstrumenterImpl createInstrumenter(Class classToInstrument, String... methodNames) throws NoSuchMethodException { + Method v1 = EntitlementChecks.class.getMethod("checkSystemExit", Class.class, int.class); + var methods = Arrays.stream(methodNames).map(name -> { + try { + return instrumentationService.methodKeyForTarget(classToInstrument.getMethod(name, int.class)); + } catch (NoSuchMethodException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toUnmodifiableMap(name -> name, name -> v1)); + + return new InstrumenterImpl("_NEW", methods); } /** * Calling a static method of a dynamically loaded class is significantly more cumbersome * than calling a virtual method. 
*/ - private static void callStaticSystemExit(Class c, int status) throws NoSuchMethodException, IllegalAccessException { + private static void callStaticMethod(Class c, String methodName, int status) throws NoSuchMethodException, IllegalAccessException { try { - c.getMethod("systemExit", int.class).invoke(null, status); + c.getMethod(methodName, int.class).invoke(null, status); } catch (InvocationTargetException e) { Throwable cause = e.getCause(); if (cause instanceof TestException n) { From c4c33ff359b99e855306542d6cc077661e21383d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 28 Oct 2024 09:33:14 +0100 Subject: [PATCH 180/202] Fix NPE on plugin sync (#115640) --- .../plugins/cli/SyncPluginsAction.java | 8 ++++---- .../plugins/cli/SyncPluginsActionTests.java | 16 ++++++++++++++++ docs/changelog/115640.yaml | 6 ++++++ 3 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/115640.yaml diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java index 5394cb8f3d79b..d6d0619422770 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java @@ -25,6 +25,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Comparator; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -60,7 +61,7 @@ public SyncPluginsAction(Terminal terminal, Environment env) { * @throws UserException if a plugins config file is found. */ public static void ensureNoConfigFile(Environment env) throws UserException { - final Path pluginsConfig = env.configFile().resolve("elasticsearch-plugins.yml"); + final Path pluginsConfig = env.configFile().resolve(ELASTICSEARCH_PLUGINS_YML); if (Files.exists(pluginsConfig)) { throw new UserException( ExitCodes.USAGE, @@ -207,9 +208,8 @@ private List getPluginsToUpgrade( Optional cachedPluginsConfig, List existingPlugins ) { - final Map cachedPluginIdToLocation = cachedPluginsConfig.map( - config -> config.getPlugins().stream().collect(Collectors.toMap(InstallablePlugin::getId, InstallablePlugin::getLocation)) - ).orElse(Map.of()); + final Map cachedPluginIdToLocation = new HashMap<>(); + cachedPluginsConfig.ifPresent(config -> config.getPlugins().forEach(p -> cachedPluginIdToLocation.put(p.getId(), p.getLocation()))); return pluginsToMaybeUpgrade.stream().filter(eachPlugin -> { final String eachPluginId = eachPlugin.getId(); diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java index 8ef44c8862e84..2d2336428a0a5 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java @@ -157,6 +157,22 @@ public void test_getPluginChanges_withOfficialPluginToUpgrade_returnsPluginToUpg assertThat(pluginChanges.upgrade.get(0).getId(), equalTo("analysis-icu")); } + /** + * Check that when there is an official plugin in the config file and in the cached config, then we + * calculate that the plugin does not need to be upgraded. 
+ */ + public void test_getPluginChanges_withOfficialPluginCachedConfigAndNoChanges_returnsNoChanges() throws Exception { + createPlugin("analysis-icu"); + config.setPlugins(List.of(new InstallablePlugin("analysis-icu"))); + + final PluginsConfig cachedConfig = new PluginsConfig(); + cachedConfig.setPlugins(List.of(new InstallablePlugin("analysis-icu"))); + + final PluginChanges pluginChanges = action.getPluginChanges(config, Optional.of(cachedConfig)); + + assertThat(pluginChanges.isEmpty(), is(true)); + } + /** * Check that if an unofficial plugins' location has not changed in the cached config, then we * calculate that the plugin does not need to be upgraded. diff --git a/docs/changelog/115640.yaml b/docs/changelog/115640.yaml new file mode 100644 index 0000000000000..5c4a943a9697d --- /dev/null +++ b/docs/changelog/115640.yaml @@ -0,0 +1,6 @@ +pr: 115640 +summary: Fix NPE on plugin sync +area: Infra/CLI +type: bug +issues: + - 114818 From 918a9cc35ada3a348f0bd4ed24e7ab6f836d468e Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 28 Oct 2024 09:47:48 +0000 Subject: [PATCH 181/202] Make some chunked xcontent more efficient (#115512) --- .../org/elasticsearch/action/bulk/BulkResponse.java | 12 ++++++------ .../common/xcontent/ChunkedToXContentBuilder.java | 8 ++------ 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 88a9fb56b8edb..ec7a08007de93 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -158,13 +158,13 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Iterator toXContentChunked(ToXContent.Params params) { - return ChunkedToXContent.builder(params).object(ob -> { - ob.field(ERRORS, hasFailures()); - ob.field(TOOK, tookInMillis); + return ChunkedToXContent.builder(params).object(ob -> ob.append((b, p) -> { + b.field(ERRORS, hasFailures()); + b.field(TOOK, tookInMillis); if (ingestTookInMillis != BulkResponse.NO_INGEST_TOOK) { - ob.field(INGEST_TOOK, ingestTookInMillis); + b.field(INGEST_TOOK, ingestTookInMillis); } - ob.array(ITEMS, Iterators.forArray(responses)); - }); + return b; + }).array(ITEMS, Iterators.forArray(responses))); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentBuilder.java index a3141bff7c6e2..a3243ef3865a7 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentBuilder.java @@ -58,9 +58,7 @@ private void endObject() { * Creates an object, with the specified {@code contents} */ public ChunkedToXContentBuilder xContentObject(ToXContent contents) { - startObject(); - append(contents); - endObject(); + addChunk((b, p) -> contents.toXContent(b.startObject(), p).endObject()); return this; } @@ -68,9 +66,7 @@ public ChunkedToXContentBuilder xContentObject(ToXContent contents) { * Creates an object named {@code name}, with the specified {@code contents} */ public ChunkedToXContentBuilder xContentObject(String name, ToXContent contents) { - startObject(name); - append(contents); - endObject(); + addChunk((b, p) -> contents.toXContent(b.startObject(name), p).endObject()); return this; } From 0d8d8bd39282dd146ead0e8000a96d31587127de 
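+
+As a minimal sketch (assuming an index with a `status` keyword field and a `publish_date` date field), both of the checks above can run in filter context inside a `bool` query:
+
+[source,console]
+----
+GET /_search
+{
+  "query": {
+    "bool": {
+      "filter": [
+        { "term": { "status": "published" } },
+        { "range": { "publish_date": { "gte": "2015-01-01", "lte": "2016-12-31" } } }
+      ]
+    }
+  }
+}
+----
+
+Neither clause contributes to the `_score`; each document is simply included or excluded, and the filter results can be cached.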
Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 28 Oct 2024 11:05:44 +0100 Subject: [PATCH 182/202] [DOCS] Add search and filtering tutorial/quickstart, edit filtering page (#114353) --- .../query-dsl/query_filter_context.asciidoc | 47 +- .../full-text-filtering-tutorial.asciidoc | 626 ++++++++++++++++++ docs/reference/quickstart/index.asciidoc | 4 +- 3 files changed, 662 insertions(+), 15 deletions(-) create mode 100644 docs/reference/quickstart/full-text-filtering-tutorial.asciidoc diff --git a/docs/reference/query-dsl/query_filter_context.asciidoc b/docs/reference/query-dsl/query_filter_context.asciidoc index 78e1549644aa6..1fd75ef0e841d 100644 --- a/docs/reference/query-dsl/query_filter_context.asciidoc +++ b/docs/reference/query-dsl/query_filter_context.asciidoc @@ -29,26 +29,45 @@ parameter, such as the `query` parameter in the [discrete] [[filter-context]] === Filter context -In a filter context, a query clause answers the question ``__Does this -document match this query clause?__'' The answer is a simple Yes or No -- no -scores are calculated. Filter context is mostly used for filtering structured -data, e.g. -* __Does this +timestamp+ fall into the range 2015 to 2016?__ -* __Is the +status+ field set to ++"published"++__? +A filter answers the binary question “Does this document match this query clause?”. The answer is simply "yes" or "no". +Filtering has several benefits: -Frequently used filters will be cached automatically by Elasticsearch, to -speed up performance. +. *Simple binary logic*: In a filter context, a query clause determines document matches based on a yes/no criterion, without score calculation. +. *Performance*: Because they don't compute relevance scores, filters execute faster than queries. +. *Caching*: {es} automatically caches frequently used filters, speeding up subsequent search performance. +. *Resource efficiency*: Filters consume less CPU resources compared to full-text queries. +. *Query combination*: Filters can be combined with scored queries to refine result sets efficiently. -Filter context is in effect whenever a query clause is passed to a `filter` -parameter, such as the `filter` or `must_not` parameters in the -<> query, the `filter` parameter in the -<> query, or the -<> aggregation. +Filters are particularly effective for querying structured data and implementing "must have" criteria in complex searches. + +Structured data refers to information that is highly organized and formatted in a predefined manner. In the context of Elasticsearch, this typically includes: + +* Numeric fields (integers, floating-point numbers) +* Dates and timestamps +* Boolean values +* Keyword fields (exact match strings) +* Geo-points and geo-shapes + +Unlike full-text fields, structured data has a consistent, predictable format, making it ideal for precise filtering operations. + +Common filter applications include: + +* Date range checks: for example is the `timestamp` field between 2015 and 2016 +* Specific field value checks: for example is the `status` field equal to "published" or is the `author` field equal to "John Doe" + +Filter context applies when a query clause is passed to a `filter` parameter, such as: + +* `filter` or `must_not` parameters in <> queries +* `filter` parameter in <> queries +* <> aggregations + +Filters optimize query performance and efficiency, especially for structured data queries and when combined with full-text searches. 
[discrete]
[[query-filter-context-ex]]
=== Example of query and filter contexts
+
Below is an example of query clauses being used in query and filter
context in the `search` API. This query will match documents where all of
the following conditions are met:
@@ -93,4 +112,4 @@ significand's precision will be converted to floats with loss of precision.
TIP: Use query clauses in query context for conditions which should affect the
score of matching documents (i.e. how well does the document match), and use
-all other query clauses in filter context.
\ No newline at end of file
+all other query clauses in filter context.
diff --git a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc
new file mode 100644
index 0000000000000..46cadc19f2547
--- /dev/null
+++ b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc
@@ -0,0 +1,626 @@
+[[full-text-filter-tutorial]]
+== Basic full-text search and filtering in {es}
+++++
+Basics: Full-text search and filtering
+++++
+
+This is a hands-on introduction to the basics of full-text search with {es}, also known as _lexical search_, using the <> and <>.
+You'll also learn how to filter data, to narrow down search results based on exact criteria.
+
+In this scenario, we're implementing a search function for a cooking blog.
+The blog contains recipes with various attributes including textual content, categorical data, and numerical ratings.
+
+The goal is to create search queries that enable users to:
+
+* Find recipes based on ingredients they want to use or avoid
+* Discover dishes suitable for their dietary needs
+* Find highly-rated recipes in specific categories
+* Find recent recipes from their favorite authors
+
+To achieve these goals we'll use different Elasticsearch queries to perform full-text search, apply filters, and combine multiple search criteria.
+
+[discrete]
+[[full-text-filter-tutorial-create-index]]
+=== Step 1: Create an index
+
+Create the `cooking_blog` index to get started:
+
+[source,console]
+----
+PUT /cooking_blog
+----
+// TESTSETUP
+
+Now define the mappings for the index:
+
+[source,console]
+----
+PUT /cooking_blog/_mapping
+{
+  "properties": {
+    "title": {
+      "type": "text",
+      "analyzer": "standard", <1>
+      "fields": { <2>
+        "keyword": {
+          "type": "keyword",
+          "ignore_above": 256 <3>
+        }
+      }
+    },
+    "description": {
+      "type": "text",
+      "fields": {
+        "keyword": {
+          "type": "keyword"
+        }
+      }
+    },
+    "author": {
+      "type": "text",
+      "fields": {
+        "keyword": {
+          "type": "keyword"
+        }
+      }
+    },
+    "date": {
+      "type": "date",
+      "format": "yyyy-MM-dd"
+    },
+    "category": {
+      "type": "text",
+      "fields": {
+        "keyword": {
+          "type": "keyword"
+        }
+      }
+    },
+    "tags": {
+      "type": "text",
+      "fields": {
+        "keyword": {
+          "type": "keyword"
+        }
+      }
+    },
+    "rating": {
+      "type": "float"
+    }
+  }
+}
+----
+// TEST
+<1> The `standard` analyzer is used by default for `text` fields if an `analyzer` isn't specified. It's included here for demonstration purposes.
+<2> <> are used here to index `text` fields as both `text` and `keyword` <>. This enables both full-text search and exact matching/filtering on the same field.
+Note that if you used <>, these multi-fields would be created automatically.
+<3> The <> prevents indexing values longer than 256 characters in the `keyword` field. Again this is the default value, but it's included here for demonstration purposes.
+It helps to save disk space and avoid potential issues with Lucene's term byte-length limit.
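+
+As an optional sanity check (not a required tutorial step), you can confirm what was stored by retrieving the mapping:
+
+[source,console]
+----
+GET /cooking_blog/_mapping
+----
+// TEST[continued]
+
+The response echoes the field definitions above, including the `keyword` multi-fields defined for each `text` field.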
+ +[TIP] +==== +Full-text search is powered by <>. +Text analysis normalizes and standardizes text data so it can be efficiently stored in an inverted index and searched in near real-time. +Analysis happens at both <>. +This tutorial won't cover analysis in detail, but it's important to understand how text is processed to create effective search queries. +==== + +[discrete] +[[full-text-filter-tutorial-index-data]] +=== Step 2: Add sample blog posts to your index + +Now you'll need to index some example blog posts using the <>. +Note that `text` fields are analyzed and multi-fields are generated at index time. + +[source,console] +---- +POST /cooking_blog/_bulk?refresh=wait_for +{"index":{"_id":"1"}} +{"title":"Perfect Pancakes: A Fluffy Breakfast Delight","description":"Learn the secrets to making the fluffiest pancakes, so amazing you won't believe your tastebuds. This recipe uses buttermilk and a special folding technique to create light, airy pancakes that are perfect for lazy Sunday mornings.","author":"Maria Rodriguez","date":"2023-05-01","category":"Breakfast","tags":["pancakes","breakfast","easy recipes"],"rating":4.8} +{"index":{"_id":"2"}} +{"title":"Spicy Thai Green Curry: A Vegetarian Adventure","description":"Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.","author":"Liam Chen","date":"2023-05-05","category":"Main Course","tags":["thai","vegetarian","curry","spicy"],"rating":4.6} +{"index":{"_id":"3"}} +{"title":"Classic Beef Stroganoff: A Creamy Comfort Food","description":"Indulge in this rich and creamy beef stroganoff. Tender strips of beef in a savory mushroom sauce, served over a bed of egg noodles. It's the ultimate comfort food for chilly evenings.","author":"Emma Watson","date":"2023-05-10","category":"Main Course","tags":["beef","pasta","comfort food"],"rating":4.7} +{"index":{"_id":"4"}} +{"title":"Vegan Chocolate Avocado Mousse","description":"Discover the magic of avocado in this rich, vegan chocolate mousse. Creamy, indulgent, and secretly healthy, it's the perfect guilt-free dessert for chocolate lovers.","author":"Alex Green","date":"2023-05-15","category":"Dessert","tags":["vegan","chocolate","avocado","healthy dessert"],"rating":4.5} +{"index":{"_id":"5"}} +{"title":"Crispy Oven-Fried Chicken","description":"Get that perfect crunch without the deep fryer! This oven-fried chicken recipe delivers crispy, juicy results every time. A healthier take on the classic comfort food.","author":"Maria Rodriguez","date":"2023-05-20","category":"Main Course","tags":["chicken","oven-fried","healthy"],"rating":4.9} +---- +// TEST[continued] + +[discrete] +[[full-text-filter-tutorial-match-query]] +=== Step 3: Perform basic full-text searches + +Full-text search involves executing text-based queries across one or more document fields. +These queries calculate a relevance score for each matching document, based on how closely the document's content aligns with the search terms. +{es} offers various query types, each with its own method for matching text and <>. + +[discrete] +==== `match` query + +The <> query is the standard query for full-text, or "lexical", search. +The query text will be analyzed according to the analyzer configuration specified on each field (or at query time). 
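+
+If you're curious how the `standard` analyzer processes a query string, you can experiment with the `_analyze` API (an optional aside, not a required step):
+
+[source,console]
+----
+GET /_analyze
+{
+  "analyzer": "standard",
+  "text": "Fluffy Pancakes"
+}
+----
+// TEST[continued]
+
+The response shows two lowercased tokens, `fluffy` and `pancakes`. These tokens, not the raw string, are what the `match` query below compares against the inverted index.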
+ +First, search the `description` field for "fluffy pancakes": + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "match": { + "description": { + "query": "fluffy pancakes" <1> + } + } + } +} +---- +// TEST[continued] +<1> By default, the `match` query uses `OR` logic between the resulting tokens. This means it will match documents that contain either "fluffy" or "pancakes", or both, in the description field. + +At search time, {es} defaults to the analyzer defined in the field mapping. In this example, we're using the `standard` analyzer. Using a different analyzer at search time is an <>. + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 0, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { <1> + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 1.8378843, <2> + "hits": [ + { + "_index": "cooking_blog", + "_id": "1", + "_score": 1.8378843, <3> + "_source": { + "title": "Perfect Pancakes: A Fluffy Breakfast Delight", <4> + "description": "Learn the secrets to making the fluffiest pancakes, so amazing you won't believe your tastebuds. This recipe uses buttermilk and a special folding technique to create light, airy pancakes that are perfect for lazy Sunday mornings.", <5> + "author": "Maria Rodriguez", + "date": "2023-05-01", + "category": "Breakfast", + "tags": [ + "pancakes", + "breakfast", + "easy recipes" + ], + "rating": 4.8 + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 0/"took": "$body.took"/] +// TESTRESPONSE[s/"total": 1/"total": $body._shards.total/] +// TESTRESPONSE[s/"successful": 1/"successful": $body._shards.successful/] +// TESTRESPONSE[s/"value": 1/"value": $body.hits.total.value/] +// TESTRESPONSE[s/"max_score": 1.8378843/"max_score": $body.hits.max_score/] +// TESTRESPONSE[s/"_score": 1.8378843/"_score": $body.hits.hits.0._score/] +<1> The `hits` object contains the total number of matching documents and their relation to the total. Refer to <> for more details about the `hits` object. +<2> `max_score` is the highest relevance score among all matching documents. In this example, we only have one matching document. +<3> `_score` is the relevance score for a specific document, indicating how well it matches the query. Higher scores indicate better matches. In this example the `max_score` is the same as the `_score`, as there is only one matching document. +<4> The title contains both "Fluffy" and "Pancakes", matching our search terms exactly. +<5> The description includes "fluffiest" and "pancakes", further contributing to the document's relevance due to the analysis process. +============== + +[discrete] +==== Require all terms in a match query + +Specify the `and` operator to require both terms in the `description` field. +This stricter search returns _zero hits_ on our sample data, as no document contains both "fluffy" and "pancakes" in the description. 
+ +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "match": { + "description": { + "query": "fluffy pancakes", + "operator": "and" + } + } + } +} +---- +// TEST[continued] + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 0, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 0, + "relation": "eq" + }, + "max_score": null, + "hits": [] + } +} +---- +// TESTRESPONSE[s/"took": 0/"took": "$body.took"/] +============== + +[discrete] +==== Specify a minimum number of terms to match + +Use the <> parameter to specify the minimum number of terms a document should have to be included in the search results. + +Search the title field to match at least 2 of the 3 terms: "fluffy", "pancakes", or "breakfast". +This is useful for improving relevance while allowing some flexibility. + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "match": { + "title": { + "query": "fluffy pancakes breakfast", + "minimum_should_match": 2 + } + } + } +} +---- +// TEST[continued] + +[discrete] +[[full-text-filter-tutorial-multi-match]] +=== Step 4: Search across multiple fields at once + +When users enter a search query, they often don't know (or care) whether their search terms appear in a specific field. +A <> query allows searching across multiple fields simultaneously. + +Let's start with a basic `multi_match` query: + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "multi_match": { + "query": "vegetarian curry", + "fields": ["title", "description", "tags"] + } + } +} +---- +// TEST[continued] + +This query searches for "vegetarian curry" across the title, description, and tags fields. Each field is treated with equal importance. + +However, in many cases, matches in certain fields (like the title) might be more relevant than others. We can adjust the importance of each field using field boosting: + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "multi_match": { + "query": "vegetarian curry", + "fields": ["title^3", "description^2", "tags"] <1> + } + } +} +---- +// TEST[continued] +<1> The `^` syntax applies a boost to specific fields: ++ +* `title^3`: The title field is 3 times more important than an unboosted field +* `description^2`: The description is 2 times more important +* `tags`: No boost applied (equivalent to `^1`) ++ +These boosts help tune relevance, prioritizing matches in the title over the description, and matches in the description over tags. + +Learn more about fields and per-field boosting in the <> reference. + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 0, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 7.546015, + "hits": [ + { + "_index": "cooking_blog", + "_id": "2", + "_score": 7.546015, + "_source": { + "title": "Spicy Thai Green Curry: A Vegetarian Adventure", <1> + "description": "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. 
Don't worry about the heat - you can easily adjust the spice level to your liking.", <2> + "author": "Liam Chen", + "date": "2023-05-05", + "category": "Main Course", + "tags": [ + "thai", + "vegetarian", + "curry", + "spicy" + ], <3> + "rating": 4.6 + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 0/"took": "$body.took"/] +// TESTRESPONSE[s/"_score": 7.546015/"_score": $body.hits.hits.0._score/] +// TESTRESPONSE[s/"max_score": 7.546015/"max_score": $body.hits.max_score/] +<1> The title contains "Vegetarian" and "Curry", which matches our search terms. The title field has the highest boost (^3), contributing significantly to this document's relevance score. +<2> The description contains "curry" and related terms like "vegetables", further increasing the document's relevance. +<3> The tags include both "vegetarian" and "curry", providing an exact match for our search terms, albeit with no boost. + +This result demonstrates how the `multi_match` query with field boosts helps users find relevant recipes across multiple fields. +Even though the exact phrase "vegetarian curry" doesn't appear in any single field, the combination of matches across fields produces a highly relevant result. +============== + +[TIP] +==== +The `multi_match` query is often recommended over a single `match` query for most text search use cases, as it provides more flexibility and better matches user expectations. +==== + +[discrete] +[[full-text-filter-tutorial-filtering]] +=== Step 5: Filter and find exact matches + +<> allows you to narrow down your search results based on exact criteria. +Unlike full-text searches, filters are binary (yes/no) and do not affect the relevance score. +Filters execute faster than queries because excluded results don't need to be scored. + +This <> query will return only blog posts in the "Breakfast" category. + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "bool": { + "filter": [ + { "term": { "category.keyword": "Breakfast" } } <1> + ] + } + } +} +---- +// TEST[continued] +<1> Note the use of `category.keyword` here. This refers to the <> multi-field of the `category` field, ensuring an exact, case-sensitive match. + +[TIP] +==== +The `.keyword` suffix accesses the unanalyzed version of a field, enabling exact, case-sensitive matching. This works in two scenarios: + +1. *When using dynamic mapping for text fields*. Elasticsearch automatically creates a `.keyword` sub-field. +2. *When text fields are explicitly mapped with a `.keyword` sub-field*. For example, we explicitly mapped the `category` field in <> of this tutorial. +==== + +[discrete] +[[full-text-filter-tutorial-range-query]] +==== Search for posts within a date range + +Often users want to find content published within a specific time frame. +A <> query finds documents that fall within numeric or date ranges. + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "range": { + "date": { + "gte": "2023-05-01", <1> + "lte": "2023-05-31" <2> + } + } + } +} +---- +// TEST[continued] +<1> Greater than or equal to May 1, 2023. +<2> Less than or equal to May 31, 2023. + +[discrete] +[[full-text-filter-tutorial-term-query]] +==== Find exact matches + +Sometimes users want to search for exact terms to eliminate ambiguity in their search results. +A <> query searches for an exact term in a field without analyzing it. +Exact, case-sensitive matches on specific terms are often referred to as "keyword" searches. + +Here you'll search for the author "Maria Rodriguez" in the `author.keyword` field. 
+ +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "term": { + "author.keyword": "Maria Rodriguez" <1> + } + } +} +---- +// TEST[continued] +<1> The `term` query has zero flexibility. For example, here the queries `maria` or `maria rodriguez` would have zero hits, due to case sensitivity. + +[TIP] +==== +Avoid using the `term` query for <> because they are transformed by the analysis process. +==== + +[discrete] +[[full-text-filter-tutorial-complex-bool]] +=== Step 6: Combine multiple search criteria + +A <> query allows you to combine multiple query clauses to create sophisticated searches. +In this tutorial scenario it's useful for when users have complex requirements for finding recipes. + +Let's create a query that addresses the following user needs: + +* Must be a vegetarian main course +* Should contain "curry" or "spicy" in the title or description +* Must not be a dessert +* Must have a rating of at least 4.5 +* Should prefer recipes published in the last month + +[source,console] +---- +GET /cooking_blog/_search +{ + "query": { + "bool": { + "must": [ + { + "term": { + "category.keyword": "Main Course" + } + }, + { + "term": { + "tags": "vegetarian" + } + }, + { + "range": { + "rating": { + "gte": 4.5 + } + } + } + ], + "should": [ + { + "multi_match": { + "query": "curry spicy", + "fields": ["title^2", "description"] + } + }, + { + "range": { + "date": { + "gte": "now-1M/d" + } + } + } + ], + "must_not": [ <1> + { + "term": { + "category.keyword": "Dessert" + } + } + ] + } + } +} +---- +// TEST[continued] +<1> The `must_not` clause excludes documents that match the specified criteria. This is a powerful tool for filtering out unwanted results. + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 1, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 7.9835095, + "hits": [ + { + "_index": "cooking_blog", + "_id": "2", + "_score": 7.9835095, + "_source": { + "title": "Spicy Thai Green Curry: A Vegetarian Adventure", <1> + "description": "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.", <2> + "author": "Liam Chen", + "date": "2023-05-05", + "category": "Main Course", <3> + "tags": [ <4> + "thai", + "vegetarian", <5> + "curry", + "spicy" + ], + "rating": 4.6 <6> + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 1/"took": "$body.took"/] +<1> The title contains "Spicy" and "Curry", matching our should condition. With the default <> behavior, this field contributes most to the relevance score. +<2> While the description also contains matching terms, only the best matching field's score is used by default. +<3> The recipe was published within the last month, satisfying our recency preference. +<4> The "Main Course" category matches our `must` condition. +<5> The "vegetarian" tag satisfies another `must` condition, while "curry" and "spicy" tags align with our `should` preferences. +<6> The rating of 4.6 meets our minimum rating requirement of 4.5. +============== + +[discrete] +[[full-text-filter-tutorial-learn-more]] +=== Learn more + +This tutorial introduced the basics of full-text search and filtering in {es}. 
+Building a real-world search experience requires understanding many more advanced concepts and techniques. +Here are some resources once you're ready to dive deeper: + +* <>: Understand all your options for searching and analyzing data in {es}. +* <>: Understand how text is processed for full-text search. +* <>: Learn about more advanced search techniques using the `_search` API, including semantic search. + + diff --git a/docs/reference/quickstart/index.asciidoc b/docs/reference/quickstart/index.asciidoc index 2d9114882254f..ed4c128392994 100644 --- a/docs/reference/quickstart/index.asciidoc +++ b/docs/reference/quickstart/index.asciidoc @@ -15,7 +15,8 @@ Get started <> , or see our <>. Learn about indices, documents, and mappings, and perform a basic search. +* <>. Learn about indices, documents, and mappings, and perform a basic search using the Query DSL. +* <>. Learn about different options for querying data, including full-text search and filtering, using the Query DSL. [discrete] [[quickstart-python-links]] @@ -27,3 +28,4 @@ If you're interested in using {es} with Python, check out Elastic Search Labs: * https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[Tutorial]: This walks you through building a complete search solution with {es} from the ground up using Flask. include::getting-started.asciidoc[] +include::full-text-filtering-tutorial.asciidoc[] From 78ccd2a4a216c25c6bf75833295a9f2d423fc19d Mon Sep 17 00:00:00 2001 From: Souradip Poddar <49103513+SouradipPoddar@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:48:34 +0530 Subject: [PATCH 183/202] 112274 converted cpu stats to support unsigned 64 bit number (#114681) --- docs/changelog/114681.yaml | 6 ++ .../org/elasticsearch/TransportVersions.java | 1 + .../common/io/stream/StreamOutput.java | 7 +++ .../org/elasticsearch/monitor/os/OsProbe.java | 48 ++++++++------- .../org/elasticsearch/monitor/os/OsStats.java | 61 +++++++++++++------ .../cluster/node/stats/NodeStatsTests.java | 9 ++- .../monitor/os/OsProbeTests.java | 24 ++++---- .../monitor/os/OsStatsTests.java | 11 +++- .../node/NodeStatsMonitoringDocTests.java | 11 +++- 9 files changed, 119 insertions(+), 59 deletions(-) create mode 100644 docs/changelog/114681.yaml diff --git a/docs/changelog/114681.yaml b/docs/changelog/114681.yaml new file mode 100644 index 0000000000000..2a9901114e56f --- /dev/null +++ b/docs/changelog/114681.yaml @@ -0,0 +1,6 @@ +pr: 114681 +summary: "Support for unsigned 64 bit numbers in Cpu stats" +area: Infra/Core +type: enhancement +issues: + - 112274 diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 3986ea4b97254..9454c27dd787c 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -183,6 +183,7 @@ static TransportVersion def(int id) { public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); public static final TransportVersion INDEX_MODE_LOOKUP = def(8_779_00_0); public static final TransportVersion INDEX_REQUEST_REMOVE_METERING = def(8_780_00_0); + public static final TransportVersion CPU_STAT_STRING_PARSING = def(8_781_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index c449065a953e2..d724e5ea25ca6 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -1234,4 +1234,11 @@ public void writeMissingWriteable(Class ignored) throws public void writeMissingString() throws IOException { writeBoolean(false); } + + /** + * Write a {@link BigInteger} to the stream + */ + public void writeBigInteger(BigInteger bigInteger) throws IOException { + writeString(bigInteger.toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 799264d8392b1..06ab6a6eee410 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -22,6 +22,7 @@ import java.lang.management.OperatingSystemMXBean; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.math.BigInteger; import java.nio.file.Files; import java.nio.file.Path; import java.util.Collections; @@ -341,8 +342,8 @@ List readProcSelfCgroup() throws IOException { * @return the total CPU time in nanoseconds * @throws IOException if an I/O exception occurs reading {@code cpuacct.usage} for the control group */ - private long getCgroupCpuAcctUsageNanos(final String controlGroup) throws IOException { - return Long.parseLong(readSysFsCgroupCpuAcctCpuAcctUsage(controlGroup)); + private BigInteger getCgroupCpuAcctUsageNanos(final String controlGroup) throws IOException { + return new BigInteger(readSysFsCgroupCpuAcctCpuAcctUsage(controlGroup)); } /** @@ -435,21 +436,22 @@ String readSysFsCgroupCpuAcctCpuAcctCfsQuota(final String controlGroup) throws I * @throws IOException if an I/O exception occurs reading {@code cpu.stat} for the control group */ private OsStats.Cgroup.CpuStat getCgroupCpuAcctCpuStat(final String controlGroup) throws IOException { + final var SENTINEL_VALUE = BigInteger.valueOf(-1); final List lines = readSysFsCgroupCpuAcctCpuStat(controlGroup); - long numberOfPeriods = -1; - long numberOfTimesThrottled = -1; - long timeThrottledNanos = -1; + var numberOfPeriods = SENTINEL_VALUE; + var numberOfTimesThrottled = SENTINEL_VALUE; + var timeThrottledNanos = SENTINEL_VALUE; for (final String line : lines) { final String[] fields = line.split("\\s+"); switch (fields[0]) { - case "nr_periods" -> numberOfPeriods = Long.parseLong(fields[1]); - case "nr_throttled" -> numberOfTimesThrottled = Long.parseLong(fields[1]); - case "throttled_time" -> timeThrottledNanos = Long.parseLong(fields[1]); + case "nr_periods" -> numberOfPeriods = new BigInteger(fields[1]); + case "nr_throttled" -> numberOfTimesThrottled = new BigInteger(fields[1]); + case "throttled_time" -> timeThrottledNanos = new BigInteger(fields[1]); } } - assert numberOfPeriods != -1; - assert numberOfTimesThrottled != -1; - assert timeThrottledNanos != -1; + assert numberOfPeriods.equals(SENTINEL_VALUE) == false; + assert numberOfTimesThrottled.equals(SENTINEL_VALUE) == false; + assert timeThrottledNanos.equals(SENTINEL_VALUE) == false; return new OsStats.Cgroup.CpuStat(numberOfPeriods, numberOfTimesThrottled, timeThrottledNanos); } @@ -635,28 +637,30 @@ boolean areCgroupStatsAvailable() throws IOException { * @throws IOException if an I/O exception 
occurs reading {@code cpu.stat} for the control group */ @SuppressForbidden(reason = "Uses PathUtils.get to generate meaningful assertion messages") - private Map getCgroupV2CpuStats(String controlGroup) throws IOException { + private Map getCgroupV2CpuStats(String controlGroup) throws IOException { final List lines = readCgroupV2CpuStats(controlGroup); - final Map stats = new HashMap<>(); + final Map stats = new HashMap<>(); + final BigInteger SENTINEL_VALUE = BigInteger.valueOf(-1); for (String line : lines) { String[] parts = line.split("\\s+"); assert parts.length == 2 : "Corrupt cpu.stat line: [" + line + "]"; - stats.put(parts[0], Long.parseLong(parts[1])); + stats.put(parts[0], new BigInteger(parts[1])); } final List expectedKeys = List.of("system_usec", "usage_usec", "user_usec"); expectedKeys.forEach(key -> { assert stats.containsKey(key) : "[" + key + "] missing from " + PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat"); - assert stats.get(key) != -1 : stats.get(key); + assert stats.get(key).compareTo(SENTINEL_VALUE) != 0 : stats.get(key).toString(); }); final List optionalKeys = List.of("nr_periods", "nr_throttled", "throttled_usec"); optionalKeys.forEach(key -> { if (stats.containsKey(key) == false) { - stats.put(key, 0L); + stats.put(key, BigInteger.ZERO); } - assert stats.get(key) != -1L : "[" + key + "] in " + PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat") + " is -1"; + assert stats.get(key).compareTo(SENTINEL_VALUE) != 0 + : "[" + key + "] in " + PathUtils.get("/sys/fs/cgroup", controlGroup, "cpu.stat") + " is -1"; }); return stats; @@ -682,7 +686,7 @@ private OsStats.Cgroup getCgroup() { assert controllerMap.isEmpty() == false; final String cpuAcctControlGroup; - final long cgroupCpuAcctUsageNanos; + final BigInteger cgroupCpuAcctUsageNanos; final long cgroupCpuAcctCpuCfsPeriodMicros; final long cgroupCpuAcctCpuCfsQuotaMicros; final String cpuControlGroup; @@ -696,9 +700,11 @@ private OsStats.Cgroup getCgroup() { cpuControlGroup = cpuAcctControlGroup = memoryControlGroup = controllerMap.get(""); // `cpuacct` was merged with `cpu` in v2 - final Map cpuStatsMap = getCgroupV2CpuStats(cpuControlGroup); + final Map cpuStatsMap = getCgroupV2CpuStats(cpuControlGroup); - cgroupCpuAcctUsageNanos = cpuStatsMap.get("usage_usec") * 1000; // convert from micros to nanos + final BigInteger THOUSAND = BigInteger.valueOf(1000); + + cgroupCpuAcctUsageNanos = cpuStatsMap.get("usage_usec").multiply(THOUSAND); // convert from micros to nanos long[] cpuLimits = getCgroupV2CpuLimit(cpuControlGroup); cgroupCpuAcctCpuCfsQuotaMicros = cpuLimits[0]; @@ -707,7 +713,7 @@ private OsStats.Cgroup getCgroup() { cpuStat = new OsStats.Cgroup.CpuStat( cpuStatsMap.get("nr_periods"), cpuStatsMap.get("nr_throttled"), - cpuStatsMap.get("throttled_usec") * 1000 + cpuStatsMap.get("throttled_usec").multiply(THOUSAND) ); cgroupMemoryLimitInBytes = getCgroupV2MemoryLimitInBytes(memoryControlGroup); diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 7a2f46668f610..6c1ba2dfbe63a 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.math.BigInteger; import java.util.Arrays; import java.util.Objects; @@ -362,7 +363,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public 
static class Cgroup implements Writeable, ToXContentFragment { private final String cpuAcctControlGroup; - private final long cpuAcctUsageNanos; + private final BigInteger cpuAcctUsageNanos; private final String cpuControlGroup; private final long cpuCfsPeriodMicros; private final long cpuCfsQuotaMicros; @@ -387,7 +388,7 @@ public String getCpuAcctControlGroup() { * * @return the total CPU time in nanoseconds */ - public long getCpuAcctUsageNanos() { + public BigInteger getCpuAcctUsageNanos() { return cpuAcctUsageNanos; } @@ -465,7 +466,7 @@ public String getMemoryUsageInBytes() { public Cgroup( final String cpuAcctControlGroup, - final long cpuAcctUsageNanos, + final BigInteger cpuAcctUsageNanos, final String cpuControlGroup, final long cpuCfsPeriodMicros, final long cpuCfsQuotaMicros, @@ -487,7 +488,11 @@ public Cgroup( Cgroup(final StreamInput in) throws IOException { cpuAcctControlGroup = in.readString(); - cpuAcctUsageNanos = in.readLong(); + if (in.getTransportVersion().onOrAfter(TransportVersions.CPU_STAT_STRING_PARSING)) { + cpuAcctUsageNanos = in.readBigInteger(); + } else { + cpuAcctUsageNanos = BigInteger.valueOf(in.readLong()); + } cpuControlGroup = in.readString(); cpuCfsPeriodMicros = in.readLong(); cpuCfsQuotaMicros = in.readLong(); @@ -500,7 +505,11 @@ public Cgroup( @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(cpuAcctControlGroup); - out.writeLong(cpuAcctUsageNanos); + if (out.getTransportVersion().onOrAfter(TransportVersions.CPU_STAT_STRING_PARSING)) { + out.writeBigInteger(cpuAcctUsageNanos); + } else { + out.writeLong(cpuAcctUsageNanos.longValue()); + } out.writeString(cpuControlGroup); out.writeLong(cpuCfsPeriodMicros); out.writeLong(cpuCfsQuotaMicros); @@ -551,9 +560,9 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa */ public static class CpuStat implements Writeable, ToXContentFragment { - private final long numberOfElapsedPeriods; - private final long numberOfTimesThrottled; - private final long timeThrottledNanos; + private final BigInteger numberOfElapsedPeriods; + private final BigInteger numberOfTimesThrottled; + private final BigInteger timeThrottledNanos; /** * The number of elapsed periods. 
@@ -561,7 +570,7 @@ public static class CpuStat implements Writeable, ToXContentFragment { * @return the number of elapsed periods as measured by * {@code cpu.cfs_period_us} */ - public long getNumberOfElapsedPeriods() { + public BigInteger getNumberOfElapsedPeriods() { return numberOfElapsedPeriods; } @@ -571,7 +580,7 @@ public long getNumberOfElapsedPeriods() { * * @return the number of times */ - public long getNumberOfTimesThrottled() { + public BigInteger getNumberOfTimesThrottled() { return numberOfTimesThrottled; } @@ -581,27 +590,43 @@ public long getNumberOfTimesThrottled() { * * @return the total time in nanoseconds */ - public long getTimeThrottledNanos() { + public BigInteger getTimeThrottledNanos() { return timeThrottledNanos; } - public CpuStat(final long numberOfElapsedPeriods, final long numberOfTimesThrottled, final long timeThrottledNanos) { + public CpuStat( + final BigInteger numberOfElapsedPeriods, + final BigInteger numberOfTimesThrottled, + final BigInteger timeThrottledNanos + ) { this.numberOfElapsedPeriods = numberOfElapsedPeriods; this.numberOfTimesThrottled = numberOfTimesThrottled; this.timeThrottledNanos = timeThrottledNanos; } CpuStat(final StreamInput in) throws IOException { - numberOfElapsedPeriods = in.readLong(); - numberOfTimesThrottled = in.readLong(); - timeThrottledNanos = in.readLong(); + if (in.getTransportVersion().onOrAfter(TransportVersions.CPU_STAT_STRING_PARSING)) { + numberOfElapsedPeriods = in.readBigInteger(); + numberOfTimesThrottled = in.readBigInteger(); + timeThrottledNanos = in.readBigInteger(); + } else { + numberOfElapsedPeriods = BigInteger.valueOf(in.readLong()); + numberOfTimesThrottled = BigInteger.valueOf(in.readLong()); + timeThrottledNanos = BigInteger.valueOf(in.readLong()); + } } @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeLong(numberOfElapsedPeriods); - out.writeLong(numberOfTimesThrottled); - out.writeLong(timeThrottledNanos); + if (out.getTransportVersion().onOrAfter(TransportVersions.CPU_STAT_STRING_PARSING)) { + out.writeBigInteger(numberOfElapsedPeriods); + out.writeBigInteger(numberOfTimesThrottled); + out.writeBigInteger(timeThrottledNanos); + } else { + out.writeLong(numberOfElapsedPeriods.longValue()); + out.writeLong(numberOfTimesThrottled.longValue()); + out.writeLong(timeThrottledNanos.longValue()); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 77d00f0e5a068..b5f61d5b798fa 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -85,6 +85,7 @@ import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.math.BigInteger; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; @@ -709,11 +710,15 @@ public static NodeStats createNodeStats() { new OsStats.Swap(swapTotal, randomLongBetween(0, swapTotal)), new OsStats.Cgroup( randomAlphaOfLength(8), - randomNonNegativeLong(), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), randomAlphaOfLength(8), randomNonNegativeLong(), randomNonNegativeLong(), - new OsStats.Cgroup.CpuStat(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), + new OsStats.Cgroup.CpuStat( + 
randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)) + ), randomAlphaOfLength(8), Long.toString(randomNonNegativeLong()), Long.toString(randomNonNegativeLong()) diff --git a/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java index 220f4336fc444..aad78881a8a13 100644 --- a/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java @@ -136,12 +136,12 @@ public void testOsStats() { if (Constants.LINUX) { if (stats.getCgroup() != null) { assertThat(stats.getCgroup().getCpuAcctControlGroup(), notNullValue()); - assertThat(stats.getCgroup().getCpuAcctUsageNanos(), greaterThan(0L)); + assertThat(stats.getCgroup().getCpuAcctUsageNanos(), greaterThan(BigInteger.ZERO)); assertThat(stats.getCgroup().getCpuCfsQuotaMicros(), anyOf(equalTo(-1L), greaterThanOrEqualTo(0L))); assertThat(stats.getCgroup().getCpuCfsPeriodMicros(), greaterThanOrEqualTo(0L)); - assertThat(stats.getCgroup().getCpuStat().getNumberOfElapsedPeriods(), greaterThanOrEqualTo(0L)); - assertThat(stats.getCgroup().getCpuStat().getNumberOfTimesThrottled(), greaterThanOrEqualTo(0L)); - assertThat(stats.getCgroup().getCpuStat().getTimeThrottledNanos(), greaterThanOrEqualTo(0L)); + assertThat(stats.getCgroup().getCpuStat().getNumberOfElapsedPeriods(), greaterThanOrEqualTo(BigInteger.ZERO)); + assertThat(stats.getCgroup().getCpuStat().getNumberOfTimesThrottled(), greaterThanOrEqualTo(BigInteger.ZERO)); + assertThat(stats.getCgroup().getCpuStat().getTimeThrottledNanos(), greaterThanOrEqualTo(BigInteger.ZERO)); // These could be null if transported from a node running an older version, but shouldn't be null on the current node assertThat(stats.getCgroup().getMemoryControlGroup(), notNullValue()); String memoryLimitInBytes = stats.getCgroup().getMemoryLimitInBytes(); @@ -191,26 +191,26 @@ public void testCgroupProbe() { case 1 -> { assertNotNull(cgroup); assertThat(cgroup.getCpuAcctControlGroup(), equalTo("/" + hierarchy)); - assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(364869866063112L)); + assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(new BigInteger("364869866063112"))); assertThat(cgroup.getCpuControlGroup(), equalTo("/" + hierarchy)); assertThat(cgroup.getCpuCfsPeriodMicros(), equalTo(100000L)); assertThat(cgroup.getCpuCfsQuotaMicros(), equalTo(50000L)); - assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(17992L)); - assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(1311L)); - assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(139298645489L)); + assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(BigInteger.valueOf(17992))); + assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(BigInteger.valueOf(1311))); + assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(new BigInteger("139298645489"))); assertThat(cgroup.getMemoryLimitInBytes(), equalTo("18446744073709551615")); assertThat(cgroup.getMemoryUsageInBytes(), equalTo("4796416")); } case 2 -> { assertNotNull(cgroup); assertThat(cgroup.getCpuAcctControlGroup(), equalTo("/" + hierarchy)); - assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(364869866063000L)); + 
assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(new BigInteger("364869866063000"))); assertThat(cgroup.getCpuControlGroup(), equalTo("/" + hierarchy)); assertThat(cgroup.getCpuCfsPeriodMicros(), equalTo(100000L)); assertThat(cgroup.getCpuCfsQuotaMicros(), equalTo(50000L)); - assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(17992L)); - assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(1311L)); - assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(139298645000L)); + assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(BigInteger.valueOf(17992))); + assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(BigInteger.valueOf(1311))); + assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(new BigInteger("139298645000"))); assertThat(cgroup.getMemoryLimitInBytes(), equalTo("18446744073709551615")); assertThat(cgroup.getMemoryUsageInBytes(), equalTo("4796416")); } diff --git a/server/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java b/server/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java index 4c53067ca123a..2146e47febe9c 100644 --- a/server/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/os/OsStatsTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.math.BigInteger; import static org.hamcrest.Matchers.equalTo; @@ -21,7 +22,7 @@ public class OsStatsTests extends ESTestCase { public void testSerialization() throws IOException { int numLoadAverages = randomIntBetween(1, 5); - double loadAverages[] = new double[numLoadAverages]; + double[] loadAverages = new double[numLoadAverages]; for (int i = 0; i < loadAverages.length; i++) { loadAverages[i] = randomDouble(); } @@ -32,11 +33,15 @@ public void testSerialization() throws IOException { OsStats.Swap swap = new OsStats.Swap(swapTotal, randomLongBetween(0, swapTotal)); OsStats.Cgroup cgroup = new OsStats.Cgroup( randomAlphaOfLength(8), - randomNonNegativeLong(), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE)), randomAlphaOfLength(8), randomNonNegativeLong(), randomNonNegativeLong(), - new OsStats.Cgroup.CpuStat(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), + new OsStats.Cgroup.CpuStat( + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)), + randomUnsignedLongBetween(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.TWO)) + ), randomAlphaOfLength(8), Long.toString(randomNonNegativeLong()), Long.toString(randomNonNegativeLong()) diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java index da23f27e1357e..3d7f843358646 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java @@ -37,6 +37,7 @@ import org.junit.Before; import java.io.IOException; +import java.math.BigInteger; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -227,7 +228,7 @@ public 
void testToXContent() throws IOException { "stat": { "number_of_elapsed_periods": 39, "number_of_times_throttled": 40, - "time_throttled_nanos": 41 + "time_throttled_nanos": 9223372036854775848 } }, "memory": { @@ -393,10 +394,14 @@ private static NodeStats mockNodeStats() { // Os final OsStats.Cpu osCpu = new OsStats.Cpu((short) no, new double[] { ++iota, ++iota, ++iota }); - final OsStats.Cgroup.CpuStat osCpuStat = new OsStats.Cgroup.CpuStat(++iota, ++iota, ++iota); + final OsStats.Cgroup.CpuStat osCpuStat = new OsStats.Cgroup.CpuStat( + BigInteger.valueOf(++iota), + BigInteger.valueOf(++iota), + BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.valueOf(++iota)) + ); final OsStats.Cgroup osCgroup = new OsStats.Cgroup( "_cpu_acct_ctrl_group", - ++iota, + BigInteger.valueOf(++iota), "_cpu_ctrl_group", ++iota, ++iota, From ddad15676eae431329d74000e16ab311c09d9cf9 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 28 Oct 2024 08:19:29 -0400 Subject: [PATCH 184/202] Updating knn tuning guide and size estimates (#115691) --- docs/reference/how-to/knn-search.asciidoc | 30 ++++++++++++++++------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index 1d9c988f7b6c9..83614b0d99024 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -16,10 +16,11 @@ structures. So these same recommendations also help with indexing speed. The default <> is `float`. But this can be automatically quantized during index time through <>. Quantization will reduce the -required memory by 4x, but it will also reduce the precision of the vectors and -increase disk usage for the field (by up to 25%). Increased disk usage is a +required memory by 4x, 8x, or as much as 32x, but it will also reduce the precision of the vectors and +increase disk usage for the field (by up to 25%, 12.5%, or 3.125%, respectively). Increased disk usage is a result of {es} storing both the quantized and the unquantized vectors. -For example, when quantizing 40GB of floating point vectors an extra 10GB of data will be stored for the quantized vectors. The total disk usage amounts to 50GB, but the memory usage for fast search will be reduced to 10GB. +For example, when int8 quantizing 40GB of floating point vectors an extra 10GB of data will be stored for the quantized vectors. +The total disk usage amounts to 50GB, but the memory usage for fast search will be reduced to 10GB. For `float` vectors with `dim` greater than or equal to `384`, using a <> index is highly recommended. @@ -68,12 +69,23 @@ Another option is to use <>. kNN search. HNSW is a graph-based algorithm which only works efficiently when most vector data is held in memory. You should ensure that data nodes have at least enough RAM to hold the vector data and index structures. To check the -size of the vector data, you can use the <> API. As a -loose rule of thumb, and assuming the default HNSW options, the bytes used will -be `num_vectors * 4 * (num_dimensions + 12)`. When using the `byte` <> -the space required will be closer to `num_vectors * (num_dimensions + 12)`. Note that -the required RAM is for the filesystem cache, which is separate from the Java -heap. +size of the vector data, you can use the <> API. 
+ +Here are estimates for different element types and quantization levels: ++ +-- +`element_type: float`: `num_vectors * num_dimensions * 4` +`element_type: float` with `quantization: int8`: `num_vectors * (num_dimensions + 4)` +`element_type: float` with `quantization: int4`: `num_vectors * (num_dimensions/2 + 4)` +`element_type: float` with `quantization: bbq`: `num_vectors * (num_dimensions/8 + 12)` +`element_type: byte`: `num_vectors * num_dimensions` +`element_type: bit`: `num_vectors * (num_dimensions/8)` +-- + +If utilizing HNSW, the graph must also be in memory, to estimate the required bytes use `num_vectors * 4 * HNSW.m`. The +default value for `HNSW.m` is 16, so by default `num_vectors * 4 * 16`. + +Note that the required RAM is for the filesystem cache, which is separate from the Java heap. The data nodes should also leave a buffer for other ways that RAM is needed. For example your index might also include text fields and numerics, which also From ab558e663c4e5e086d16e862cff11bc6cdb82486 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Mon, 28 Oct 2024 14:13:14 +0100 Subject: [PATCH 185/202] Update quickstart overview, add local install instructions (#115746) --- .../full-text-filtering-tutorial.asciidoc | 13 +++++++++++++ .../quickstart/getting-started.asciidoc | 15 ++++++++++----- docs/reference/quickstart/index.asciidoc | 19 +++++++++++++++---- .../run-elasticsearch-locally.asciidoc | 19 ++----------------- 4 files changed, 40 insertions(+), 26 deletions(-) diff --git a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc index 46cadc19f2547..fee4b797da724 100644 --- a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc +++ b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc @@ -19,6 +19,19 @@ The goal is to create search queries that enable users to: To achieve these goals we'll use different Elasticsearch queries to perform full-text search, apply filters, and combine multiple search criteria. +[discrete] +[[full-text-filter-tutorial-requirements]] +=== Requirements + +You'll need a running {es} cluster, together with {kib} to use the Dev Tools API Console. +Run the following command in your terminal to set up a <>: + +[source,sh] +---- +curl -fsSL https://elastic.co/start-local | sh +---- +// NOTCONSOLE + [discrete] [[full-text-filter-tutorial-create-index]] === Step 1: Create an index diff --git a/docs/reference/quickstart/getting-started.asciidoc b/docs/reference/quickstart/getting-started.asciidoc index a6d233d8b8abc..03bfb62548b25 100644 --- a/docs/reference/quickstart/getting-started.asciidoc +++ b/docs/reference/quickstart/getting-started.asciidoc @@ -15,12 +15,17 @@ You can {kibana-ref}/console-kibana.html#import-export-console-requests[convert ==== [discrete] -[[getting-started-prerequisites]] -=== Prerequisites +[[getting-started-requirements]] +=== Requirements -Before you begin, you need to have a running {es} cluster. -The fastest way to get started is with a <>. -Refer to <> for other deployment options. +You'll need a running {es} cluster, together with {kib} to use the Dev Tools API Console. 
+Run the following command in your terminal to set up a <>: + +[source,sh] +---- +curl -fsSL https://elastic.co/start-local | sh +---- +// NOTCONSOLE //// [source,console] diff --git a/docs/reference/quickstart/index.asciidoc b/docs/reference/quickstart/index.asciidoc index ed4c128392994..3fa6d53e6345d 100644 --- a/docs/reference/quickstart/index.asciidoc +++ b/docs/reference/quickstart/index.asciidoc @@ -9,7 +9,15 @@ Unless otherwise noted, these examples will use queries written in <> , or see our <>. +Run the following command in your terminal to set up a <>: + +[source,sh] +---- +curl -fsSL https://elastic.co/start-local | sh +---- +// NOTCONSOLE + +Alternatively, refer to our <>. [discrete] [[quickstart-list]] @@ -17,15 +25,18 @@ Get started <> , or see our <>. Learn about indices, documents, and mappings, and perform a basic search using the Query DSL. * <>. Learn about different options for querying data, including full-text search and filtering, using the Query DSL. +* <>: Learn how to create embeddings for your data with `semantic_text` and query using the `semantic` query. +** <>: Learn how to combine semantic search with full-text search. +* <>: Learn how to ingest dense vector embeddings into {es}. -[discrete] -[[quickstart-python-links]] -== Working in Python +.Working in Python +****************** If you're interested in using {es} with Python, check out Elastic Search Labs: * https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs` repository]: Contains a range of Python https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[notebooks] and https://github.com/elastic/elasticsearch-labs/tree/main/example-apps[example apps]. * https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[Tutorial]: This walks you through building a complete search solution with {es} from the ground up using Flask. +****************** include::getting-started.asciidoc[] include::full-text-filtering-tutorial.asciidoc[] diff --git a/docs/reference/run-elasticsearch-locally.asciidoc b/docs/reference/run-elasticsearch-locally.asciidoc index 03885132e4050..371660f2da7c9 100644 --- a/docs/reference/run-elasticsearch-locally.asciidoc +++ b/docs/reference/run-elasticsearch-locally.asciidoc @@ -42,6 +42,7 @@ To set up {es} and {kib} locally, run the `start-local` script: curl -fsSL https://elastic.co/start-local | sh ---- // NOTCONSOLE +// REVIEWED[OCT.28.2024] This script creates an `elastic-start-local` folder containing configuration files and starts both {es} and {kib} using Docker. @@ -50,29 +51,13 @@ After running the script, you can access Elastic services at the following endpo * *{es}*: http://localhost:9200 * *{kib}*: http://localhost:5601 -The script generates a random password for the `elastic` user, which is displayed at the end of the installation and stored in the `.env` file. +The script generates a random password for the `elastic` user, and an API key, stored in the `.env` file. [CAUTION] ==== This setup is for local testing only. HTTPS is disabled, and Basic authentication is used for {es}. For security, {es} and {kib} are accessible only through `localhost`. ==== -[discrete] -[[api-access]] -=== API access - -An API key for {es} is generated and stored in the `.env` file as `ES_LOCAL_API_KEY`. -Use this key to connect to {es} with a https://www.elastic.co/guide/en/elasticsearch/client/index.html[programming language client] or the <>. 
-
-From the `elastic-start-local` folder, check the connection to Elasticsearch using `curl`:
-
-[source,sh]
-----
-source .env
-curl $ES_LOCAL_URL -H "Authorization: ApiKey ${ES_LOCAL_API_KEY}"
-----
-// NOTCONSOLE
-
 [discrete]
 [[local-dev-additional-info]]
 === Learn more

From 0e184afad8b8894803c70bccf2ce7fbeb6acf829 Mon Sep 17 00:00:00 2001
From: Ankita Kumar
Date: Mon, 28 Oct 2024 09:18:24 -0400
Subject: [PATCH 186/202] Change Reindexing metrics unit from millis to
 seconds (#115721)

Fix the units for reindexing took time metrics from milliseconds to seconds.

---
 docs/changelog/115721.yaml                                   | 5 +++++
 .../main/java/org/elasticsearch/reindex/ReindexMetrics.java | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)
 create mode 100644 docs/changelog/115721.yaml

diff --git a/docs/changelog/115721.yaml b/docs/changelog/115721.yaml
new file mode 100644
index 0000000000000..53703dcca872a
--- /dev/null
+++ b/docs/changelog/115721.yaml
@@ -0,0 +1,5 @@
+pr: 115721
+summary: Change Reindexing metrics unit from millis to seconds
+area: Reindex
+type: enhancement
+issues: []
diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java
index d985a21815103..f7975120d9fc8 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexMetrics.java
@@ -19,7 +19,7 @@ public class ReindexMetrics {
     private final LongHistogram reindexTimeSecsHistogram;

     public ReindexMetrics(MeterRegistry meterRegistry) {
-        this(meterRegistry.registerLongHistogram(REINDEX_TIME_HISTOGRAM, "Time to reindex by search", "millis"));
+        this(meterRegistry.registerLongHistogram(REINDEX_TIME_HISTOGRAM, "Time to reindex by search", "seconds"));
     }

     private ReindexMetrics(LongHistogram reindexTimeSecsHistogram) {
From f0adbc6d50255268b4ebe327ceb2e70994525f8a Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 28 Oct 2024 14:18:51 +0100
Subject: [PATCH 187/202] Simplify result handling in FetchSearchPhase (#115723)

The results instance does not need to be a field. Its state handling is
fairly straightforward: it needs to be released once all fetches have been
processed. No need to even create it on any other path, so I split up the
method slightly to clearly isolate when and how we need the results
instance.

Part of #115722
---
 .../action/search/FetchSearchPhase.java       | 92 ++++++++++---------
 1 file changed, 48 insertions(+), 44 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java
index 7ad61f60c0088..99b24bd483fb4 100644
--- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java
@@ -33,7 +33,6 @@
 * Then it reaches out to all relevant shards to fetch the topN hits.
*/ final class FetchSearchPhase extends SearchPhase { - private final ArraySearchPhaseResults fetchResults; private final AtomicArray searchPhaseShardResults; private final BiFunction, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; @@ -79,8 +78,6 @@ final class FetchSearchPhase extends SearchPhase { + resultConsumer.getNumShards() ); } - this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards()); - context.addReleasable(fetchResults); this.searchPhaseShardResults = resultConsumer.getAtomicArray(); this.aggregatedDfs = aggregatedDfs; this.nextPhaseFactory = nextPhaseFactory; @@ -129,48 +126,56 @@ private void innerRun() throws Exception { // we have to release contexts here to free up resources searchPhaseShardResults.asList() .forEach(searchPhaseShardResult -> releaseIrrelevantSearchContext(searchPhaseShardResult, context)); - moveToNextPhase(fetchResults.getAtomicArray(), reducedQueryPhase); + moveToNextPhase(new AtomicArray<>(numShards), reducedQueryPhase); } else { - final boolean shouldExplainRank = shouldExplainRankScores(context.getRequest()); - final List> rankDocsPerShard = false == shouldExplainRank - ? null - : splitRankDocsPerShard(scoreDocs, numShards); - final ScoreDoc[] lastEmittedDocPerShard = context.getRequest().scroll() != null - ? SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) - : null; - final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); - final CountedCollector counter = new CountedCollector<>( - fetchResults, - docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not - () -> moveToNextPhase(fetchResults.getAtomicArray(), reducedQueryPhase), - context - ); - for (int i = 0; i < docIdsToLoad.length; i++) { - List entry = docIdsToLoad[i]; - RankDocShardInfo rankDocs = rankDocsPerShard == null || rankDocsPerShard.get(i).isEmpty() - ? null - : new RankDocShardInfo(rankDocsPerShard.get(i)); - SearchPhaseResult shardPhaseResult = searchPhaseShardResults.get(i); - if (entry == null) { // no results for this shard ID - if (shardPhaseResult != null) { - // if we got some hits from this shard we have to release the context there - // we do this as we go since it will free up resources and passing on the request on the - // transport layer is cheap. - releaseIrrelevantSearchContext(shardPhaseResult, context); - progressListener.notifyFetchResult(i); - } - // in any case we count down this result since we don't talk to this shard anymore - counter.countDown(); - } else { - executeFetch( - shardPhaseResult, - counter, - entry, - rankDocs, - (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[i] : null - ); - } + innerRunFetch(scoreDocs, numShards, reducedQueryPhase); + } + } + } + + private void innerRunFetch(ScoreDoc[] scoreDocs, int numShards, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) { + ArraySearchPhaseResults fetchResults = new ArraySearchPhaseResults<>(numShards); + final List> rankDocsPerShard = false == shouldExplainRankScores(context.getRequest()) + ? null + : splitRankDocsPerShard(scoreDocs, numShards); + final ScoreDoc[] lastEmittedDocPerShard = context.getRequest().scroll() != null + ? 
SearchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) + : null; + final List[] docIdsToLoad = SearchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); + final CountedCollector counter = new CountedCollector<>( + fetchResults, + docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not + () -> { + try (fetchResults) { + moveToNextPhase(fetchResults.getAtomicArray(), reducedQueryPhase); + } + }, + context + ); + for (int i = 0; i < docIdsToLoad.length; i++) { + List entry = docIdsToLoad[i]; + RankDocShardInfo rankDocs = rankDocsPerShard == null || rankDocsPerShard.get(i).isEmpty() + ? null + : new RankDocShardInfo(rankDocsPerShard.get(i)); + SearchPhaseResult shardPhaseResult = searchPhaseShardResults.get(i); + if (entry == null) { // no results for this shard ID + if (shardPhaseResult != null) { + // if we got some hits from this shard we have to release the context there + // we do this as we go since it will free up resources and passing on the request on the + // transport layer is cheap. + releaseIrrelevantSearchContext(shardPhaseResult, context); + progressListener.notifyFetchResult(i); } + // in any case we count down this result since we don't talk to this shard anymore + counter.countDown(); + } else { + executeFetch( + shardPhaseResult, + counter, + entry, + rankDocs, + (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[i] : null + ); } } } @@ -257,7 +262,6 @@ private void moveToNextPhase( ) { var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); context.addReleasable(resp::decRef); - fetchResults.close(); context.executeNextPhase(this, nextPhaseFactory.apply(resp, searchPhaseShardResults)); } From 4058daf8b291f486ce7c75ac17b0ccdd31e3cac9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 28 Oct 2024 14:31:42 +0100 Subject: [PATCH 188/202] =?UTF-8?q?Revert=20"[DOCS]=20Documents=20that=20E?= =?UTF-8?q?LSER=20is=20the=20default=20service=20for=20`semantic=5Ftext?= =?UTF-8?q?=E2=80=A6"=20(#115748)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 541bcf30e5d03944cace8deec24559fc63c8bcb5. --- .../mapping/types/semantic-text.asciidoc | 26 +-------- .../semantic-search-semantic-text.asciidoc | 57 ++++++++++++++++--- 2 files changed, 50 insertions(+), 33 deletions(-) diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index 893e2c6cff8ed..ac23c153e01a3 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -13,47 +13,25 @@ Long passages are <> to smaller secti The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings. You can create the inference endpoint by using the <>. This field type and the <> type make it simpler to perform semantic search on your data. -If you don't specify an inference endpoint, the <> is used by default. Using `semantic_text`, you won't need to specify how to generate embeddings for your data, or how to index it. The {infer} endpoint automatically determines the embedding generation, indexing, and query to use. 
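As an illustrative sketch (the index and field names follow the examples
below, and the `semantic` query is the query type designed for
`semantic_text` fields), a search against such a field needs no manual
embedding step:

[source,console]
------------------------------------------------------------
GET my-index-000001/_search
{
  "query": {
    "semantic": {
      "field": "inference_field",
      "query": "wild flowers in the mountains"
    }
  }
}
------------------------------------------------------------
// TEST[skip:Requires inference endpoint]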
-If you use the ELSER service, you can set up `semantic_text` with the following API request: - [source,console] ------------------------------------------------------------ PUT my-index-000001 -{ - "mappings": { - "properties": { - "inference_field": { - "type": "semantic_text" - } - } - } -} ------------------------------------------------------------- - -NOTE: In Serverless, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` even if you use the ELSER service. - -If you use a service other than ELSER, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` as the following example demonstrates: - -[source,console] ------------------------------------------------------------- -PUT my-index-000002 { "mappings": { "properties": { "inference_field": { "type": "semantic_text", - "inference_id": "my-openai-endpoint" <1> + "inference_id": "my-elser-endpoint" } } } } ------------------------------------------------------------ // TEST[skip:Requires inference endpoint] -<1> The `inference_id` of the {infer} endpoint to use to generate embeddings. The recommended way to use semantic_text is by having dedicated {infer} endpoints for ingestion and search. @@ -62,7 +40,7 @@ After creating dedicated {infer} endpoints for both, you can reference them usin [source,console] ------------------------------------------------------------ -PUT my-index-000003 +PUT my-index-000002 { "mappings": { "properties": { diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index f881ca87a92e6..60692c19c184a 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -21,11 +21,45 @@ This tutorial uses the <> for demonstra [[semantic-text-requirements]] ==== Requirements -This tutorial uses the <> for demonstration, which is created automatically as needed. -To use the `semantic_text` field type with an {infer} service other than ELSER, you must create an inference endpoint using the <>. +To use the `semantic_text` field type, you must have an {infer} endpoint deployed in +your cluster using the <>. -NOTE: In Serverless, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` even if you use the ELSER service. +[discrete] +[[semantic-text-infer-endpoint]] +==== Create the {infer} endpoint + +Create an inference endpoint by using the <>: +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-endpoint <1> +{ + "service": "elser", <2> + "service_settings": { + "adaptive_allocations": { <3> + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + }, + "num_threads": 1 + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `sparse_embedding` in the path as the `elser` service will +be used and ELSER creates sparse vectors. The `inference_id` is +`my-elser-endpoint`. +<2> The `elser` service is used in this example. +<3> This setting enables and configures {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations]. +Adaptive allocations make it possible for ELSER to automatically scale up or down resources based on the current load on the process. 
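As a quick sketch of how to confirm the endpoint was created (using the
`inference_id` from the request above), the GET {infer} API returns its
configuration:

[source,console]
------------------------------------------------------------
GET _inference/sparse_embedding/my-elser-endpoint
------------------------------------------------------------
// TEST[skip:TBD]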
+ +[NOTE] +==== +You might see a 502 bad gateway error in the response when using the {kib} Console. +This error usually just reflects a timeout, while the model downloads in the background. +You can check the download progress in the {ml-app} UI. +If using the Python client, you can set the `timeout` parameter to a higher value. +==== [discrete] [[semantic-text-index-mapping]] @@ -41,7 +75,8 @@ PUT semantic-embeddings "mappings": { "properties": { "content": { <1> - "type": "semantic_text" <2> + "type": "semantic_text", <2> + "inference_id": "my-elser-endpoint" <3> } } } @@ -50,14 +85,18 @@ PUT semantic-embeddings // TEST[skip:TBD] <1> The name of the field to contain the generated embeddings. <2> The field to contain the embeddings is a `semantic_text` field. -Since no `inference_id` is provided, the <> is used by default. -To use a different {infer} service, you must create an {infer} endpoint first using the <> and then specify it in the `semantic_text` field mapping using the `inference_id` parameter. +<3> The `inference_id` is the inference endpoint you created in the previous step. +It will be used to generate the embeddings based on the input text. +Every time you ingest data into the related `semantic_text` field, this endpoint will be used for creating the vector representation of the text. [NOTE] ==== -If you're using web crawlers or connectors to generate indices, you have to <> for these indices to include the `semantic_text` field. -Once the mapping is updated, you'll need to run a full web crawl or a full connector sync. -This ensures that all existing documents are reprocessed and updated with the new semantic embeddings, enabling semantic search on the updated data. +If you're using web crawlers or connectors to generate indices, you have to +<> for these indices to +include the `semantic_text` field. Once the mapping is updated, you'll need to run +a full web crawl or a full connector sync. This ensures that all existing +documents are reprocessed and updated with the new semantic embeddings, +enabling semantic search on the updated data. ==== From d67d8eacfecd89a55dd4647e19382e5f65b63844 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 28 Oct 2024 14:32:02 +0100 Subject: [PATCH 189/202] [DOCS] Comments out default inference config docs. (#115742) --- .../inference/inference-apis.asciidoc | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 38afc7c416f18..037d7abeb2a36 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -35,21 +35,21 @@ Elastic –, then create an {infer} endpoint by the <>. Now use <> to perform <> on your data. -[discrete] -[[default-enpoints]] -=== Default {infer} endpoints +//[discrete] +//[[default-enpoints]] +//=== Default {infer} endpoints -Your {es} deployment contains some preconfigured {infer} endpoints that makes it easier for you to use them when defining `semantic_text` fields or {infer} processors. -The following list contains the default {infer} endpoints listed by `inference_id`: +//Your {es} deployment contains some preconfigured {infer} endpoints that makes it easier for you to use them when defining `semantic_text` fields or {infer} processors. 
+//The following list contains the default {infer} endpoints listed by `inference_id`: -* `.elser-2-elasticsearch`: uses the {ml-docs}/ml-nlp-elser.html[ELSER] built-in trained model for `sparse_embedding` tasks (recommended for English language texts) -* `.multilingual-e5-small-elasticsearch`: uses the {ml-docs}/ml-nlp-e5.html[E5] built-in trained model for `text_embedding` tasks (recommended for non-English language texts) +//* `.elser-2-elasticsearch`: uses the {ml-docs}/ml-nlp-elser.html[ELSER] built-in trained model for `sparse_embedding` tasks (recommended for English language texts) +//* `.multilingual-e5-small-elasticsearch`: uses the {ml-docs}/ml-nlp-e5.html[E5] built-in trained model for `text_embedding` tasks (recommended for non-English language texts) -Use the `inference_id` of the endpoint in a <> field definition or when creating an <>. -The API call will automatically download and deploy the model which might take a couple of minutes. -Default {infer} enpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled. -For these models, the minimum number of allocations is `0`. -If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes. +//Use the `inference_id` of the endpoint in a <> field definition or when creating an <>. +//The API call will automatically download and deploy the model which might take a couple of minutes. +//Default {infer} enpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled. +//For these models, the minimum number of allocations is `0`. +//If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes. [discrete] From 2b6828ddcdc962001ab46c4ab07d8277c740deb2 Mon Sep 17 00:00:00 2001 From: Marci W <333176+marciw@users.noreply.github.com> Date: Mon, 28 Oct 2024 10:14:40 -0400 Subject: [PATCH 190/202] Document ?_tstart and ?_tend in Kibana (#114965) * Document ?_tstart and ?_tend in Kibana * Edits: restructure, be clearer --- docs/reference/esql/esql-kibana.asciidoc | 40 +++++++++++++++++++++--- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/docs/reference/esql/esql-kibana.asciidoc b/docs/reference/esql/esql-kibana.asciidoc index 5da8b9323cc20..9850e012fc049 100644 --- a/docs/reference/esql/esql-kibana.asciidoc +++ b/docs/reference/esql/esql-kibana.asciidoc @@ -171,14 +171,44 @@ FROM kibana_sample_data_logs [[esql-kibana-time-filter]] === Time filtering -To display data within a specified time range, use the -{kibana-ref}/set-time-filter.html[time filter]. The time filter is only enabled -when the indices you're querying have a field called `@timestamp`. +To display data within a specified time range, you can use the standard time filter, +custom time parameters, or a WHERE command. -If your indices do not have a timestamp field called `@timestamp`, you can limit -the time range using the <> command and the <> function. +[discrete] +==== Standard time filter +The standard {kibana-ref}/set-time-filter.html[time filter] is enabled +when the indices you're querying have a field named `@timestamp`. + +[discrete] +==== Custom time parameters +If your indices do not have a field named `@timestamp`, you can use +the `?_tstart` and `?_tend` parameters to specify a time range. These parameters +work with any timestamp field and automatically sync with the {kibana-ref}/set-time-filter.html[time filter]. 
+ +[source,esql] +---- +FROM my_index +| WHERE custom_timestamp >= ?_tstart AND custom_timestamp < ?_tend +---- + +You can also use the `?_tstart` and `?_tend` parameters with the <> function +to create auto-incrementing time buckets in {esql} <>. +For example: + +[source,esql] +---- +FROM kibana_sample_data_logs +| STATS average_bytes = AVG(bytes) BY BUCKET(@timestamp, 50, ?_tstart, ?_tend) +---- + +This example uses `50` buckets, which is the maximum number of buckets. + +[discrete] +==== WHERE command +You can also limit the time range using the <> command and the <> function. For example, if the timestamp field is called `timestamp`, to query the last 15 minutes of data: + [source,esql] ---- FROM kibana_sample_data_logs From 35808be7d810b26e35d6889d3024d22b0dd7a471 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 29 Oct 2024 01:16:25 +1100 Subject: [PATCH 191/202] Mute org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT testGeoShapeGeoHex #115705 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 3a59af6234038..b69455a68790b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -291,6 +291,9 @@ tests: - class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT method: testGeoShapeGeoTile issue: https://github.com/elastic/elasticsearch/issues/115717 +- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT + method: testGeoShapeGeoHex + issue: https://github.com/elastic/elasticsearch/issues/115705 # Examples: # From 7efa5a36c91913178e3fc5813cd4f39ef44e4123 Mon Sep 17 00:00:00 2001 From: Dan Rubinstein Date: Mon, 28 Oct 2024 10:29:54 -0400 Subject: [PATCH 192/202] Unmute TestFeatureLicenseTrackingIT testFeatureTrackingInferenceModelPipeline (#115340) Co-authored-by: Elastic Machine --- .../xpack/ml/integration/TestFeatureLicenseTrackingIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java index ce270c570c8cd..5ccc3d64daf1d 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureLicenseTrackingIT.java @@ -107,7 +107,6 @@ public void testFeatureTrackingAnomalyJob() throws Exception { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102381") public void testFeatureTrackingInferenceModelPipeline() throws Exception { String modelId = "test-load-models-classification-license-tracking"; Map oneHotEncoding = new HashMap<>(); From 15a3a4e353ff136c9f543c5b9fc6fca0631acb2a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 28 Oct 2024 07:34:57 -0700 Subject: [PATCH 193/202] Remove XPackFeatureSet interface (#115679) XPackFeatureSet hasn't been used for many years. But the inner "Usage" class is still used. This commit moves the Usage class up to its own file as XPackFeatureUsage, and removes the defunct XPackFeatureSet interface. 
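Sketched below (the class and feature names are illustrative, not from this
commit), a usage implementation after the move simply extends the new
standalone class:

    public class ExampleFeatureUsage extends XPackFeatureUsage {
        public ExampleFeatureUsage(boolean available, boolean enabled) {
            super("example", available, enabled); // feature name + flags
        }

        @Override
        public TransportVersion getMinimalSupportedVersion() {
            return TransportVersion.current(); // sketch; real usages pin a version
        }
    }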
closes #29736 --- .../AnalyticsInfoTransportActionTests.java | 10 +- .../xpack/ccr/CCRInfoTransportAction.java | 4 +- .../java/org/elasticsearch/xpack/ccr/Ccr.java | 4 +- .../action/XPackUsageRestCancellationIT.java | 4 +- .../xpack/core/HealthApiFeatureSetUsage.java | 2 +- .../core/RemoteClusterFeatureSetUsage.java | 2 +- .../xpack/core/XPackClientPlugin.java | 62 ++++++------ .../xpack/core/XPackFeatureSet.java | 83 ---------------- .../xpack/core/XPackFeatureUsage.java | 74 ++++++++++++++ .../action/TransportXPackUsageAction.java | 4 +- .../action/XPackUsageFeatureResponse.java | 10 +- .../xpack/core/action/XPackUsageResponse.java | 14 +-- .../AggregateMetricFeatureSetUsage.java | 4 +- .../analytics/AnalyticsFeatureSetUsage.java | 4 +- .../EnterpriseSearchFeatureSetUsage.java | 4 +- .../core/application/ProfilingUsage.java | 4 +- .../core/archive/ArchiveFeatureSetUsage.java | 4 +- .../DataStreamFeatureSetUsage.java | 4 +- .../DataStreamLifecycleFeatureSetUsage.java | 4 +- .../datatiers/DataTiersFeatureSetUsage.java | 4 +- .../core/enrich/EnrichFeatureSetUsage.java | 4 +- .../xpack/core/eql/EqlFeatureSetUsage.java | 4 +- .../xpack/core/esql/EsqlFeatureSetUsage.java | 4 +- .../frozen/FrozenIndicesFeatureSetUsage.java | 4 +- .../core/graph/GraphFeatureSetUsage.java | 4 +- .../ilm/IndexLifecycleFeatureSetUsage.java | 4 +- .../inference/InferenceFeatureSetUsage.java | 4 +- .../logstash/LogstashFeatureSetUsage.java | 4 +- .../ml/MachineLearningFeatureSetUsage.java | 4 +- .../monitoring/MonitoringFeatureSetUsage.java | 4 +- .../rest/action/RestXPackUsageAction.java | 4 +- .../core/rollup/RollupFeatureSetUsage.java | 4 +- .../SearchableSnapshotFeatureSetUsage.java | 4 +- .../security/SecurityFeatureSetUsage.java | 4 +- .../xpack/core/slm/SLMFeatureSetUsage.java | 4 +- .../core/spatial/SpatialFeatureSetUsage.java | 4 +- .../xpack/core/sql/SqlFeatureSetUsage.java | 4 +- .../transform/TransformFeatureSetUsage.java | 4 +- .../VotingOnlyNodeFeatureSetUsage.java | 4 +- .../core/watcher/WatcherFeatureSetUsage.java | 4 +- .../core/action/XPackUsageResponseTests.java | 14 +-- .../graph/GraphInfoTransportActionTests.java | 10 +- .../TransportInferenceUsageActionTests.java | 4 +- .../LogstashInfoTransportActionTests.java | 10 +- ...chineLearningInfoTransportActionTests.java | 42 ++++---- .../cluster/ClusterStatsCollector.java | 4 +- .../cluster/ClusterStatsMonitoringDoc.java | 10 +- .../MonitoringInfoTransportActionTests.java | 6 +- .../ClusterStatsMonitoringDocTests.java | 6 +- .../authc/esnative/NativeRealmIntegTests.java | 4 +- .../SecurityInfoTransportActionTests.java | 8 +- .../SpatialInfoTransportActionTests.java | 10 +- .../VotingOnlyInfoTransportAction.java | 38 +++++++ .../votingonly/VotingOnlyNodeFeatureSet.java | 99 ------------------- .../votingonly/VotingOnlyNodePlugin.java | 4 +- .../VotingOnlyUsageTransportAction.java | 55 +++++++++++ .../WatcherInfoTransportActionTests.java | 6 +- 57 files changed, 348 insertions(+), 367 deletions(-) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureUsage.java create mode 100644 x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyInfoTransportAction.java delete mode 100644 x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeFeatureSet.java create mode 100644 
x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyUsageTransportAction.java diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java index eb7cc9c51c62e..77d3d5a5577b0 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/action/AnalyticsInfoTransportActionTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.test.MockUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage; import org.elasticsearch.xpack.core.analytics.action.AnalyticsStatsAction; @@ -75,12 +75,12 @@ public void testAvailable() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(task, null, clusterState, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(true)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(true)); verify(client, times(1)).execute(any(), any(), any()); verifyNoMoreInteractions(client); @@ -103,12 +103,12 @@ public void testEnabled() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(task, null, clusterState, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertTrue(usage.enabled()); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new AnalyticsFeatureSetUsage(out.bytes().streamInput()); assertTrue(serializedUsage.enabled()); verify(client, times(1)).execute(any(), any(), any()); verifyNoMoreInteractions(client); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java index d4e31f25a0a91..6e6b54af4d0e1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; @@ -58,7 +58,7 @@ public boolean enabled() { return enabled; } - 
public static class Usage extends XPackFeatureSet.Usage { + public static class Usage extends XPackFeatureUsage { private final int numberOfFollowerIndices; private final int numberOfAutoFollowPatterns; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 395af34c59b3a..87a4c2c7d4826 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -92,7 +92,7 @@ import org.elasticsearch.xpack.ccr.rest.RestResumeAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestResumeFollowAction; import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; @@ -306,7 +306,7 @@ public List getNamedWriteables() { ), // usage api - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.CCR, CCRInfoTransportAction.Usage::new) + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.CCR, CCRInfoTransportAction.Usage::new) ); } diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java index f17a89774f71d..a9c780ccd468c 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/XPackUsageRestCancellationIT.java @@ -34,7 +34,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.TransportXPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; @@ -168,7 +168,7 @@ protected void masterOperation( ) throws Exception { blockingXPackUsageActionExecuting.countDown(); blockActionLatch.await(); - listener.onResponse(new XPackUsageFeatureResponse(new XPackFeatureSet.Usage("test", false, false) { + listener.onResponse(new XPackUsageFeatureResponse(new XPackFeatureUsage("test", false, false) { @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersion.current(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java index ac261270db6d6..6ec0f3f1c0d95 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java @@ -71,7 +71,7 @@ * "enabled": true * } */ -public class HealthApiFeatureSetUsage extends XPackFeatureSet.Usage { +public class HealthApiFeatureSetUsage extends XPackFeatureUsage { private final Map usageStats; diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java index 3a75ce34e22bf..de657e78c4302 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java @@ -17,7 +17,7 @@ import java.io.IOException; import java.util.List; -public class RemoteClusterFeatureSetUsage extends XPackFeatureSet.Usage { +public class RemoteClusterFeatureSetUsage extends XPackFeatureUsage { private final List remoteConnectionInfos; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 2e806a24ad469..9004239478bdf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -136,19 +136,19 @@ public List> getSettings() { public List getNamedWriteables() { return Stream.of( // graph - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.GRAPH, GraphFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.GRAPH, GraphFeatureSetUsage::new), // logstash - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.LOGSTASH, LogstashFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.LOGSTASH, LogstashFeatureSetUsage::new), // ML - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MACHINE_LEARNING, MachineLearningFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.MACHINE_LEARNING, MachineLearningFeatureSetUsage::new), // inference - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INFERENCE, InferenceFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.INFERENCE, InferenceFeatureSetUsage::new), // monitoring - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MONITORING, MonitoringFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.MONITORING, MonitoringFeatureSetUsage::new), // security new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetadata.TYPE, TokenMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TokenMetadata.TYPE, TokenMetadata::readDiffFrom), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), // security : configurable cluster privileges new NamedWriteableRegistry.Entry( ConfigurableClusterPrivilege.class, @@ -180,20 +180,20 @@ public List getNamedWriteables() { RemoteClusterPermissionGroup::new ), // eql - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.EQL, EqlFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.EQL, EqlFeatureSetUsage::new), // esql - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ESQL, EsqlFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ESQL, EsqlFeatureSetUsage::new), // sql - new 
NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SQL, SqlFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.SQL, SqlFeatureSetUsage::new), // watcher new NamedWriteableRegistry.Entry(Metadata.Custom.class, WatcherMetadata.TYPE, WatcherMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, WatcherMetadata.TYPE, WatcherMetadata::readDiffFrom), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.WATCHER, WatcherFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.WATCHER, WatcherFeatureSetUsage::new), // licensing new NamedWriteableRegistry.Entry(Metadata.Custom.class, LicensesMetadata.TYPE, LicensesMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, LicensesMetadata.TYPE, LicensesMetadata::readDiffFrom), // rollup - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, RollupJob.NAME, RollupJob::new), new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new), @@ -207,9 +207,9 @@ public List getNamedWriteables() { in -> AutoFollowMetadata.readDiffFrom(Metadata.Custom.class, AutoFollowMetadata.TYPE, in) ), // ILM - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX_LIFECYCLE, IndexLifecycleFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.INDEX_LIFECYCLE, IndexLifecycleFeatureSetUsage::new), // SLM - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SNAPSHOT_LIFECYCLE, SLMFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.SNAPSHOT_LIFECYCLE, SLMFeatureSetUsage::new), // ILM - Custom Metadata new NamedWriteableRegistry.Entry(Metadata.Custom.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new), new NamedWriteableRegistry.Entry( @@ -247,7 +247,7 @@ public List getNamedWriteables() { // Transforms new NamedWriteableRegistry.Entry(Metadata.Custom.class, TransformMetadata.TYPE, TransformMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TransformMetadata.TYPE, TransformMetadata.TransformMetadataDiff::new), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TransformField.TASK_NAME, TransformTaskParams::new), new NamedWriteableRegistry.Entry(Task.Status.class, TransformField.TASK_NAME, TransformState::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, TransformField.TASK_NAME, TransformState::new), @@ -263,48 +263,44 @@ public List getNamedWriteables() { i -> NullRetentionPolicyConfig.INSTANCE ), // Voting Only Node - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.VOTING_ONLY, VotingOnlyNodeFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.VOTING_ONLY, VotingOnlyNodeFeatureSetUsage::new), // Frozen indices - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.FROZEN_INDICES, 
FrozenIndicesFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.FROZEN_INDICES, FrozenIndicesFeatureSetUsage::new), // Spatial - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new), // Analytics - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new), // Aggregate metric field type - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.AGGREGATE_METRIC, AggregateMetricFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.AGGREGATE_METRIC, AggregateMetricFeatureSetUsage::new), // Enrich - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ENRICH, EnrichFeatureSetUsage::new), new NamedWriteableRegistry.Entry(Task.Status.class, ExecuteEnrichPolicyStatus.NAME, ExecuteEnrichPolicyStatus::new), // Searchable snapshots new NamedWriteableRegistry.Entry( - XPackFeatureSet.Usage.class, + XPackFeatureUsage.class, XPackField.SEARCHABLE_SNAPSHOTS, SearchableSnapshotFeatureSetUsage::new ), // Data Streams - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_STREAMS, DataStreamFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.DATA_STREAMS, DataStreamFeatureSetUsage::new), new NamedWriteableRegistry.Entry( - XPackFeatureSet.Usage.class, + XPackFeatureUsage.class, XPackField.DATA_STREAM_LIFECYCLE, DataStreamLifecycleFeatureSetUsage::new ), // Data Tiers - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_TIERS, DataTiersFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.DATA_TIERS, DataTiersFeatureSetUsage::new), // Archive - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ARCHIVE, ArchiveFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ARCHIVE, ArchiveFeatureSetUsage::new), // TSDB Downsampling new NamedWriteableRegistry.Entry(LifecycleAction.class, DownsampleAction.NAME, DownsampleAction::new), // Health API usage - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.HEALTH_API, HealthApiFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.HEALTH_API, HealthApiFeatureSetUsage::new), // Remote cluster usage - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.REMOTE_CLUSTERS, RemoteClusterFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.REMOTE_CLUSTERS, RemoteClusterFeatureSetUsage::new), // Enterprise Search - new NamedWriteableRegistry.Entry( - XPackFeatureSet.Usage.class, - XPackField.ENTERPRISE_SEARCH, - EnterpriseSearchFeatureSetUsage::new - ), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.UNIVERSAL_PROFILING, ProfilingUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.ENTERPRISE_SEARCH, EnterpriseSearchFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, XPackField.UNIVERSAL_PROFILING, ProfilingUsage::new), 
new NamedWriteableRegistry.Entry( PersistentTaskParams.class, SecurityMigrationTaskParams.TASK_NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java deleted file mode 100644 index 7593bc9af2902..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.core; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.VersionedNamedWriteable; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Objects; - -public interface XPackFeatureSet { - - String name(); - - boolean available(); - - boolean enabled(); - - abstract class Usage implements ToXContentObject, VersionedNamedWriteable { - - private static final String AVAILABLE_XFIELD = "available"; - private static final String ENABLED_XFIELD = "enabled"; - - protected final String name; - protected final boolean available; - protected final boolean enabled; - - public Usage(StreamInput input) throws IOException { - this(input.readString(), input.readBoolean(), input.readBoolean()); - } - - public Usage(String name, boolean available, boolean enabled) { - Objects.requireNonNull(name); - this.name = name; - this.available = available; - this.enabled = enabled; - } - - public String name() { - return name; - } - - public boolean available() { - return available; - } - - public boolean enabled() { - return enabled; - } - - @Override - public String getWriteableName() { - return name; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeBoolean(available); - out.writeBoolean(enabled); - } - - @Override - public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - innerXContent(builder, params); - return builder.endObject(); - } - - protected void innerXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(AVAILABLE_XFIELD, available); - builder.field(ENABLED_XFIELD, enabled); - } - } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureUsage.java new file mode 100644 index 0000000000000..0cbafd7972ebe --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureUsage.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.VersionedNamedWriteable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public abstract class XPackFeatureUsage implements ToXContentObject, VersionedNamedWriteable { + + private static final String AVAILABLE_XFIELD = "available"; + private static final String ENABLED_XFIELD = "enabled"; + + protected final String name; + protected final boolean available; + protected final boolean enabled; + + public XPackFeatureUsage(StreamInput input) throws IOException { + this(input.readString(), input.readBoolean(), input.readBoolean()); + } + + public XPackFeatureUsage(String name, boolean available, boolean enabled) { + Objects.requireNonNull(name); + this.name = name; + this.available = available; + this.enabled = enabled; + } + + public String name() { + return name; + } + + public boolean available() { + return available; + } + + public boolean enabled() { + return enabled; + } + + @Override + public String getWriteableName() { + return name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeBoolean(available); + out.writeBoolean(enabled); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerXContent(builder, params); + return builder.endObject(); + } + + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(AVAILABLE_XFIELD, available); + builder.field(ENABLED_XFIELD, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java index 3f26dfdd78ca8..e78b8786e92e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import java.util.ArrayList; import java.util.List; @@ -64,7 +64,7 @@ protected List> usageActions() { @Override protected void masterOperation(Task task, XPackUsageRequest request, ClusterState state, ActionListener listener) { new ActionRunnable<>(listener) { - final List responses = new ArrayList<>(usageActions.size()); + final List responses = new ArrayList<>(usageActions.size()); @Override protected void doRun() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java index 71bb9993f3a29..0cacf67e7b309 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureResponse.java @@ -9,24 +9,24 @@ import org.elasticsearch.action.ActionResponse; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import java.io.IOException; public class XPackUsageFeatureResponse extends ActionResponse { - private final XPackFeatureSet.Usage usage; + private final XPackFeatureUsage usage; public XPackUsageFeatureResponse(StreamInput in) throws IOException { super(in); - usage = in.readNamedWriteable(XPackFeatureSet.Usage.class); + usage = in.readNamedWriteable(XPackFeatureUsage.class); } - public XPackUsageFeatureResponse(XPackFeatureSet.Usage usage) { + public XPackUsageFeatureResponse(XPackFeatureUsage usage) { this.usage = usage; } - public XPackFeatureSet.Usage getUsage() { + public XPackFeatureUsage getUsage() { return usage; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java index e6ed59539d161..6301d29316f25 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import java.io.IOException; import java.util.List; @@ -17,30 +17,30 @@ public class XPackUsageResponse extends ActionResponse { - private final List usages; + private final List usages; - public XPackUsageResponse(final List usages) { + public XPackUsageResponse(final List usages) { this.usages = Objects.requireNonNull(usages); } public XPackUsageResponse(final StreamInput in) throws IOException { - usages = in.readNamedWriteableCollectionAsList(XPackFeatureSet.Usage.class); + usages = in.readNamedWriteableCollectionAsList(XPackFeatureUsage.class); } - public List getUsages() { + public List getUsages() { return usages; } @Override public void writeTo(final StreamOutput out) throws IOException { // we can only write the usages with version the coordinating node is compatible with otherwise it will not know the named writeable - final List usagesToWrite = usages.stream() + final List usagesToWrite = usages.stream() .filter(usage -> out.getTransportVersion().onOrAfter(usage.getMinimalSupportedVersion())) .toList(); writeTo(out, usagesToWrite); } - private static void writeTo(final StreamOutput out, final List usages) throws IOException { + private static void writeTo(final StreamOutput out, final List usages) throws IOException { out.writeNamedWriteableCollection(usages); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java index 56a2fad47cf2f..5505cf3271b8b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java @@ -10,13 +10,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; 
-import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class AggregateMetricFeatureSetUsage extends XPackFeatureSet.Usage { +public class AggregateMetricFeatureSetUsage extends XPackFeatureUsage { public AggregateMetricFeatureSetUsage(StreamInput input) throws IOException { super(input); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java index 89bd749f2ea1d..8d2fd2ecc0870 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java @@ -12,14 +12,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.analytics.action.AnalyticsStatsAction; import java.io.IOException; import java.util.Objects; -public class AnalyticsFeatureSetUsage extends XPackFeatureSet.Usage { +public class AnalyticsFeatureSetUsage extends XPackFeatureUsage { private final AnalyticsStatsAction.Response response; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java index 45b9d557b72b3..b1dac4898945d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; @@ -20,7 +20,7 @@ import java.util.Map; import java.util.Objects; -public class EnterpriseSearchFeatureSetUsage extends XPackFeatureSet.Usage { +public class EnterpriseSearchFeatureSetUsage extends XPackFeatureUsage { static final TransportVersion BEHAVIORAL_ANALYTICS_TRANSPORT_VERSION = TransportVersions.V_8_8_1; static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_10_X; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java index a487bbb4e27df..5f46e5aa2d4c2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java @@ -10,12 +10,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import 
org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class ProfilingUsage extends XPackFeatureSet.Usage { +public class ProfilingUsage extends XPackFeatureUsage { public ProfilingUsage(StreamInput input) throws IOException { super(input); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java index 4201a30034786..5f7c7554f78f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java @@ -13,13 +13,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class ArchiveFeatureSetUsage extends XPackFeatureSet.Usage { +public class ArchiveFeatureSetUsage extends XPackFeatureUsage { private final int numberOfArchiveIndices; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java index e267c94e06892..1a964f3c57dbb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java @@ -15,13 +15,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class DataStreamFeatureSetUsage extends XPackFeatureSet.Usage { +public class DataStreamFeatureSetUsage extends XPackFeatureUsage { private final DataStreamStats streamStats; public DataStreamFeatureSetUsage(StreamInput input) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java index 4c550c69e4c09..7a31888a440c3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java @@ -16,7 +16,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; @@ -24,7 +24,7 @@ import java.util.Map; import java.util.Objects; -public class DataStreamLifecycleFeatureSetUsage extends XPackFeatureSet.Usage { +public class DataStreamLifecycleFeatureSetUsage 
extends XPackFeatureUsage { public static final DataStreamLifecycleFeatureSetUsage DISABLED = new DataStreamLifecycleFeatureSetUsage(); final LifecycleStats lifecycleStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java index f990118763bad..a33dd7dff3469 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; @@ -30,7 +30,7 @@ * See {@link TierSpecificStats} for the stats that are tracked on a per-tier * basis. */ -public class DataTiersFeatureSetUsage extends XPackFeatureSet.Usage { +public class DataTiersFeatureSetUsage extends XPackFeatureUsage { private final Map tierStats; public DataTiersFeatureSetUsage(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java index ab058909761d7..819b3d86b68c8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java @@ -10,12 +10,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class EnrichFeatureSetUsage extends XPackFeatureSet.Usage { +public class EnrichFeatureSetUsage extends XPackFeatureUsage { public EnrichFeatureSetUsage() { super(XPackField.ENRICH, true, true); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java index 6285840b66039..0edbda79ed975 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java @@ -12,13 +12,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; -public class EqlFeatureSetUsage extends XPackFeatureSet.Usage { +public class EqlFeatureSetUsage extends XPackFeatureUsage { private final Map stats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java index 
5707bc054e58f..665f8e7952363 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java @@ -12,13 +12,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; -public class EsqlFeatureSetUsage extends XPackFeatureSet.Usage { +public class EsqlFeatureSetUsage extends XPackFeatureUsage { private final Map stats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java index a8702560e4804..b8b6877e877fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java @@ -11,13 +11,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class FrozenIndicesFeatureSetUsage extends XPackFeatureSet.Usage { +public class FrozenIndicesFeatureSetUsage extends XPackFeatureUsage { private final int numberOfFrozenIndices; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java index 2ac1c11ce9147..b046efaa30082 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java @@ -9,12 +9,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class GraphFeatureSetUsage extends XPackFeatureSet.Usage { +public class GraphFeatureSetUsage extends XPackFeatureUsage { public GraphFeatureSetUsage(StreamInput input) throws IOException { super(input); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java index cc2e54e5be247..822f15d1ed74a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java @@ -18,7 +18,7 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import 
org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; @@ -27,7 +27,7 @@ import java.util.Map; import java.util.Objects; -public class IndexLifecycleFeatureSetUsage extends XPackFeatureSet.Usage { +public class IndexLifecycleFeatureSetUsage extends XPackFeatureUsage { private List policyStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceFeatureSetUsage.java index 61409f59f9d85..00bb8087c9fb3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/InferenceFeatureSetUsage.java @@ -15,14 +15,14 @@ import org.elasticsearch.inference.TaskType; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Collection; import java.util.Objects; -public class InferenceFeatureSetUsage extends XPackFeatureSet.Usage { +public class InferenceFeatureSetUsage extends XPackFeatureUsage { public static class ModelStats implements ToXContentObject, Writeable { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java index a83b8439aa612..f3f0214b89c04 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java @@ -9,12 +9,12 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class LogstashFeatureSetUsage extends XPackFeatureSet.Usage { +public class LogstashFeatureSetUsage extends XPackFeatureUsage { public LogstashFeatureSetUsage(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java index 60484675ec90b..0645299dfc30e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java @@ -11,14 +11,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; import java.util.Objects; -public class MachineLearningFeatureSetUsage extends XPackFeatureSet.Usage { +public class MachineLearningFeatureSetUsage extends XPackFeatureUsage { public static final 
String ALL = "_all"; public static final String JOBS_FIELD = "jobs"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java index 4c0f347a3ffed..b181bd78dfe41 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java @@ -12,14 +12,14 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Collections; import java.util.Map; -public class MonitoringFeatureSetUsage extends XPackFeatureSet.Usage { +public class MonitoringFeatureSetUsage extends XPackFeatureUsage { @Nullable private Boolean collectionEnabled; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java index ca57dbec5bef5..d373a9e0bf3ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; @@ -52,7 +52,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client @Override public RestResponse buildResponse(XPackUsageResponse response, XContentBuilder builder) throws Exception { builder.startObject(); - for (XPackFeatureSet.Usage usage : response.getUsages()) { + for (XPackFeatureUsage usage : response.getUsages()) { builder.field(usage.name(), usage); } builder.endObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java index 6b4ee3c1fdd16..82253ba08165a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java @@ -11,12 +11,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class RollupFeatureSetUsage extends XPackFeatureSet.Usage { +public class RollupFeatureSetUsage extends XPackFeatureUsage { private final int numberOfRollupJobs; diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java index e1644e5113a27..d7d0320b602b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java @@ -13,13 +13,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class SearchableSnapshotFeatureSetUsage extends XPackFeatureSet.Usage { +public class SearchableSnapshotFeatureSetUsage extends XPackFeatureUsage { private final int numberOfSearchableSnapshotIndices; private final int numberOfFullCopySearchableSnapshotIndices; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index c88e13f80ba01..2793ddea3bd06 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -11,14 +11,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Collections; import java.util.Map; -public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { +public class SecurityFeatureSetUsage extends XPackFeatureUsage { private static final String REALMS_XFIELD = "realms"; private static final String ROLES_XFIELD = "roles"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java index 289c76714b731..099eaa2468e1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java @@ -13,13 +13,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Objects; -public class SLMFeatureSetUsage extends XPackFeatureSet.Usage { +public class SLMFeatureSetUsage extends XPackFeatureUsage { @Nullable private final SnapshotLifecycleStats slmStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java index 
380c0e97d30dd..ac495ddebab3c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java @@ -11,14 +11,14 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.spatial.action.SpatialStatsAction; import java.io.IOException; import java.util.Objects; -public class SpatialFeatureSetUsage extends XPackFeatureSet.Usage { +public class SpatialFeatureSetUsage extends XPackFeatureUsage { private final SpatialStatsAction.Response statsResponse; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java index a431dab0b34d5..2f41c8d2e2bb6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java @@ -12,13 +12,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; -public class SqlFeatureSetUsage extends XPackFeatureSet.Usage { +public class SqlFeatureSetUsage extends XPackFeatureUsage { private final Map stats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java index 66c97876c1f6f..e4c15a3b9007c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; @@ -22,7 +22,7 @@ import java.util.Map.Entry; import java.util.Objects; -public class TransformFeatureSetUsage extends Usage { +public class TransformFeatureSetUsage extends XPackFeatureUsage { private static final String FEATURE_COUNTS = "feature_counts"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java index cf9d2499b60b7..6d8ed0e33d7d8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java @@ -9,12 +9,12 @@ import 
org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; -public class VotingOnlyNodeFeatureSetUsage extends XPackFeatureSet.Usage { +public class VotingOnlyNodeFeatureSetUsage extends XPackFeatureUsage { public VotingOnlyNodeFeatureSetUsage(StreamInput input) throws IOException { super(input); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java index fce9399a9bf01..77280c727be3b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java @@ -11,13 +11,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Map; -public class WatcherFeatureSetUsage extends XPackFeatureSet.Usage { +public class WatcherFeatureSetUsage extends XPackFeatureUsage { private final Map stats; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/XPackUsageResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/XPackUsageResponseTests.java index 184a4fc064441..fee3a091a9f4e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/XPackUsageResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/XPackUsageResponseTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.junit.BeforeClass; import java.io.IOException; @@ -42,7 +42,7 @@ public static void setVersion() { ); } - public static class OldUsage extends XPackFeatureSet.Usage { + public static class OldUsage extends XPackFeatureUsage { public OldUsage() { super("old", randomBoolean(), randomBoolean()); @@ -59,7 +59,7 @@ public TransportVersion getMinimalSupportedVersion() { } - public static class NewUsage extends XPackFeatureSet.Usage { + public static class NewUsage extends XPackFeatureUsage { public NewUsage() { super("new", randomBoolean(), randomBoolean()); @@ -84,8 +84,8 @@ public void testVersionDependentSerializationWriteToOldStream() throws IOExcepti final NamedWriteableRegistry registry = new NamedWriteableRegistry( List.of( - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, "old", OldUsage::new), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, "new", NewUsage::new) + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, "old", OldUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, "new", NewUsage::new) ) ); @@ -103,8 +103,8 @@ public void testVersionDependentSerializationWriteToNewStream() throws IOExcepti final NamedWriteableRegistry registry = new 
NamedWriteableRegistry( List.of( - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, "old", OldUsage::new), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, "new", NewUsage::new) + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, "old", OldUsage::new), + new NamedWriteableRegistry.Entry(XPackFeatureUsage.class, "new", NewUsage::new) ) ); diff --git a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/GraphInfoTransportActionTests.java b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/GraphInfoTransportActionTests.java index 90034c9620923..db6bfe798568d 100644 --- a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/GraphInfoTransportActionTests.java +++ b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/GraphInfoTransportActionTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.test.MockUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; import org.junit.Before; @@ -56,12 +56,12 @@ public void testAvailable() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, null, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(available)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new GraphFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new GraphFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(available)); } @@ -96,12 +96,12 @@ public void testEnabled() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, null, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.enabled(), is(enabled)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new GraphFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new GraphFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.enabled(), is(enabled)); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageActionTests.java index b0c59fe160be3..bf173432f3d91 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportInferenceUsageActionTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import 
org.elasticsearch.xpack.core.inference.InferenceFeatureSetUsage; @@ -98,7 +98,7 @@ public void test() throws Exception { BytesStreamOutput out = new BytesStreamOutput(); future.get().getUsage().writeTo(out); - XPackFeatureSet.Usage usage = new InferenceFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage usage = new InferenceFeatureSetUsage(out.bytes().streamInput()); assertThat(usage.name(), is(XPackField.INFERENCE)); assertTrue(usage.enabled()); diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/LogstashInfoTransportActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/LogstashInfoTransportActionTests.java index 428d3bfeaa5ee..cfcefbaee461a 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/LogstashInfoTransportActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/LogstashInfoTransportActionTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.logstash.LogstashFeatureSetUsage; @@ -36,11 +36,11 @@ public void testEnabledDefault() throws Exception { LogstashUsageTransportAction usageAction = newUsageAction(false); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, null, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new LogstashFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new LogstashFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.enabled(), is(true)); } @@ -54,12 +54,12 @@ public void testAvailable() throws Exception { var usageAction = newUsageAction(available); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, null, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(available)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new LogstashFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new LogstashFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(available)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java index 4fdb7d2e5e46c..9bb5259b0b30e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningInfoTransportActionTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import 
org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.action.util.QueryPage; @@ -203,12 +203,12 @@ public void testAvailable() throws Exception { var usageAction = newUsageAction(commonSettings, randomBoolean(), randomBoolean(), randomBoolean()); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(available)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(available)); } @@ -233,12 +233,12 @@ public void testEnabled() throws Exception { var usageAction = newUsageAction(settings.build(), randomBoolean(), randomBoolean(), randomBoolean()); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.enabled(), is(expected)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.enabled(), is(expected)); } @@ -254,13 +254,13 @@ public void testUsage() throws Exception { var usageAction = newUsageAction(settings.build(), true, true, true); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage mlUsage = future.get().getUsage(); + XPackFeatureUsage mlUsage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); mlUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(mlUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.MACHINE_LEARNING)); assertThat(usage.enabled(), is(true)); @@ -412,13 +412,13 @@ public void testAnomalyDetectionDisabled() throws Exception { var usageAction = newUsageAction(settings.build(), false, true, true); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage mlUsage = future.get().getUsage(); + XPackFeatureUsage mlUsage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); mlUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(mlUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), 
is(XPackField.MACHINE_LEARNING)); assertThat(usage.enabled(), is(true)); @@ -508,13 +508,13 @@ public void testUsageWithTrainedModelsDisabled() throws Exception { var usageAction = newUsageAction(settings.build(), true, false, false); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage mlUsage = future.get().getUsage(); + XPackFeatureUsage mlUsage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); mlUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(mlUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.MACHINE_LEARNING)); assertThat(usage.enabled(), is(true)); @@ -605,7 +605,7 @@ public void testUsageWithOrphanedTask() throws Exception { var usageAction = newUsageAction(settings.build(), true, true, true); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); XContentSource source; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { @@ -640,12 +640,12 @@ public void testUsageDisabledML() throws Exception { var usageAction = newUsageAction(settings.build(), randomBoolean(), randomBoolean(), randomBoolean()); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage mlUsage = future.get().getUsage(); + XPackFeatureUsage mlUsage = future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); mlUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(mlUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.MACHINE_LEARNING)); assertThat(usage.enabled(), is(false)); @@ -662,14 +662,14 @@ public void testNodeCount() throws Exception { var usageAction = newUsageAction(settings.build(), randomBoolean(), randomBoolean(), randomBoolean()); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(null, null, clusterState, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(true)); assertThat(usage.enabled(), is(true)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput()); XContentSource source; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { @@ -688,7 +688,7 @@ public void testUsageGivenMlMetadataNotInstalled() throws Exception { var usageAction = newUsageAction(settings.build(), true, true, true); PlainActionFuture future = new 
PlainActionFuture<>(); usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(true)); assertThat(usage.enabled(), is(true)); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java index a5f3792d6df8d..add6efe8d1cf1 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java @@ -26,7 +26,7 @@ import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.collector.Collector; @@ -93,7 +93,7 @@ protected Collection doCollect(final MonitoringDoc.Node node, fin final String clusterUuid = clusterUuid(clusterState); final String version = Build.current().version(); final License license = licenseService.getLicense(); - final List xpackUsage = collect( + final List xpackUsage = collect( () -> client.execute(XPackUsageAction.INSTANCE, new XPackUsageRequest(getCollectionTimeout())) .actionGet(getCollectionTimeout()) .getUsages() diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java index e25c11304a7b2..d056164ba610c 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDoc.java @@ -17,7 +17,7 @@ import org.elasticsearch.license.License; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -53,7 +53,7 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc { private final String version; private final License license; private final boolean apmIndicesExist; - private final List usages; + private final List usages; private final ClusterStatsResponse clusterStats; private final ClusterState clusterState; private final ClusterHealthStatus status; @@ -69,7 +69,7 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc { final ClusterHealthStatus status, @Nullable final License license, final boolean apmIndicesExist, - @Nullable final List usages, + @Nullable final List usages, @Nullable final ClusterStatsResponse clusterStats, @Nullable final ClusterState clusterState, final boolean clusterNeedsTLSEnabled @@ -103,7 +103,7 @@ boolean getAPMIndicesExist() { return apmIndicesExist; } - 
List getUsages() { + List getUsages() { return usages; } @@ -198,7 +198,7 @@ protected void innerToXContent(XContentBuilder builder, Params params) throws IO if (usages != null) { builder.startObject("xpack"); - for (final XPackFeatureSet.Usage usage : usages) { + for (final XPackFeatureUsage usage : usages) { builder.field(usage.name(), usage); } builder.endObject(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportActionTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportActionTests.java index 4d242db394d10..0b2cbe03e6431 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportActionTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringInfoTransportActionTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; import org.elasticsearch.xpack.monitoring.exporter.Exporter; @@ -109,8 +109,8 @@ public void testUsage() throws Exception { monitoringUsage.writeTo(out); StreamInput in = out.bytes().streamInput(); in.setTransportVersion(serializedVersion); - XPackFeatureSet.Usage serializedUsage = new MonitoringFeatureSetUsage(in); - for (XPackFeatureSet.Usage usage : Arrays.asList(monitoringUsage, serializedUsage)) { + XPackFeatureUsage serializedUsage = new MonitoringFeatureSetUsage(in); + for (XPackFeatureUsage usage : Arrays.asList(monitoringUsage, serializedUsage)) { ObjectPath source; try (XContentBuilder builder = jsonBuilder()) { usage.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index 3a9069dee064d..9458442557694 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -60,7 +60,7 @@ import org.elasticsearch.test.BuildUtils; import org.elasticsearch.transport.TransportInfo; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -91,7 +91,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase usages; + private List usages; private ClusterStatsResponse clusterStats; private ClusterState clusterState; private License license; @@ -312,7 +312,7 @@ public void testToXContent() throws IOException { .maxNodes(2) .build(); - final List usageList = singletonList(new MonitoringFeatureSetUsage(false, null)); + final List usageList = singletonList(new MonitoringFeatureSetUsage(false, null)); final 
NodeInfo mockNodeInfo = mock(NodeInfo.class); var mockNodeVersion = randomAlphaOfLengthBetween(6, 32); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index e178f4bf3eb6c..7ddeb3f9695ef 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; @@ -945,7 +945,7 @@ public void testRealmUsageStats() { } XPackUsageResponse response = safeGet(client().execute(XPackUsageAction.INSTANCE, new XPackUsageRequest(SAFE_AWAIT_TIMEOUT))); - Optional securityUsage = response.getUsages() + Optional securityUsage = response.getUsages() .stream() .filter(usage -> usage instanceof SecurityFeatureSetUsage) .findFirst(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java index 88f233087e1dd..688e12fe727c9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityInfoTransportActionTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; @@ -216,8 +216,8 @@ public void testUsage() throws Exception { SecurityFeatureSetUsage securityUsage = (SecurityFeatureSetUsage) future.get().getUsage(); BytesStreamOutput out = new BytesStreamOutput(); securityUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new SecurityFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(securityUsage, serializedUsage)) { + XPackFeatureUsage serializedUsage = new SecurityFeatureSetUsage(out.bytes().streamInput()); + for (XPackFeatureUsage usage : Arrays.asList(securityUsage, serializedUsage)) { assertThat(usage, is(notNullValue())); assertThat(usage.name(), is(XPackField.SECURITY)); assertThat(usage.enabled(), is(enabled)); @@ -318,7 +318,7 @@ public void testUsage() throws Exception { } } - private XContentSource getXContentSource(XPackFeatureSet.Usage usage) throws IOException { + private XContentSource getXContentSource(XPackFeatureUsage usage) throws IOException { XContentSource source; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { 
usage.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/action/SpatialInfoTransportActionTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/action/SpatialInfoTransportActionTests.java index ede64decbca33..683dcea48e46e 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/action/SpatialInfoTransportActionTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/action/SpatialInfoTransportActionTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.test.MockUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.spatial.SpatialFeatureSetUsage; import org.elasticsearch.xpack.core.spatial.action.SpatialStatsAction; @@ -73,12 +73,12 @@ public void testAvailable() throws Exception { PlainActionFuture future = new PlainActionFuture<>(); Task task = new Task(1L, "_type", "_action", "_description", null, Collections.emptyMap()); usageAction.masterOperation(task, null, clusterService.state(), future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertThat(usage.available(), is(true)); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); assertThat(serializedUsage.available(), is(true)); } @@ -99,12 +99,12 @@ public void testEnabled() throws Exception { ); PlainActionFuture future = new PlainActionFuture<>(); usageAction.masterOperation(mock(Task.class), null, clusterService.state(), future); - XPackFeatureSet.Usage usage = future.get().getUsage(); + XPackFeatureUsage usage = future.get().getUsage(); assertTrue(usage.enabled()); BytesStreamOutput out = new BytesStreamOutput(); usage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new SpatialFeatureSetUsage(out.bytes().streamInput()); assertTrue(serializedUsage.enabled()); } diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyInfoTransportAction.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyInfoTransportAction.java new file mode 100644 index 0000000000000..9e758ee155644 --- /dev/null +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyInfoTransportAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.cluster.coordination.votingonly; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; + +public class VotingOnlyInfoTransportAction extends XPackInfoFeatureTransportAction { + + @Inject + public VotingOnlyInfoTransportAction(TransportService transportService, ActionFilters actionFilters) { + super(XPackInfoFeatureAction.VOTING_ONLY.name(), transportService, actionFilters); + } + + @Override + protected String name() { + return XPackField.VOTING_ONLY; + } + + @Override + protected boolean available() { + return true; + } + + @Override + protected boolean enabled() { + return true; + } +} diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeFeatureSet.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeFeatureSet.java deleted file mode 100644 index 54bb265321799..0000000000000 --- a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodeFeatureSet.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.cluster.coordination.votingonly; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackFeatureSet; -import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; -import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; -import org.elasticsearch.xpack.core.votingonly.VotingOnlyNodeFeatureSetUsage; - -public class VotingOnlyNodeFeatureSet implements XPackFeatureSet { - - @Override - public String name() { - return XPackField.VOTING_ONLY; - } - - @Override - public boolean available() { - return true; - } - - @Override - public boolean enabled() { - return true; - } - - public static class UsageTransportAction extends XPackUsageFeatureTransportAction { - - @Inject - public UsageTransportAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver - ) { - super( - XPackUsageFeatureAction.VOTING_ONLY.name(), - transportService, - clusterService, - threadPool, - actionFilters, - 
indexNameExpressionResolver - ); - } - - @Override - protected void masterOperation( - Task task, - XPackUsageRequest request, - ClusterState state, - ActionListener listener - ) { - final VotingOnlyNodeFeatureSetUsage usage = new VotingOnlyNodeFeatureSetUsage(); - listener.onResponse(new XPackUsageFeatureResponse(usage)); - } - } - - public static class UsageInfoAction extends XPackInfoFeatureTransportAction { - - @Inject - public UsageInfoAction(TransportService transportService, ActionFilters actionFilters) { - super(XPackInfoFeatureAction.VOTING_ONLY.name(), transportService, actionFilters); - } - - @Override - protected String name() { - return XPackField.VOTING_ONLY; - } - - @Override - protected boolean available() { - return true; - } - - @Override - protected boolean enabled() { - return true; - } - } -} diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java index 8bd951cff40da..1e1103a6e54a2 100644 --- a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java @@ -84,8 +84,8 @@ public Collection createComponents(PluginServices services) { @Override public List> getActions() { return Arrays.asList( - new ActionHandler<>(XPackUsageFeatureAction.VOTING_ONLY, VotingOnlyNodeFeatureSet.UsageTransportAction.class), - new ActionHandler<>(XPackInfoFeatureAction.VOTING_ONLY, VotingOnlyNodeFeatureSet.UsageInfoAction.class) + new ActionHandler<>(XPackUsageFeatureAction.VOTING_ONLY, VotingOnlyUsageTransportAction.class), + new ActionHandler<>(XPackInfoFeatureAction.VOTING_ONLY, VotingOnlyInfoTransportAction.class) ); } diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyUsageTransportAction.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyUsageTransportAction.java new file mode 100644 index 0000000000000..dd449b10fd87f --- /dev/null +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyUsageTransportAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.cluster.coordination.votingonly; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; +import org.elasticsearch.xpack.core.votingonly.VotingOnlyNodeFeatureSetUsage; + +public class VotingOnlyUsageTransportAction extends XPackUsageFeatureTransportAction { + + @Inject + public VotingOnlyUsageTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + XPackUsageFeatureAction.VOTING_ONLY.name(), + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ); + } + + @Override + protected void masterOperation( + Task task, + XPackUsageRequest request, + ClusterState state, + ActionListener listener + ) { + final VotingOnlyNodeFeatureSetUsage usage = new VotingOnlyNodeFeatureSetUsage(); + listener.onResponse(new XPackUsageFeatureResponse(usage)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherInfoTransportActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherInfoTransportActionTests.java index ac683dca5bf26..87258afcc5320 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherInfoTransportActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherInfoTransportActionTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureUsage; import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; import org.elasticsearch.xpack.core.watcher.WatcherField; @@ -155,9 +155,9 @@ public void testUsageStats() throws Exception { assertThat(spam, is(1L)); BytesStreamOutput out = new BytesStreamOutput(); watcherUsage.writeTo(out); - XPackFeatureSet.Usage serializedUsage = new WatcherFeatureSetUsage(out.bytes().streamInput()); + XPackFeatureUsage serializedUsage = new WatcherFeatureSetUsage(out.bytes().streamInput()); - for (XPackFeatureSet.Usage usage : Arrays.asList(watcherUsage, serializedUsage)) { + for (XPackFeatureUsage usage : Arrays.asList(watcherUsage, serializedUsage)) { XContentBuilder builder = jsonBuilder(); usage.toXContent(builder, ToXContent.EMPTY_PARAMS); From 9dae38e7efde09eba269385d426774d634373204 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Mon, 28 Oct 2024 14:44:09 +0000 Subject: [PATCH 194/202] Unmute test that was fixed by #112145 (#115682) The template warning was fixed by https://github.com/elastic/elasticsearch/pull/112145 
#112147 was reopened as it was still muted even though the issue was closed. This unmutes the test. Closes #112147 --- muted-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index b69455a68790b..51345113addec 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -65,8 +65,6 @@ tests: - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/112147 - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 From 2207743e4c11d9ae1db48526122eda85d54ca504 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 28 Oct 2024 09:48:21 -0500 Subject: [PATCH 195/202] Adding testUsedMaxMindResponseClassesAreAccountedFor back to MaxMindSupportTests (#115706) --- .../ingest/geoip/MaxMindSupportTests.java | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java index 79a4190af284a..292a7e3c632d3 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/MaxMindSupportTests.java @@ -24,9 +24,11 @@ import com.maxmind.geoip2.record.MaxMind; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; import java.lang.reflect.Method; +import java.lang.reflect.Modifier; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.net.InetAddress; @@ -479,6 +481,36 @@ public void testUnknownMaxMindResponseClassess() { ); } + /* + * This tests that this test has a mapping in TYPE_TO_MAX_MIND_CLASS for all MaxMind classes exposed through GeoIpDatabase. + */ + public void testUsedMaxMindResponseClassesAreAccountedFor() { + Set> usedMaxMindResponseClasses = getUsedMaxMindResponseClasses(); + Set> supportedMaxMindClasses = new HashSet<>(TYPE_TO_MAX_MIND_CLASS.values()); + Set> usedButNotSupportedMaxMindResponseClasses = Sets.difference( + usedMaxMindResponseClasses, + supportedMaxMindClasses + ); + assertThat( + "MaxmindIpDataLookups exposes MaxMind response classes that this test does not know what to do with. Add mappings to " + + "TYPE_TO_MAX_MIND_CLASS for the following: " + + usedButNotSupportedMaxMindResponseClasses, + usedButNotSupportedMaxMindResponseClasses, + empty() + ); + Set> supportedButNotUsedMaxMindClasses = Sets.difference( + supportedMaxMindClasses, + usedMaxMindResponseClasses + ); + assertThat( + "This test claims to support MaxMind response classes that are not exposed in GeoIpDatabase. Remove the following from " + + "TYPE_TO_MAX_MIND_CLASS: " + + supportedButNotUsedMaxMindClasses, + supportedButNotUsedMaxMindClasses, + empty() + ); + } + /* * This is the list of field types that causes us to stop recursing. That is, fields of these types are the lowest-level fields that * we care about. 
@@ -597,4 +629,34 @@ private static String getFormattedList(Set fields) { } return result.toString(); } + + /* + * This returns all AbstractResponse classes that are declared in transform methods in classes defined in MaxmindIpDataLookups. + */ + @SuppressWarnings("unchecked") + @SuppressForbidden(reason = "Need declared classes and methods") + private static Set> getUsedMaxMindResponseClasses() { + Set> result = new HashSet<>(); + Class[] declaredClasses = MaxmindIpDataLookups.class.getDeclaredClasses(); + for (Class declaredClass : declaredClasses) { + if (Modifier.isAbstract(declaredClass.getModifiers())) { + continue; + } + Method[] declaredMethods = declaredClass.getDeclaredMethods(); + Optional nonAbstractTransformMethod = Arrays.stream(declaredMethods) + .filter( + method -> method.getName().equals("transform") + && method.getParameterTypes().length == 1 + && Modifier.isAbstract(method.getParameterTypes()[0].getModifiers()) == false + ) + .findAny(); + if (nonAbstractTransformMethod.isPresent()) { + Class responseClass = nonAbstractTransformMethod.get().getParameterTypes()[0]; + if (AbstractResponse.class.isAssignableFrom(responseClass)) { + result.add((Class) responseClass); + } + } + } + return result; + } } From 03f2559f37fba8cd52f1e3d1add76b431a375a80 Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Mon, 28 Oct 2024 14:50:56 +0000 Subject: [PATCH 196/202] #114220 Unmute ExplainLifecycleIT testStepInfoPreservedOnAutoRetry (#115768) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 51345113addec..c37733cb7307c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -198,9 +198,6 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testGet issue: https://github.com/elastic/elasticsearch/issues/114135 -- class: org.elasticsearch.xpack.ilm.ExplainLifecycleIT - method: testStepInfoPreservedOnAutoRetry - issue: https://github.com/elastic/elasticsearch/issues/114220 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114412 From a1670c18bfeac4a14057130d328053ecc3c8fe71 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 28 Oct 2024 15:32:00 +0000 Subject: [PATCH 197/202] Avoid `catch (Throwable t)` in `AmazonBedrockStreamingChatProcessor` (#115715) `CompletableFuture.runAsync` implicitly catches all `Throwable` instances thrown by the task, which includes `Error` instances that no reasonable application should catch. Moreover, discarding the return value from these methods means that any such `Error` will be ignored, allowing the JVM to carry on running in an invalid state. This commit replaces these trappy calls with more appropriate exception handling. 
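To make the hazard concrete, here is a minimal standalone sketch, not taken from
this patch; the class and method names are hypothetical, and only the JDK APIs are
real. The first variant silently swallows an `Error` because the discarded
`CompletableFuture` captures it; the second forks directly on the executor, so an
`Error` thrown by the task reaches the thread's uncaught-exception handler, and
only a failure to fork (for example a rejected execution) is caught and logged:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    class ForkSketch {
        // Trappy: runAsync completes the (ignored) future exceptionally with
        // whatever Throwable the task throws, so an AssertionError or
        // OutOfMemoryError vanishes without a trace.
        static void trappy(ExecutorService executor, Runnable task) {
            CompletableFuture.runAsync(task, executor);
        }

        // Safer: execute() lets a Throwable thrown by the task propagate to
        // the worker thread's uncaught-exception handler; the catch guards
        // only the act of forking itself.
        static void safer(ExecutorService executor, Runnable task) {
            try {
                executor.execute(task);
            } catch (Exception e) {
                System.err.println("failed to fork task: " + e);
            }
        }

        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(1);
            trappy(pool, () -> { throw new AssertionError("lost silently"); });
            safer(pool, () -> { throw new AssertionError("reaches the handler"); });
            pool.shutdown();
            pool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }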
--- docs/changelog/115715.yaml | 5 +++++ .../inference/src/main/java/module-info.java | 1 + .../AmazonBedrockStreamingChatProcessor.java | 19 +++++++++++++++---- 3 files changed, 21 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/115715.yaml diff --git a/docs/changelog/115715.yaml b/docs/changelog/115715.yaml new file mode 100644 index 0000000000000..378f2c42e5e50 --- /dev/null +++ b/docs/changelog/115715.yaml @@ -0,0 +1,5 @@ +pr: 115715 +summary: Avoid `catch (Throwable t)` in `AmazonBedrockStreamingChatProcessor` +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index 60cb254e0afbe..53974657e4e23 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -33,6 +33,7 @@ requires org.slf4j; requires software.amazon.awssdk.retries.api; requires org.reactivestreams; + requires org.elasticsearch.logging; exports org.elasticsearch.xpack.inference.action; exports org.elasticsearch.xpack.inference.registry; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java index 439fc5b65efd5..12f394e300e0f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/amazonbedrock/AmazonBedrockStreamingChatProcessor.java @@ -14,11 +14,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.inference.results.StreamingChatCompletionResults; import java.util.ArrayDeque; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.Flow; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -27,6 +28,8 @@ import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; class AmazonBedrockStreamingChatProcessor implements Flow.Processor { + private static final Logger logger = LogManager.getLogger(AmazonBedrockStreamingChatProcessor.class); + private final AtomicReference error = new AtomicReference<>(null); private final AtomicLong demand = new AtomicLong(0); private final AtomicBoolean isDone = new AtomicBoolean(false); @@ -75,13 +78,13 @@ public void onNext(ConverseStreamOutput item) { // this is always called from a netty thread maintained by the AWS SDK, we'll move it to our thread to process the response private void sendDownstreamOnAnotherThread(ContentBlockDeltaEvent event) { - CompletableFuture.runAsync(() -> { + runOnUtilityThreadPool(() -> { var text = event.delta().text(); var result = new ArrayDeque(1); result.offer(new StreamingChatCompletionResults.Result(text)); var results = new StreamingChatCompletionResults.Results(result); downstream.onNext(results); - }, threadPool.executor(UTILITY_THREAD_POOL_NAME)); + }); } @Override @@ -108,6 +111,14 @@ public void onComplete() { } } + private void runOnUtilityThreadPool(Runnable runnable) { + try { 
+ threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(runnable); + } catch (Exception e) { + logger.error(Strings.format("failed to fork [%s] to utility thread pool", runnable), e); + } + } + private class StreamSubscription implements Flow.Subscription { @Override public void request(long n) { @@ -142,7 +153,7 @@ private void requestOnMlThread(long n) { if (UTILITY_THREAD_POOL_NAME.equalsIgnoreCase(currentThreadPool)) { upstream.request(n); } else { - CompletableFuture.runAsync(() -> upstream.request(n), threadPool.executor(UTILITY_THREAD_POOL_NAME)); + runOnUtilityThreadPool(() -> upstream.request(n)); } } From db2eca345ddc9d8a9354a2ef7597638b647ae958 Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Mon, 28 Oct 2024 16:49:44 +0100 Subject: [PATCH 198/202] fixed testCCSClusterDetailsWhereAllShardsSkippedInCanMatch (#115774) --- muted-tests.yml | 3 --- .../elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java | 1 - 2 files changed, 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index c37733cb7307c..4128d41bf252c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -262,9 +262,6 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 -- class: org.elasticsearch.xpack.search.CrossClusterAsyncSearchIT - method: testCCSClusterDetailsWhereAllShardsSkippedInCanMatch - issue: https://github.com/elastic/elasticsearch/issues/115652 - class: org.elasticsearch.index.get.GetResultTests method: testToAndFromXContent issue: https://github.com/elastic/elasticsearch/issues/115688 diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 3cd8778069d0c..3b5647da1399f 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -287,7 +287,6 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except try { responseId = response.getId(); assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); SearchResponse.Clusters clusters = response.getSearchResponse().getClusters(); assertThat(clusters.getTotal(), equalTo(2)); if (dfs) { From da108fcb0d31540ff076f42d798a657624e364ec Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Mon, 28 Oct 2024 11:55:16 -0400 Subject: [PATCH 199/202] [CI] Set max-workers to 4 during agent image creation --- .buildkite/packer_cache.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/packer_cache.sh b/.buildkite/packer_cache.sh index 752914ba55c23..01e1ad5cd7823 100755 --- a/.buildkite/packer_cache.sh +++ b/.buildkite/packer_cache.sh @@ -29,6 +29,6 @@ for branch in "${branches[@]}"; do fi export JAVA_HOME="$HOME/.java/$ES_BUILD_JAVA" - "checkout/${branch}/gradlew" --project-dir "$CHECKOUT_DIR" --parallel -s resolveAllDependencies -Dorg.gradle.warning.mode=none -DisCI + "checkout/${branch}/gradlew" --project-dir "$CHECKOUT_DIR" --parallel -s resolveAllDependencies -Dorg.gradle.warning.mode=none -DisCI --max-workers=4 rm -rf "checkout/${branch}" done From 2b1dc5adbe2ee4b6ef5a1a97eba785e03445bcb0 Mon Sep 17 00:00:00 2001 From: Francesco Gualazzi Date: 
Mon, 28 Oct 2024 17:47:45 +0100 Subject: [PATCH 200/202] plugin(apm-data): amend logs-apm component template (#115778) Restore standard mode after LogsDB took precedence. Related: #114501 --- .../resources/component-templates/logs-apm@settings.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm@settings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm@settings.yaml index 6bb8079c534c8..af84ba50fb075 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm@settings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm@settings.yaml @@ -5,5 +5,6 @@ _meta: managed: true template: settings: - codec: best_compression - mode: standard + index: + codec: best_compression + mode: standard From 232622afd3d32ee1aace64d01385111e9cfc0bed Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 28 Oct 2024 18:02:19 +0100 Subject: [PATCH 201/202] Tweak LogsdbIndexingRollingUpgradeIT and TsdbIndexingRollingUpgradeIT (#115785) to fix the following test bugs: * Deal with trial already started error. * Ensure k8s.pod.name field is mapped as keyword Closes #115755, #115756, #115757, #115758 --- .../upgrades/LogsdbIndexingRollingUpgradeIT.java | 12 ++++++++++-- .../upgrades/TsdbIndexingRollingUpgradeIT.java | 14 ++++++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java index 9bdc43543e331..226cb3dda2ba1 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsdbIndexingRollingUpgradeIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.rest.ObjectPath; @@ -21,6 +22,7 @@ import java.io.IOException; import java.io.InputStream; import java.time.Instant; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.enableLogsdbByDefault; @@ -73,7 +75,7 @@ public void testIndexing() throws Exception { if (isOldCluster()) { startTrial(); enableLogsdbByDefault(); - createTemplate(dataStreamName, "3", TEMPLATE); + createTemplate(dataStreamName, getClass().getSimpleName().toLowerCase(Locale.ROOT), TEMPLATE); Instant startTime = Instant.now().minusSeconds(60 * 60); bulkIndex(dataStreamName, 4, 1024, startTime); @@ -233,7 +235,13 @@ void query(String dataStreamName) throws Exception { protected static void startTrial() throws IOException { Request startTrial = new Request("POST", "/_license/start_trial"); startTrial.addParameter("acknowledge", "true"); - assertOK(client().performRequest(startTrial)); + try { + assertOK(client().performRequest(startTrial)); + } catch (ResponseException e) { + var responseBody = entityAsMap(e.getResponse()); + String error = ObjectPath.evaluate(responseBody, "error_message"); + assertThat(error, equalTo("Trial was already activated.")); + } } static Map getIndexSettingsWithDefaults(String index) throws IOException { diff --git 
a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java index 1ac919ea57001..37e7aab94f7ca 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIndexingRollingUpgradeIT.java @@ -16,10 +16,13 @@ import org.elasticsearch.test.rest.ObjectPath; import java.time.Instant; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.upgrades.LogsIndexModeRollingUpgradeIT.getWriteBackingIndex; -import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.*; +import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.createTemplate; +import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.getIndexSettingsWithDefaults; +import static org.elasticsearch.upgrades.LogsdbIndexingRollingUpgradeIT.startTrial; import static org.elasticsearch.upgrades.TsdbIT.TEMPLATE; import static org.elasticsearch.upgrades.TsdbIT.formatInstant; import static org.hamcrest.Matchers.equalTo; @@ -42,7 +45,7 @@ public void testIndexing() throws Exception { String dataStreamName = "k9s"; if (isOldCluster()) { startTrial(); - createTemplate(dataStreamName, "2", TEMPLATE); + createTemplate(dataStreamName, getClass().getSimpleName().toLowerCase(Locale.ROOT), TEMPLATE); Instant startTime = Instant.now().minusSeconds(60 * 60); bulkIndex(dataStreamName, 4, 1024, startTime); @@ -52,6 +55,13 @@ public void testIndexing() throws Exception { assertThat(((Map) settings.get("settings")).get("index.mode"), equalTo("time_series")); assertThat(((Map) settings.get("defaults")).get("index.mapping.source.mode"), equalTo("SYNTHETIC")); + var mapping = getIndexMappingAsMap(firstBackingIndex); + assertThat( + "incorrect k8s.pod.name field in mapping:" + mapping, + "keyword", + equalTo(ObjectPath.evaluate(mapping, "properties.k8s.properties.pod.properties.name.type")) + ); + ensureGreen(dataStreamName); search(dataStreamName); query(dataStreamName); From 9992edc588814ff0d577b30303fac73546fa5118 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Mon, 28 Oct 2024 14:54:03 -0400 Subject: [PATCH 202/202] [ML] Fix stream support for TaskType.ANY (#115656) If we support one, then we support any. 
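A standalone sketch of the behavior this changes, with a stand-in enum and
hypothetical names (the real TaskType and service classes live in Elasticsearch):
a canStream-style check tests membership in the supported-task set, so
TaskType.ANY has to be present alongside TaskType.COMPLETION for the check to
return true:

    import java.util.EnumSet;
    import java.util.Set;

    class StreamingSupportSketch {
        // Stand-in for Elasticsearch's TaskType enum.
        enum TaskType { ANY, COMPLETION, TEXT_EMBEDDING }

        // Before the fix: ANY is absent, so canStream(ANY) is false even
        // though the service can stream completions.
        static final Set<TaskType> BEFORE = EnumSet.of(TaskType.COMPLETION);

        // After the fix: ANY is included alongside COMPLETION.
        static final Set<TaskType> AFTER =
            EnumSet.of(TaskType.COMPLETION, TaskType.ANY);

        static boolean canStream(Set<TaskType> supported, TaskType taskType) {
            return supported.contains(taskType);
        }

        public static void main(String[] args) {
            System.out.println(canStream(BEFORE, TaskType.ANY));        // false
            System.out.println(canStream(AFTER, TaskType.ANY));         // true
            System.out.println(canStream(AFTER, TaskType.COMPLETION)); // true
        }
    }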
--- docs/changelog/115656.yaml | 5 +++++ .../xpack/inference/services/SenderService.java | 3 ++- .../services/amazonbedrock/AmazonBedrockServiceTests.java | 7 +++++++ .../services/anthropic/AnthropicServiceTests.java | 7 +++++++ .../services/azureaistudio/AzureAiStudioServiceTests.java | 7 +++++++ .../services/azureopenai/AzureOpenAiServiceTests.java | 7 +++++++ .../inference/services/cohere/CohereServiceTests.java | 7 +++++++ .../googleaistudio/GoogleAiStudioServiceTests.java | 7 +++++++ .../inference/services/openai/OpenAiServiceTests.java | 7 +++++++ 9 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115656.yaml diff --git a/docs/changelog/115656.yaml b/docs/changelog/115656.yaml new file mode 100644 index 0000000000000..13b612b052fc1 --- /dev/null +++ b/docs/changelog/115656.yaml @@ -0,0 +1,5 @@ +pr: 115656 +summary: Fix stream support for `TaskType.ANY` +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java index 71b38d7a0785a..953cf4cf6ad77 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -25,13 +25,14 @@ import org.elasticsearch.xpack.inference.external.http.sender.Sender; import java.io.IOException; +import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; public abstract class SenderService implements InferenceService { - protected static final Set COMPLETION_ONLY = Set.of(TaskType.COMPLETION); + protected static final Set COMPLETION_ONLY = EnumSet.of(TaskType.COMPLETION, TaskType.ANY); private final Sender sender; private final ServiceComponents serviceComponents; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java index 06c5a68987a9e..931d418a3664b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java @@ -1304,6 +1304,13 @@ public void testInfer_UnauthorizedResponse() throws IOException { } } + public void testSupportsStreaming() throws IOException { + try (var service = new AmazonBedrockService(mock(), mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + public void testChunkedInfer_CallsInfer_ConvertsFloatResponse_ForEmbeddings() throws IOException { var model = AmazonBedrockEmbeddingsModelTests.createModel( "id", diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java index 48277112d9306..c4f7fbfb14437 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java @@ -593,6 +593,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("blah"); } + public void testSupportsStreaming() throws IOException { + try (var service = new AnthropicService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + private AnthropicService createServiceWithMockSender() { return new AnthropicService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index e85edf573ba96..4d2eb60767f44 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -1384,6 +1384,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("You didn't provide an API key..."); } + public void testSupportsStreaming() throws IOException { + try (var service = new AzureAiStudioService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + // ---------------------------------------------------------------- private AzureAiStudioService createService() { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index 3408fc358cac0..1bae6ce66d6aa 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -1504,6 +1504,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("You didn't provide an API key..."); } + public void testSupportsStreaming() throws IOException { + try (var service = new AzureOpenAiService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + private AzureOpenAiService createAzureOpenAiService() { return new AzureOpenAiService(mock(HttpRequestSender.Factory.class), createWithEmptySettings(threadPool)); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index 758c38166778b..d44be4246f844 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -1683,6 +1683,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("how dare you"); } + 
public void testSupportsStreaming() throws IOException { + try (var service = new CohereService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + private Map getRequestConfigMap( Map serviceSettings, Map taskSettings, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java index e8382876868c5..27a53177658c6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java @@ -1219,6 +1219,13 @@ private void testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure si } } + public void testSupportsStreaming() throws IOException { + try (var service = new GoogleAiStudioService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + public static Map buildExpectationCompletions(List completions) { return Map.of( ChatCompletionResults.COMPLETION, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index cf1438b334478..0698b9652b767 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -1077,6 +1077,13 @@ public void testInfer_StreamRequest_ErrorResponse() throws Exception { .hasErrorContaining("You didn't provide an API key..."); } + public void testSupportsStreaming() throws IOException { + try (var service = new OpenAiService(mock(), createWithEmptySettings(mock()))) { + assertTrue(service.canStream(TaskType.COMPLETION)); + assertTrue(service.canStream(TaskType.ANY)); + } + } + public void testCheckModelConfig_IncludesMaxTokens() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager);