From 6dd31071033e472c4d7094ae46db3a7f1f1ff76a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 21 Aug 2024 16:18:42 -0400 Subject: [PATCH 001/352] ESQL: Speed up attribute serialization tests (#112069) Speeds up the attribute serialization tests by building the random configuration one time, rather than over and over and over again. It's expensive to make a random configuration! --- .../function/AbstractAttributeTestCase.java | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java index bc29e33c4a17f..c625ae5dfb61b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.expression.function; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -20,17 +19,24 @@ import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.session.Configuration; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; -import static org.elasticsearch.xpack.esql.ConfigurationTestUtils.randomConfiguration; import static org.hamcrest.Matchers.sameInstance; public abstract class AbstractAttributeTestCase extends AbstractWireSerializingTestCase< AbstractAttributeTestCase.ExtraAttribute> { + + /** + * We use a single random config for all serialization because it's pretty + * heavy to build, especially in {@link #testConcurrentSerialization()}. 
+ */ + private Configuration config; + protected abstract T create(); protected abstract T mutate(T instance); @@ -56,7 +62,11 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { @Override protected final Writeable.Reader instanceReader() { - return ExtraAttribute::new; + return in -> { + PlanStreamInput pin = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), config); + pin.setTransportVersion(in.getTransportVersion()); + return new ExtraAttribute(pin); + }; } /** @@ -70,10 +80,8 @@ public static class ExtraAttribute implements Writeable { assertThat(a.source(), sameInstance(Source.EMPTY)); } - ExtraAttribute(StreamInput in) throws IOException { - PlanStreamInput ps = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), randomConfiguration()); - ps.setTransportVersion(in.getTransportVersion()); - a = ps.readNamedWriteable(Attribute.class); + ExtraAttribute(PlanStreamInput in) throws IOException { + a = in.readNamedWriteable(Attribute.class); } @Override From f0dbda75294bc7b5f157668553e1a7a5bc5294ca Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 21 Aug 2024 22:26:57 +0100 Subject: [PATCH 002/352] Expand docs on remote cluster proxying (#112025) It's not obvious from the docs that transport connections (including connections to remote clusters) use a custom binary protocol and require a _layer 4_ proxy. This commit clarifies this point. --- docs/reference/modules/network.asciidoc | 4 ++- .../modules/remote-clusters.asciidoc | 31 +++++++++++-------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 593aa79ded4d9..8fdc9f2e4f9cb 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -5,7 +5,9 @@ Each {es} node has two different network interfaces. Clients send requests to {es}'s REST APIs using its <>, but nodes communicate with other nodes using the <>. The transport interface is also used for communication with -<>. +<>. The transport interface uses a custom +binary protocol sent over <> TCP channels. +Both interfaces can be configured to use <>. You can configure both of these interfaces at the same time using the `network.*` settings. If you have a more complicated network, you might need to diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 25217302b7631..510ceb6ddb013 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -63,11 +63,13 @@ the same security domain. <>. [[sniff-mode]] Sniff mode:: -In sniff mode, a cluster is created using a name and a list of seed nodes. When -a remote cluster is registered, its cluster state is retrieved from one of the -seed nodes and up to three _gateway nodes_ are selected as part of remote -cluster requests. This mode requires that the gateway node's publish addresses -are accessible by the local cluster. +In sniff mode, a cluster is registered with a name of your choosing and a list +of addresses of _seed_ nodes. When you register a remote cluster using sniff +mode, {es} retrieves from one of the seed nodes the addresses of up to three +_gateway nodes_. Each `remote_cluster_client` node in the local {es} cluster +then opens several TCP connections to the publish addresses of the gateway +nodes. 
This mode therefore requires that the gateway nodes' publish addresses +are accessible to nodes in the local cluster. + Sniff mode is the default connection mode. + @@ -84,15 +86,18 @@ However, such nodes still have to satisfy the two above requirements. [[proxy-mode]] Proxy mode:: -In proxy mode, a cluster is created using a name and a single proxy address. -When you register a remote cluster, a configurable number of socket connections -are opened to the proxy address. The proxy is required to route those -connections to the remote cluster. Proxy mode does not require remote cluster -nodes to have accessible publish addresses. +In proxy mode, a cluster is registered with a name of your choosing and the +address of a TCP (layer 4) reverse proxy which you must configure to route +connections to the nodes of the remote cluster. When you register a remote +cluster using proxy mode, {es} opens several TCP connections to the proxy +address and uses these connections to communicate with the remote cluster. In +proxy mode {es} disregards the publish addresses of the remote cluster nodes +which means that the publish addresses of the remote cluster nodes need not be +accessible to the local cluster. + -The proxy mode is not the default connection mode and must be configured. -Proxy mode has the same <> as sniff mode. +Proxy mode is not the default connection mode, so you must configure it +explicitly if desired. Proxy mode has the same <> as sniff mode. include::cluster/remote-clusters-api-key.asciidoc[] From fd37ef88c28744181d4628a05baed57098884bd9 Mon Sep 17 00:00:00 2001 From: Vishal Raj Date: Thu, 22 Aug 2024 00:12:24 +0100 Subject: [PATCH 003/352] [plugin/apm-data] Set fallback to legacy ILM policies (#112028) --- .../resources/index-templates/logs-apm.app@template.yaml | 3 +++ .../resources/index-templates/logs-apm.error@template.yaml | 3 +++ .../resources/index-templates/metrics-apm.app@template.yaml | 3 +++ .../index-templates/metrics-apm.internal@template.yaml | 3 +++ .../metrics-apm.service_destination.10m@template.yaml | 3 +++ .../metrics-apm.service_destination.1m@template.yaml | 3 +++ .../metrics-apm.service_destination.60m@template.yaml | 3 +++ .../metrics-apm.service_summary.10m@template.yaml | 3 +++ .../metrics-apm.service_summary.1m@template.yaml | 3 +++ .../metrics-apm.service_summary.60m@template.yaml | 3 +++ .../metrics-apm.service_transaction.10m@template.yaml | 3 +++ .../metrics-apm.service_transaction.1m@template.yaml | 3 +++ .../metrics-apm.service_transaction.60m@template.yaml | 3 +++ .../metrics-apm.transaction.10m@template.yaml | 3 +++ .../index-templates/metrics-apm.transaction.1m@template.yaml | 3 +++ .../metrics-apm.transaction.60m@template.yaml | 3 +++ .../resources/index-templates/traces-apm.rum@template.yaml | 3 +++ .../index-templates/traces-apm.sampled@template.yaml | 5 +++++ .../main/resources/index-templates/traces-apm@template.yaml | 3 +++ x-pack/plugin/apm-data/src/main/resources/resources.yaml | 2 +- 20 files changed, 60 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml index 21cad50f3fe90..f74f1aa2e900e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml @@ -23,3 +23,6 @@ template: index: default_pipeline: logs-apm.app@default-pipeline final_pipeline: 
apm@pipeline + lifecycle: + name: logs-apm.app_logs-default_policy + prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml index 2cfa7b454722f..0ab9f01a76c5c 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml @@ -30,3 +30,6 @@ template: index: default_pipeline: logs-apm.error@default-pipeline final_pipeline: apm@pipeline + lifecycle: + name: logs-apm.error_logs-default_policy + prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml index a3c7ab7c05193..5659a5c2cbd55 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml @@ -24,3 +24,6 @@ template: index: default_pipeline: metrics-apm.app@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.app_metrics-default_policy + prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml index 4c7df377a6cfa..8e5fca051aaeb 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml @@ -25,6 +25,9 @@ template: index: default_pipeline: metrics-apm.internal@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.internal_metrics-default_policy + prefer_ilm: false mappings: properties: data_stream.dataset: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml index 63c9ff9c3b988..23db583d3a30f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_destination_10m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml index 6995a2d09b12e..4cbeb5053d072 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml @@ -26,6 +26,9 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_destination_1m_metrics-default_policy + prefer_ilm: false mappings: properties: 
metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml index b39d0beca3740..d29f953cb73a1 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_destination_60m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml index 8d92b21866bb8..57f63b9ed7dcc 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_summary_10m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml index de19df330aa0e..6b8e604e3f03e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml @@ -26,6 +26,9 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_summary_1m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml index 002676eb08cc1..1c16e20a34f51 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_summary_60m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml index 549af3942dcd3..db85407599f67 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml +++ 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_transaction_10m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml index 9bdacfc337663..9e3220b2c4c3a 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml @@ -26,6 +26,9 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_transaction_1m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml index 8bcbeb53c74fe..c10435b2b50a6 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.service_transaction_60m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml index 68c1dc0f31c1e..92c6a430a377d 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.transaction_10m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml index 6065f6e12f999..78ed0959f270f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml @@ -26,6 +26,9 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.transaction_1m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml index d8889ceb63f87..3625ecfc1458b 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml @@ -27,6 +27,9 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline + lifecycle: + name: metrics-apm.transaction_60m_metrics-default_policy + prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml index d299481ff6e21..53647284d2b91 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml @@ -25,6 +25,9 @@ template: index: default_pipeline: traces-apm.rum@default-pipeline final_pipeline: traces-apm@pipeline + lifecycle: + name: traces-apm.rum_traces-default_policy + prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml index 81457e2f204cb..9cffe241e0979 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml @@ -20,6 +20,11 @@ ignore_missing_component_templates: template: lifecycle: data_retention: 1h + settings: + index: + lifecycle: + name: traces-apm.sampled_traces-default_policy + prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml index fda953171b793..bcf406faa71da 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml @@ -24,6 +24,9 @@ template: index: default_pipeline: traces-apm@default-pipeline final_pipeline: traces-apm@pipeline + lifecycle: + name: traces-apm.traces-default_policy + prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index fa38fda679e49..cd2111ffb9f83 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 6 +version: 7 component-templates: # Data lifecycle. 
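For orientation: every index template touched by this patch gains the same stanza under `template.settings.index`. A minimal sketch of the resulting block, using the policy name from the `traces-apm@template.yaml` hunk above (the inline comments describe the assumed `prefer_ilm` semantics and are not part of the patch):

```yaml
template:
  settings:
    index:
      lifecycle:
        # ILM policy to fall back to for this data stream's backing indices.
        name: traces-apm.traces-default_policy
        # Assumed semantics: with prefer_ilm set to false, a data stream
        # lifecycle, when configured, takes precedence over the ILM policy
        # named above.
        prefer_ilm: false
```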
From 7759b553b53a657da3d19bcb3d84070309357aee Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 22 Aug 2024 09:31:51 +0700 Subject: [PATCH 004/352] Improve zstd test coverage by adding a test duel (#112048) Adding test duel between out of the box stored fields codec and zstd stored field codecs: - lz4 compared to zstd level 0 (best speed) - deflate compared zstd level 3 (best compression) Relates #108706 --- .../codec/zstd/StoredFieldCodecDuelTests.java | 109 ++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java diff --git a/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java b/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java new file mode 100644 index 0000000000000..93e9911746d18 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/zstd/StoredFieldCodecDuelTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.codec.zstd; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.tests.index.ForceMergePolicy; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.codec.LegacyPerFieldMapperCodec; +import org.elasticsearch.index.codec.PerFieldMapperCodec; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class StoredFieldCodecDuelTests extends ESTestCase { + + private static final String STRING_FIELD = "string_field_1"; + private static final String BINARY_FIELD = "binary_field_2"; + private static final String INT_FIELD = "int_field_3"; + private static final String LONG_FIELD = "long_field_4"; + private static final String FLOAT_FIELD = "float_field_5"; + private static final String DOUBLE_FIELD = "double_field_5"; + + public void testDuelBestSpeed() throws IOException { + var baseline = new LegacyPerFieldMapperCodec(Lucene99Codec.Mode.BEST_SPEED, null, BigArrays.NON_RECYCLING_INSTANCE); + var contender = new PerFieldMapperCodec(Zstd814StoredFieldsFormat.Mode.BEST_SPEED, null, BigArrays.NON_RECYCLING_INSTANCE); + doTestDuel(baseline, contender); + } + + public void testDuelBestCompression() throws IOException { + var baseline = new LegacyPerFieldMapperCodec(Lucene99Codec.Mode.BEST_COMPRESSION, null, BigArrays.NON_RECYCLING_INSTANCE); + var contender = new PerFieldMapperCodec(Zstd814StoredFieldsFormat.Mode.BEST_COMPRESSION, null, BigArrays.NON_RECYCLING_INSTANCE); + doTestDuel(baseline, contender); + } + + static void doTestDuel(Codec baslineCodec, Codec contenderCodec) throws IOException { + try (var baselineDirectory = newDirectory(); var contenderDirectory = newDirectory()) { + int numDocs = randomIntBetween(256, 8096); + + var mergePolicy = new ForceMergePolicy(newLogMergePolicy()); + var baselineConfig = newIndexWriterConfig(); + 
baselineConfig.setMergePolicy(mergePolicy); + baselineConfig.setCodec(baslineCodec); + var contenderConf = newIndexWriterConfig(); + contenderConf.setCodec(contenderCodec); + contenderConf.setMergePolicy(mergePolicy); + + try ( + var baselineIw = new RandomIndexWriter(random(), baselineDirectory, baselineConfig); + var contenderIw = new RandomIndexWriter(random(), contenderDirectory, contenderConf) + ) { + for (int i = 0; i < numDocs; i++) { + Document doc = new Document(); + doc.add(new StoredField(STRING_FIELD, randomAlphaOfLength(randomIntBetween(1, 4096)))); + doc.add(new StoredField(BINARY_FIELD, randomByteArrayOfLength(randomIntBetween(1, 4096)))); + doc.add(new StoredField(INT_FIELD, randomInt())); + doc.add(new StoredField(LONG_FIELD, randomLong())); + doc.add(new StoredField(FLOAT_FIELD, randomFloat())); + doc.add(new StoredField(DOUBLE_FIELD, randomDouble())); + baselineIw.addDocument(doc); + contenderIw.addDocument(doc); + } + baselineIw.forceMerge(1); + contenderIw.forceMerge(1); + } + try (var baselineIr = DirectoryReader.open(baselineDirectory); var contenderIr = DirectoryReader.open(contenderDirectory)) { + assertEquals(1, baselineIr.leaves().size()); + assertEquals(1, contenderIr.leaves().size()); + + var baseLeafReader = baselineIr.leaves().get(0).reader(); + var contenderLeafReader = contenderIr.leaves().get(0).reader(); + assertEquals(baseLeafReader.maxDoc(), contenderLeafReader.maxDoc()); + + for (int docId = 0; docId < contenderLeafReader.maxDoc(); docId++) { + Document baselineDoc = baseLeafReader.storedFields().document(docId); + Document contenderDoc = contenderLeafReader.storedFields().document(docId); + assertThat(contenderDoc.getFields().size(), equalTo(baselineDoc.getFields().size())); + for (int i = 0; i < baselineDoc.getFields().size(); i++) { + var baselineField = baselineDoc.getFields().get(i); + var contenderField = contenderDoc.getFields().get(i); + assertThat(contenderField.name(), equalTo(baselineField.name())); + switch (baselineField.name()) { + case STRING_FIELD -> assertThat(contenderField.stringValue(), equalTo(baselineField.stringValue())); + case BINARY_FIELD -> assertThat(contenderField.binaryValue(), equalTo(baselineField.binaryValue())); + case INT_FIELD, LONG_FIELD, FLOAT_FIELD, DOUBLE_FIELD -> assertThat( + contenderField.numericValue(), + equalTo(baselineField.numericValue()) + ); + default -> fail("unexpected field [" + baselineField.name() + "]"); + } + } + } + } + } + } + +} From a226826786d27d2ed16cc509dd28d454f58ee3d1 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 22 Aug 2024 14:28:36 +1000 Subject: [PATCH 005/352] Mute org.elasticsearch.index.codec.tsdb.DocValuesCodecDuelTests testDuel #112082 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f480938c24a13..18c04c774d487 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -176,6 +176,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT method: testForceSleepsProfile {ASYNC} issue: https://github.com/elastic/elasticsearch/issues/112049 +- class: org.elasticsearch.index.codec.tsdb.DocValuesCodecDuelTests + method: testDuel + issue: https://github.com/elastic/elasticsearch/issues/112082 # Examples: # From 158901577d27b426543cf1a30a7b9bd6cf93a0ba Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 22 Aug 2024 15:09:35 +1000 Subject: [PATCH 006/352] Mute 
org.elasticsearch.xpack.inference.InferenceRestIT test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} #111999 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 18c04c774d487..96fa1c674a27e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -179,6 +179,9 @@ tests: - class: org.elasticsearch.index.codec.tsdb.DocValuesCodecDuelTests method: testDuel issue: https://github.com/elastic/elasticsearch/issues/112082 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} + issue: https://github.com/elastic/elasticsearch/issues/111999 # Examples: # From 10d665ba6bbbc007b42c2c85c6470323555c8be9 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Thu, 22 Aug 2024 08:10:12 +0200 Subject: [PATCH 007/352] Collect APM metrics for failure stores (#108279) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds APM metrics for failure stores. See the JavaDoc comments in `FailureStoreMetrics.java` for a detailed explanation on the individual metrics. --- .../IngestFailureStoreMetricsIT.java | 428 ++++++++++++++++++ .../action/bulk/BulkOperation.java | 90 ++-- .../action/bulk/FailureStoreMetrics.java | 98 ++++ .../bulk/TransportAbstractBulkAction.java | 8 +- .../action/bulk/TransportBulkAction.java | 100 ++-- .../bulk/TransportSimulateBulkAction.java | 4 +- .../cluster/metadata/DataStream.java | 19 + .../elasticsearch/ingest/IngestService.java | 48 +- .../elasticsearch/node/NodeConstruction.java | 6 +- .../action/bulk/BulkOperationTests.java | 3 +- ...ActionIndicesThatCannotBeCreatedTests.java | 3 +- .../bulk/TransportBulkActionIngestTests.java | 14 +- .../action/bulk/TransportBulkActionTests.java | 21 +- .../bulk/TransportBulkActionTookTests.java | 3 +- .../ingest/ReservedPipelineActionTests.java | 4 +- .../ingest/IngestServiceTests.java | 33 +- .../ingest/SimulateIngestServiceTests.java | 17 +- .../snapshots/SnapshotResiliencyTests.java | 7 +- ...sportGetTrainedModelsStatsActionTests.java | 4 +- 19 files changed, 781 insertions(+), 129 deletions(-) create mode 100644 modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java create mode 100644 server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java new file mode 100644 index 0000000000000..a52016e8c7f0b --- /dev/null +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -0,0 +1,428 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.datastreams; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; +import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.FailureStoreMetrics; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.IngestTestPlugin; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.TestProcessor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +/** + * An integration test that verifies how different paths/scenarios affect the APM metrics for failure stores. 
+ */ +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.SUITE) +public class IngestFailureStoreMetricsIT extends ESIntegTestCase { + + private static final List METRICS = List.of( + FailureStoreMetrics.METRIC_TOTAL, + FailureStoreMetrics.METRIC_FAILURE_STORE, + FailureStoreMetrics.METRIC_REJECTED + ); + + private String template; + private String dataStream; + private String pipeline; + + @Before + public void initializeRandomNames() { + template = "template-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + dataStream = "data-stream-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + pipeline = "pipeline-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + logger.info( + "--> running [{}] with generated names data stream [{}], template [{}] and pipeline [{}]", + getTestName(), + dataStream, + template, + pipeline + ); + } + + @Override + protected Collection> nodePlugins() { + return List.of(DataStreamsPlugin.class, CustomIngestTestPlugin.class, TestTelemetryPlugin.class, MapperExtrasPlugin.class); + } + + public void testNoPipelineNoFailures() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testFailingPipelineNoFailureStore() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + createBasicPipeline("fail"); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.PIPELINE, + false + ); + } + + public void testFailingPipelineWithFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + createBasicPipeline("fail"); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.PIPELINE + ); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testShardFailureNoFailureStore() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = 
collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD, + false + ); + } + + public void testShardFailureWithFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD + ); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + /** + * Make sure the rejected counter gets incremented when there were shard-level failures while trying to redirect a document to the + * failure store. + */ + public void testRejectionFromFailureStore() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + + // Initialize failure store. + var rolloverRequest = new RolloverRequest(dataStream, null); + rolloverRequest.setIndicesOptions( + IndicesOptions.builder(rolloverRequest.indicesOptions()) + .failureStoreOptions(opts -> opts.includeFailureIndices(true).includeRegularIndices(false)) + .build() + ); + var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); + var failureStoreIndex = rolloverResponse.getNewIndex(); + // Add a write block to the failure store index, which causes shard-level "failures". + var addIndexBlockRequest = new AddIndexBlockRequest(IndexMetadata.APIBlock.WRITE, failureStoreIndex); + client().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest).actionGet(); + + int nrOfSuccessfulDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfSuccessfulDocs, null); + int nrOfFailingDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfFailingDocs, "\"foo\"", null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfSuccessfulDocs + nrOfFailingDocs, dataStream); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD + ); + assertMeasurements( + measurements.get(FailureStoreMetrics.METRIC_REJECTED), + nrOfFailingDocs, + dataStream, + FailureStoreMetrics.ErrorLocation.SHARD, + true + ); + } + + /** + * Make sure metrics get the correct data_stream attribute after a reroute. 
+ */ + public void testRerouteSuccessfulCorrectName() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + + String destination = dataStream + "-destination"; + final var createDataStreamRequest = new CreateDataStreamAction.Request(destination); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + createReroutePipeline(destination); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, destination); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testDropping() throws IOException { + putComposableIndexTemplate(true); + createDataStream(); + createBasicPipeline("drop"); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs(dataStream, nrOfDocs, pipeline); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + public void testDataStreamAlias() throws IOException { + putComposableIndexTemplate(false); + createDataStream(); + var indicesAliasesRequest = new IndicesAliasesRequest(); + indicesAliasesRequest.addAliasAction( + IndicesAliasesRequest.AliasActions.add().alias("some-alias").index(dataStream).writeIndex(true) + ); + client().execute(TransportIndicesAliasesAction.TYPE, indicesAliasesRequest).actionGet(); + + int nrOfDocs = randomIntBetween(5, 10); + indexDocs("some-alias", nrOfDocs, null); + + var measurements = collectTelemetry(); + assertMeasurements(measurements.get(FailureStoreMetrics.METRIC_TOTAL), nrOfDocs, dataStream); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_FAILURE_STORE).size()); + assertEquals(0, measurements.get(FailureStoreMetrics.METRIC_REJECTED).size()); + } + + private void putComposableIndexTemplate(boolean failureStore) throws IOException { + TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(template); + request.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStream + "*")) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, failureStore)) + .template(new Template(null, new CompressedXContent(""" + { + "dynamic": false, + "properties": { + "@timestamp": { + "type": "date" + }, + "count": { + "type": "long" + } + } + }"""), null)) + .build() + ); + client().execute(TransportPutComposableIndexTemplateAction.TYPE, request).actionGet(); + } + + private void createDataStream() { + final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + } + + private void createBasicPipeline(String processorType) { + createPipeline(Strings.format("\"%s\": {}", processorType)); + } + + private void createReroutePipeline(String destination) { + createPipeline(Strings.format("\"reroute\": {\"destination\": \"%s\"}", destination)); + } + + private void createPipeline(String processor) { + String pipelineDefinition = Strings.format("{\"processors\": [{%s}]}", processor); + BytesReference 
bytes = new BytesArray(pipelineDefinition); + clusterAdmin().putPipeline(new PutPipelineRequest(pipeline, bytes, XContentType.JSON)).actionGet(); + } + + private void indexDocs(String dataStream, int numDocs, String pipeline) { + indexDocs(dataStream, numDocs, "1", pipeline); + } + + private void indexDocs(String dataStream, int numDocs, String value, String pipeline) { + BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < numDocs; i++) { + String time = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis()); + bulkRequest.add( + new IndexRequest(dataStream).opType(DocWriteRequest.OpType.CREATE) + .source(Strings.format("{\"%s\":\"%s\", \"count\": %s}", DEFAULT_TIMESTAMP_FIELD, time, value), XContentType.JSON) + .setPipeline(pipeline) + ); + } + client().bulk(bulkRequest).actionGet(); + } + + private static Map> collectTelemetry() { + Map> measurements = new HashMap<>(); + for (PluginsService pluginsService : internalCluster().getInstances(PluginsService.class)) { + final TestTelemetryPlugin telemetryPlugin = pluginsService.filterPlugins(TestTelemetryPlugin.class).findFirst().orElseThrow(); + + telemetryPlugin.collect(); + + for (String metricName : METRICS) { + measurements.put(metricName, telemetryPlugin.getLongCounterMeasurement(metricName)); + } + } + return measurements; + } + + private void assertMeasurements(List measurements, int expectedSize, String expectedDataStream) { + assertMeasurements(measurements, expectedSize, expectedDataStream, (Consumer) null); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + FailureStoreMetrics.ErrorLocation location + ) { + assertMeasurements( + measurements, + expectedSize, + expectedDataStream, + measurement -> assertEquals(location.name(), measurement.attributes().get("error_location")) + ); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + FailureStoreMetrics.ErrorLocation location, + boolean failureStore + ) { + assertMeasurements(measurements, expectedSize, expectedDataStream, measurement -> { + assertEquals(location.name(), measurement.attributes().get("error_location")); + assertEquals(failureStore, measurement.attributes().get("failure_store")); + }); + } + + private void assertMeasurements( + List measurements, + int expectedSize, + String expectedDataStream, + Consumer customAssertion + ) { + assertEquals(expectedSize, measurements.size()); + for (Measurement measurement : measurements) { + assertEquals(expectedDataStream, measurement.attributes().get("data_stream")); + if (customAssertion != null) { + customAssertion.accept(measurement); + } + } + } + + public static class CustomIngestTestPlugin extends IngestTestPlugin { + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map processors = new HashMap<>(); + processors.put( + "drop", + (factories, tag, description, config) -> new TestProcessor(tag, "drop", description, ingestDocument -> null) + ); + processors.put("reroute", (factories, tag, description, config) -> { + String destination = (String) config.remove("destination"); + return new TestProcessor( + tag, + "reroute", + description, + (Consumer) ingestDocument -> ingestDocument.reroute(destination) + ); + }); + processors.put( + "fail", + (processorFactories, tag, description, config) -> new TestProcessor(tag, "fail", description, new RuntimeException()) + ); + return processors; + } + 
} +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 258e5b4c9a58d..813203afe42c5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -10,7 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -91,6 +93,7 @@ final class BulkOperation extends ActionRunnable { private final OriginSettingClient rolloverClient; private final Set failureStoresToBeRolledOver = ConcurrentCollections.newConcurrentSet(); private final Set failedRolloverRequests = ConcurrentCollections.newConcurrentSet(); + private final FailureStoreMetrics failureStoreMetrics; BulkOperation( Task task, @@ -104,7 +107,8 @@ final class BulkOperation extends ActionRunnable { IndexNameExpressionResolver indexNameExpressionResolver, LongSupplier relativeTimeProvider, long startTimeNanos, - ActionListener listener + ActionListener listener, + FailureStoreMetrics failureStoreMetrics ) { this( task, @@ -120,7 +124,8 @@ final class BulkOperation extends ActionRunnable { startTimeNanos, listener, new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()), - new FailureStoreDocumentConverter() + new FailureStoreDocumentConverter(), + failureStoreMetrics ); } @@ -138,7 +143,8 @@ final class BulkOperation extends ActionRunnable { long startTimeNanos, ActionListener listener, ClusterStateObserver observer, - FailureStoreDocumentConverter failureStoreDocumentConverter + FailureStoreDocumentConverter failureStoreDocumentConverter, + FailureStoreMetrics failureStoreMetrics ) { super(listener); this.task = task; @@ -156,6 +162,7 @@ final class BulkOperation extends ActionRunnable { this.observer = observer; this.failureStoreDocumentConverter = failureStoreDocumentConverter; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); + this.failureStoreMetrics = failureStoreMetrics; } @Override @@ -437,17 +444,11 @@ public void onResponse(BulkShardResponse bulkShardResponse) { for (int idx = 0; idx < bulkShardResponse.getResponses().length; idx++) { // We zip the requests and responses together so that we can identify failed documents and potentially store them BulkItemResponse bulkItemResponse = bulkShardResponse.getResponses()[idx]; + BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; if (bulkItemResponse.isFailed()) { - BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; assert bulkItemRequest.id() == bulkItemResponse.getItemId() : "Bulk items were returned out of order"; - - DataStream failureStoreReference = getRedirectTarget(bulkItemRequest.request(), getClusterState().metadata()); - if (failureStoreReference != null) { - maybeMarkFailureStoreForRollover(failureStoreReference); - var cause = bulkItemResponse.getFailure().getCause(); - addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreReference.getName()); - } + processFailure(bulkItemRequest, bulkItemResponse.getFailure().getCause()); addFailure(bulkItemResponse); } else { 
bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); @@ -464,11 +465,7 @@ public void onFailure(Exception e) { final String indexName = request.index(); DocWriteRequest docWriteRequest = request.request(); - DataStream failureStoreReference = getRedirectTarget(docWriteRequest, getClusterState().metadata()); - if (failureStoreReference != null) { - maybeMarkFailureStoreForRollover(failureStoreReference); - addDocumentToRedirectRequests(request, e, failureStoreReference.getName()); - } + processFailure(request, e); addFailure(docWriteRequest, request.id(), indexName, e); } completeShardOperation(); @@ -479,45 +476,56 @@ private void completeShardOperation() { clusterState = null; releaseOnFinish.close(); } + + private void processFailure(BulkItemRequest bulkItemRequest, Exception cause) { + var errorType = ElasticsearchException.getExceptionName(ExceptionsHelper.unwrapCause(cause)); + DocWriteRequest docWriteRequest = bulkItemRequest.request(); + DataStream failureStoreCandidate = getRedirectTargetCandidate(docWriteRequest, getClusterState().metadata()); + // If the candidate is not null, the BulkItemRequest targets a data stream, but we'll still have to check if + // it has the failure store enabled. + if (failureStoreCandidate != null) { + // Do not redirect documents to a failure store that were already headed to one. + var isFailureStoreDoc = docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore(); + if (isFailureStoreDoc == false && failureStoreCandidate.isFailureStoreEnabled()) { + // Redirect to failure store. + maybeMarkFailureStoreForRollover(failureStoreCandidate); + addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreCandidate.getName()); + failureStoreMetrics.incrementFailureStore( + bulkItemRequest.index(), + errorType, + FailureStoreMetrics.ErrorLocation.SHARD + ); + } else { + // If we can't redirect to a failure store (because either the data stream doesn't have the failure store enabled + // or this request was already targeting a failure store), we increment the rejected counter. + failureStoreMetrics.incrementRejected( + bulkItemRequest.index(), + errorType, + FailureStoreMetrics.ErrorLocation.SHARD, + isFailureStoreDoc + ); + } + } + } }); } /** - * Determines if the write request can be redirected if it fails. Write requests can be redirected IFF they are targeting a data stream - * with a failure store and are not already redirected themselves. If the document can be redirected, the data stream name to use for - * the redirection is returned. + * Tries to find a candidate redirect target for this write request. A candidate redirect target is a data stream that may or + * may not have the failure store enabled. 
* * @param docWriteRequest the write request to check * @param metadata cluster state metadata for resolving index abstractions - * @return a data stream if the write request points to a data stream that has the failure store enabled, or {@code null} if it does not + * @return a data stream if the write request points to a data stream, or {@code null} if it does not */ - private static DataStream getRedirectTarget(DocWriteRequest docWriteRequest, Metadata metadata) { + private static DataStream getRedirectTargetCandidate(DocWriteRequest docWriteRequest, Metadata metadata) { // Feature flag guard if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { return null; } - // Do not resolve a failure store for documents that were already headed to one - if (docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore()) { - return null; - } // If there is no index abstraction, then the request is using a pattern of some sort, which data streams do not support IndexAbstraction ia = metadata.getIndicesLookup().get(docWriteRequest.index()); - if (ia == null) { - return null; - } - if (ia.isDataStreamRelated()) { - // The index abstraction could be an alias. Alias abstractions (even for data streams) only keep track of which _index_ they - // will write to, not which _data stream_. - // We work backward to find the data stream from the concrete write index to cover this case. - Index concreteIndex = ia.getWriteIndex(); - IndexAbstraction writeIndexAbstraction = metadata.getIndicesLookup().get(concreteIndex.getName()); - DataStream parentDataStream = writeIndexAbstraction.getParentDataStream(); - if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { - // Keep the data stream name around to resolve the redirect to failure store if the shard level request fails. - return parentDataStream; - } - } - return null; + return DataStream.resolveDataStream(ia, metadata); } /** diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java new file mode 100644 index 0000000000000..5a36f10785790 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreMetrics.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.Map; + +/** + * A class containing APM metrics for failure stores. See the JavaDoc on the individual methods for an explanation on what they're tracking. + * General notes: + *
+ * <ul>
+ *     <li>When a document is rerouted in a pipeline, the destination data stream is used for the metric attribute(s).</li>
+ * </ul>
+ */ +public class FailureStoreMetrics { + + public static final FailureStoreMetrics NOOP = new FailureStoreMetrics(MeterRegistry.NOOP); + + public static final String METRIC_TOTAL = "es.data_stream.ingest.documents.total"; + public static final String METRIC_FAILURE_STORE = "es.data_stream.ingest.documents.failure_store.total"; + public static final String METRIC_REJECTED = "es.data_stream.ingest.documents.rejected.total"; + + private final LongCounter totalCounter; + private final LongCounter failureStoreCounter; + private final LongCounter rejectedCounter; + + public FailureStoreMetrics(MeterRegistry meterRegistry) { + totalCounter = meterRegistry.registerLongCounter(METRIC_TOTAL, "total number of documents that were sent to a data stream", "unit"); + failureStoreCounter = meterRegistry.registerLongCounter( + METRIC_FAILURE_STORE, + "number of documents that got redirected to the failure store", + "unit" + ); + rejectedCounter = meterRegistry.registerLongCounter(METRIC_REJECTED, "number of documents that were rejected", "unit"); + } + + /** + * This counter tracks the number of documents that we tried to index into a data stream. This includes documents + * that were dropped by a pipeline. This counter will only be incremented once for every incoming document (even when it gets + * redirected to the failure store and/or gets rejected). + * @param dataStream the name of the data stream + */ + public void incrementTotal(String dataStream) { + totalCounter.incrementBy(1, Map.of("data_stream", dataStream)); + } + + /** + * This counter tracks the number of documents that we tried to store into a failure store. This includes both pipeline and + * shard-level failures. + * @param dataStream the name of the data stream + * @param errorType the error type (i.e. the name of the exception that was thrown) + * @param errorLocation where this failure occurred + */ + public void incrementFailureStore(String dataStream, String errorType, ErrorLocation errorLocation) { + failureStoreCounter.incrementBy( + 1, + Map.of("data_stream", dataStream, "error_type", errorType, "error_location", errorLocation.name()) + ); + } + + /** + * This counter tracks the number of documents that failed to get stored in Elasticsearch. Meaning, any document that did not get + * stored in the data stream or in its failure store. + * @param dataStream the name of the data stream + * @param errorType the error type (i.e. 
the name of the exception that was thrown) + * @param errorLocation where this failure occurred + * @param failureStore whether this failure occurred while trying to ingest into a failure store (true) or in the data + * stream itself (false) + */ + public void incrementRejected(String dataStream, String errorType, ErrorLocation errorLocation, boolean failureStore) { + rejectedCounter.incrementBy( + 1, + Map.of( + "data_stream", + dataStream, + "error_type", + errorType, + "error_location", + errorLocation.name(), + "failure_store", + failureStore + ) + ); + } + + public enum ErrorLocation { + PIPELINE, + SHARD; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java index c44ad505aea84..74864abe3ec50 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -222,7 +222,7 @@ private void processBulkIndexIngestRequest( original.numberOfActions(), () -> bulkRequestModifier, bulkRequestModifier::markItemAsDropped, - (indexName) -> shouldStoreFailure(indexName, metadata, threadPool.absoluteTimeInMillis()), + (indexName) -> resolveFailureStore(indexName, metadata, threadPool.absoluteTimeInMillis()), bulkRequestModifier::markItemForFailureStore, bulkRequestModifier::markItemAsFailed, (originalThread, exception) -> { @@ -274,13 +274,15 @@ public boolean isForceExecution() { /** * Determines if an index name is associated with either an existing data stream or a template * for one that has the failure store enabled. + * * @param indexName The index name to check. * @param metadata Cluster state metadata. * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store - * or if it matches a template that has a data stream failure store enabled. + * or if it matches a template that has a data stream failure store enabled. Returns false if the index name corresponds to a + * data stream, but it doesn't have the failure store enabled. Returns null when it doesn't correspond to a data stream. */ - protected abstract boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis); + protected abstract Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis); /** * Retrieves the {@link IndexRequest} from the provided {@link DocWriteRequest} for index or upsert actions. 
Upserts are diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index a695e0f5e8ab6..bdda4ff487f6b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -42,7 +42,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; @@ -57,7 +56,6 @@ import java.util.HashSet; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.SortedMap; import java.util.concurrent.Executor; @@ -82,6 +80,7 @@ public class TransportBulkAction extends TransportAbstractBulkAction { private final NodeClient client; private final IndexNameExpressionResolver indexNameExpressionResolver; private final OriginSettingClient rolloverClient; + private final FailureStoreMetrics failureStoreMetrics; @Inject public TransportBulkAction( @@ -94,7 +93,8 @@ public TransportBulkAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, - SystemIndices systemIndices + SystemIndices systemIndices, + FailureStoreMetrics failureStoreMetrics ) { this( threadPool, @@ -107,7 +107,8 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - threadPool::relativeTimeInNanos + threadPool::relativeTimeInNanos, + failureStoreMetrics ); } @@ -122,7 +123,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + FailureStoreMetrics failureStoreMetrics ) { this( TYPE, @@ -137,7 +139,8 @@ public TransportBulkAction( indexNameExpressionResolver, indexingPressure, systemIndices, - relativeTimeProvider + relativeTimeProvider, + failureStoreMetrics ); } @@ -154,7 +157,8 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + FailureStoreMetrics failureStoreMetrics ) { super( bulkAction, @@ -173,6 +177,7 @@ public TransportBulkAction( this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); + this.failureStoreMetrics = failureStoreMetrics; } public static ActionListener unwrappingSingleItemBulkResponse( @@ -199,6 +204,8 @@ protected void doInternalExecute( ActionListener listener, long relativeStartTimeNanos ) { + trackIndexRequests(bulkRequest); + Map indicesToAutoCreate = new HashMap<>(); Set dataStreamsToBeRolledOver = new HashSet<>(); Set failureStoresToBeRolledOver = new HashSet<>(); @@ -216,6 +223,27 @@ protected void doInternalExecute( ); } + /** + * Track the number of index requests in our APM metrics. We'll track almost all docs here (pipeline or no pipeline, + * failure store or original), but some docs don't reach this place (dropped and rejected docs), so we increment for those docs in + * different places. 
+ */ + private void trackIndexRequests(BulkRequest bulkRequest) { + final Metadata metadata = clusterService.state().metadata(); + for (DocWriteRequest request : bulkRequest.requests) { + if (request instanceof IndexRequest == false) { + continue; + } + String resolvedIndexName = IndexNameExpressionResolver.resolveDateMathExpression(request.index()); + IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(resolvedIndexName); + DataStream dataStream = DataStream.resolveDataStream(indexAbstraction, metadata); + // We only track index requests into data streams. + if (dataStream != null) { + failureStoreMetrics.incrementTotal(dataStream.getName()); + } + } + } + /** * Determine all the targets (i.e. indices, data streams, failure stores) that require an action before we can proceed with the bulk * request. Indices might need to be created, and data streams and failure stores might need to be rolled over when they're marked @@ -535,29 +563,29 @@ void executeBulk( indexNameExpressionResolver, relativeTimeNanosProvider, startTimeNanos, - listener + listener, + failureStoreMetrics ).run(); } /** - * Determines if an index name is associated with either an existing data stream or a template - * for one that has the failure store enabled. - * @param indexName The index name to check. - * @param metadata Cluster state metadata. - * @param epochMillis A timestamp to use when resolving date math in the index name. - * @return true if the given index name corresponds to a data stream with a failure store, - * or if it matches a template that has a data stream failure store enabled. + * See {@link #resolveFailureStore(String, Metadata, long)} */ - static boolean shouldStoreFailureInternal(String indexName, Metadata metadata, long epochMillis) { - return DataStream.isFailureStoreFeatureFlagEnabled() - && resolveFailureStoreFromMetadata(indexName, metadata, epochMillis).or( - () -> resolveFailureStoreFromTemplate(indexName, metadata) - ).orElse(false); + // Visibility for testing + static Boolean resolveFailureInternal(String indexName, Metadata metadata, long epochMillis) { + if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { + return null; + } + var resolution = resolveFailureStoreFromMetadata(indexName, metadata, epochMillis); + if (resolution != null) { + return resolution; + } + return resolveFailureStoreFromTemplate(indexName, metadata); } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long time) { - return shouldStoreFailureInternal(indexName, metadata, time); + protected Boolean resolveFailureStore(String indexName, Metadata metadata, long time) { + return resolveFailureInternal(indexName, metadata, time); } /** @@ -567,30 +595,24 @@ protected boolean shouldStoreFailure(String indexName, Metadata metadata, long t * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if the given index name corresponds to an existing data stream with a failure store enabled. 
*/ - private static Optional resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { + private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { if (indexName == null) { - return Optional.empty(); + return null; } // Get index abstraction, resolving date math if it exists IndexAbstraction indexAbstraction = metadata.getIndicesLookup() .get(IndexNameExpressionResolver.resolveDateMathExpression(indexName, epochMillis)); - - // We only store failures if the failure is being written to a data stream, - // not when directly writing to backing indices/failure stores if (indexAbstraction == null || indexAbstraction.isDataStreamRelated() == false) { - return Optional.empty(); + return null; } - // Locate the write index for the abstraction, and check if it has a data stream associated with it. - // This handles alias resolution as well as data stream resolution. - Index writeIndex = indexAbstraction.getWriteIndex(); - assert writeIndex != null : "Could not resolve write index for resource [" + indexName + "]"; - IndexAbstraction writeAbstraction = metadata.getIndicesLookup().get(writeIndex.getName()); - DataStream targetDataStream = writeAbstraction.getParentDataStream(); + // We only store failures if the failure is being written to a data stream, + // not when directly writing to backing indices/failure stores + DataStream targetDataStream = DataStream.resolveDataStream(indexAbstraction, metadata); // We will store the failure if the write target belongs to a data stream with a failure store. - return Optional.of(targetDataStream != null && targetDataStream.isFailureStoreEnabled()); + return targetDataStream != null && targetDataStream.isFailureStoreEnabled(); } /** @@ -599,9 +621,9 @@ private static Optional resolveFailureStoreFromMetadata(String indexNam * @param metadata Cluster state metadata. * @return true if the given index name corresponds to an index template with a data stream failure store enabled. 
*/ - private static Optional resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { + private static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { if (indexName == null) { - return Optional.empty(); + return null; } // Check to see if the index name matches any templates such that an index would have been attributed @@ -612,11 +634,11 @@ private static Optional resolveFailureStoreFromTemplate(String indexNam ComposableIndexTemplate composableIndexTemplate = metadata.templatesV2().get(template); if (composableIndexTemplate.getDataStreamTemplate() != null) { // Check if the data stream has the failure store enabled - return Optional.of(composableIndexTemplate.getDataStreamTemplate().hasFailureStore()); + return composableIndexTemplate.getDataStreamTemplate().hasFailureStore(); } } // Could not locate a failure store via template - return Optional.empty(); + return null; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index a4648a7accb5a..2312a75b91084 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -166,8 +166,8 @@ protected IngestService getIngestService(BulkRequest request) { } @Override - protected boolean shouldStoreFailure(String indexName, Metadata metadata, long epochMillis) { + protected Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis) { // A simulate bulk request should not change any persistent state in the system, so we never write to the failure store - return false; + return null; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 6b20399a1bc59..c9743c157a622 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -1376,6 +1376,25 @@ private static Instant getTimestampFromParser(BytesReference source, XContentTyp } } + /** + * Resolve the index abstraction to a data stream. This handles alias resolution as well as data stream resolution. This does NOT + * resolve a data stream by providing a concrete backing index. + */ + public static DataStream resolveDataStream(IndexAbstraction indexAbstraction, Metadata metadata) { + // We do not consider concrete indices - only data streams and data stream aliases. + if (indexAbstraction == null || indexAbstraction.isDataStreamRelated() == false) { + return null; + } + + // Locate the write index for the abstraction, and check if it has a data stream associated with it. + Index writeIndex = indexAbstraction.getWriteIndex(); + if (writeIndex == null) { + return null; + } + IndexAbstraction writeAbstraction = metadata.getIndicesLookup().get(writeIndex.getName()); + return writeAbstraction.getParentDataStream(); + } + /** * Modifies the passed Instant object to be used as a bound for a timestamp field in TimeSeries. It needs to be called in both backing * index construction (rollover) and index selection for doc insertion. 
Failure to do so may lead to errors due to document timestamps diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 0b1a135a17214..20f97e1871483 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Strings; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; @@ -18,6 +19,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.DeletePipelineRequest; @@ -88,6 +90,7 @@ import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntConsumer; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -117,6 +120,7 @@ public class IngestService implements ClusterStateApplier, ReportingService pipelines = Map.of(); private final ThreadPool threadPool; private final IngestMetric totalMetrics = new IngestMetric(); + private final FailureStoreMetrics failureStoreMetrics; private final List> ingestClusterStateListeners = new CopyOnWriteArrayList<>(); private volatile ClusterState state; @@ -190,7 +194,8 @@ public IngestService( List ingestPlugins, Client client, MatcherWatchdog matcherWatchdog, - DocumentParsingProvider documentParsingProvider + DocumentParsingProvider documentParsingProvider, + FailureStoreMetrics failureStoreMetrics ) { this.clusterService = clusterService; this.scriptService = scriptService; @@ -212,6 +217,7 @@ public IngestService( ); this.threadPool = threadPool; this.taskQueue = clusterService.createTaskQueue("ingest-pipelines", Priority.NORMAL, PIPELINE_TASK_EXECUTOR); + this.failureStoreMetrics = failureStoreMetrics; } /** @@ -228,6 +234,7 @@ public IngestService( this.taskQueue = ingestService.taskQueue; this.pipelines = ingestService.pipelines; this.state = ingestService.state; + this.failureStoreMetrics = ingestService.failureStoreMetrics; } private static Map processorFactories(List ingestPlugins, Processor.Parameters parameters) { @@ -691,7 +698,7 @@ private static IngestPipelinesExecutionResult failAndStoreFor(String index, Exce * @param actionRequests The collection of requests to be processed. * @param onDropped A callback executed when a document is dropped by a pipeline. * Accepts the slot in the collection of requests that the document occupies. - * @param shouldStoreFailure A predicate executed on each ingest failure to determine if the + * @param resolveFailureStore A function executed on each ingest failure to determine if the * failure should be stored somewhere. * @param onStoreFailure A callback executed when a document fails ingest but the failure should * be persisted elsewhere. 
Accepts the slot in the collection of requests @@ -709,7 +716,7 @@ public void executeBulkRequest( final int numberOfActionRequests, final Iterable> actionRequests, final IntConsumer onDropped, - final Predicate shouldStoreFailure, + final Function resolveFailureStore, final TriConsumer onStoreFailure, final BiConsumer onFailure, final BiConsumer onCompletion, @@ -794,7 +801,7 @@ public void onFailure(Exception e) { } ); - executePipelines(pipelines, indexRequest, ingestDocument, shouldStoreFailure, documentListener); + executePipelines(pipelines, indexRequest, ingestDocument, resolveFailureStore, documentListener); indexRequest.setNormalisedBytesParsed(meteringParserDecorator.meteredDocumentSize().ingestedBytes()); assert actionRequest.index() != null; @@ -885,7 +892,7 @@ private void executePipelines( final PipelineIterator pipelines, final IndexRequest indexRequest, final IngestDocument ingestDocument, - final Predicate shouldStoreFailure, + final Function resolveFailureStore, final ActionListener listener ) { assert pipelines.hasNext(); @@ -898,9 +905,22 @@ private void executePipelines( ingestDocument.resetReroute(); final String originalIndex = indexRequest.indices()[0]; final Consumer exceptionHandler = (Exception e) -> { - if (shouldStoreFailure.test(originalIndex)) { + String errorType = ElasticsearchException.getExceptionName(ExceptionsHelper.unwrapCause(e)); + // If `failureStoreResolution` is true, we store the failure. If it's false, the target is a data stream, + // but it doesn't have the failure store enabled. If it's null, the target wasn't a data stream. + Boolean failureStoreResolution = resolveFailureStore.apply(originalIndex); + if (failureStoreResolution != null && failureStoreResolution) { + failureStoreMetrics.incrementFailureStore(originalIndex, errorType, FailureStoreMetrics.ErrorLocation.PIPELINE); listener.onResponse(IngestPipelinesExecutionResult.failAndStoreFor(originalIndex, e)); } else { + if (failureStoreResolution != null) { + // If this document targeted a data stream that didn't have the failure store enabled, we increment + // the rejected counter. + // We also increment the total counter because this request will not reach the code that increments + // the total counter for non-rejected documents. + failureStoreMetrics.incrementTotal(originalIndex); + failureStoreMetrics.incrementRejected(originalIndex, errorType, FailureStoreMetrics.ErrorLocation.PIPELINE, false); + } listener.onFailure(e); } }; @@ -928,6 +948,20 @@ private void executePipelines( } if (keep == false) { + // We only increment the total counter for dropped docs here, because these docs don't reach the code + // that ordinarily take care of that. + // We reuse `resolveFailureStore` here to determine whether the index request targets a data stream, + // because we only want to track these metrics for data streams. + Boolean failureStoreResolution = resolveFailureStore.apply(originalIndex); + if (failureStoreResolution != null) { + // Get index abstraction, resolving date math if it exists + IndexAbstraction indexAbstraction = state.metadata() + .getIndicesLookup() + .get(IndexNameExpressionResolver.resolveDateMathExpression(originalIndex, threadPool.absoluteTimeInMillis())); + DataStream dataStream = DataStream.resolveDataStream(indexAbstraction, state.metadata()); + String dataStreamName = dataStream != null ? 
dataStream.getName() : originalIndex; + failureStoreMetrics.incrementTotal(dataStreamName); + } listener.onResponse(IngestPipelinesExecutionResult.DISCARD_RESULT); return; // document dropped! } @@ -1019,7 +1053,7 @@ private void executePipelines( } if (newPipelines.hasNext()) { - executePipelines(newPipelines, indexRequest, ingestDocument, shouldStoreFailure, listener); + executePipelines(newPipelines, indexRequest, ingestDocument, resolveFailureStore, listener); } else { // update the index request's source and (potentially) cache the timestamp for TSDB updateIndexRequestSource(indexRequest, ingestDocument); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index a4db9a0a0e149..9c5b72a573d44 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction; import org.elasticsearch.action.admin.indices.template.reservedstate.ReservedComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; import org.elasticsearch.action.ingest.ReservedPipelineAction; import org.elasticsearch.action.search.SearchExecutionStatsCollector; @@ -659,6 +660,7 @@ private void construct( modules.bindToInstance(DocumentParsingProvider.class, documentParsingProvider); + FailureStoreMetrics failureStoreMetrics = new FailureStoreMetrics(telemetryProvider.getMeterRegistry()); final IngestService ingestService = new IngestService( clusterService, threadPool, @@ -668,7 +670,8 @@ private void construct( pluginsService.filterPlugins(IngestPlugin.class).toList(), client, IngestService.createGrokThreadWatchdog(environment, threadPool), - documentParsingProvider + documentParsingProvider, + failureStoreMetrics ); SystemIndices systemIndices = createSystemIndices(settings); @@ -1154,6 +1157,7 @@ record PluginServiceInstances( b.bind(FileSettingsService.class).toInstance(fileSettingsService); b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); b.bind(DataStreamAutoShardingService.class).toInstance(dataStreamAutoShardingService); + b.bind(FailureStoreMetrics.class).toInstance(failureStoreMetrics); }); if (ReadinessService.enabled(environment)) { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java index 76bf8dc79b855..e950901a538b4 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java @@ -1164,7 +1164,8 @@ private BulkOperation newBulkOperation( timeZero, listener, observer, - failureStoreDocumentConverter + failureStoreDocumentConverter, + FailureStoreMetrics.NOOP ); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 4ca4e7158e454..1d3d514da13a3 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -130,7 +130,8 @@ public boolean hasIndexAbstraction(String indexAbstraction, ClusterState state) mock(ActionFilters.class), indexNameExpressionResolver, new IndexingPressure(Settings.EMPTY), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + FailureStoreMetrics.NOOP ) { @Override void executeBulk( diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 3683c2c271739..609237f268807 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -69,7 +69,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; -import java.util.function.Predicate; +import java.util.function.Function; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.sameInstance; @@ -110,7 +110,7 @@ public class TransportBulkActionIngestTests extends ESTestCase { /** Arguments to callbacks we want to capture, but which require generics, so we must use @Captor */ @Captor - ArgumentCaptor> redirectPredicate; + ArgumentCaptor> redirectPredicate; @Captor ArgumentCaptor> redirectHandler; @Captor @@ -155,7 +155,8 @@ class TestTransportBulkAction extends TransportBulkAction { new ActionFilters(Collections.emptySet()), TestIndexNameExpressionResolver.newInstance(), new IndexingPressure(SETTINGS), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + FailureStoreMetrics.NOOP ); } @@ -410,9 +411,10 @@ public void testIngestLocal() throws Exception { Iterator> req = bulkDocsItr.getValue().iterator(); failureHandler.getValue().accept(0, exception); // have an exception for our one index request indexRequest2.setPipeline(IngestService.NOOP_PIPELINE_NAME); // this is done by the real pipeline execution service when processing - assertTrue(redirectPredicate.getValue().test(WITH_FAILURE_STORE_ENABLED + "-1")); // ensure redirects on failure store data stream - assertFalse(redirectPredicate.getValue().test(WITH_DEFAULT_PIPELINE)); // no redirects for random existing indices - assertFalse(redirectPredicate.getValue().test("index")); // no redirects for non-existant indices with no templates + // ensure redirects on failure store data stream + assertTrue(redirectPredicate.getValue().apply(WITH_FAILURE_STORE_ENABLED + "-1")); + assertNull(redirectPredicate.getValue().apply(WITH_DEFAULT_PIPELINE)); // no redirects for random existing indices + assertNull(redirectPredicate.getValue().apply("index")); // no redirects for non-existent indices with no templates redirectHandler.getValue().apply(2, WITH_FAILURE_STORE_ENABLED + "-1", exception); // exception and redirect for request 3 (slot 2) completionHandler.getValue().accept(DUMMY_WRITE_THREAD, null); // all ingestion completed assertTrue(action.isExecuted); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index db3a985c00ad0..ed7cc93f0ab43 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -71,6 +71,7 @@ import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.junit.Assume.assumeThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -103,7 +104,8 @@ class TestTransportBulkAction extends TransportBulkAction { new ActionFilters(Collections.emptySet()), new Resolver(), new IndexingPressure(Settings.EMPTY), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + FailureStoreMetrics.NOOP ); } @@ -417,13 +419,16 @@ public void testResolveFailureStoreFromMetadata() throws Exception { .build(); // Data stream with failure store should store failures - assertThat(TransportBulkAction.shouldStoreFailureInternal(dataStreamWithFailureStore, metadata, testTime), is(true)); + assertThat(TransportBulkAction.resolveFailureInternal(dataStreamWithFailureStore, metadata, testTime), is(true)); // Data stream without failure store should not - assertThat(TransportBulkAction.shouldStoreFailureInternal(dataStreamWithoutFailureStore, metadata, testTime), is(false)); + assertThat(TransportBulkAction.resolveFailureInternal(dataStreamWithoutFailureStore, metadata, testTime), is(false)); // An index should not be considered for failure storage - assertThat(TransportBulkAction.shouldStoreFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(false)); + assertThat(TransportBulkAction.resolveFailureInternal(backingIndex1.getIndex().getName(), metadata, testTime), is(nullValue())); // even if that index is itself a failure store - assertThat(TransportBulkAction.shouldStoreFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime), is(false)); + assertThat( + TransportBulkAction.resolveFailureInternal(failureStoreIndex1.getIndex().getName(), metadata, testTime), + is(nullValue()) + ); } public void testResolveFailureStoreFromTemplate() throws Exception { @@ -454,11 +459,11 @@ public void testResolveFailureStoreFromTemplate() throws Exception { .build(); // Data stream with failure store should store failures - assertThat(TransportBulkAction.shouldStoreFailureInternal(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true)); + assertThat(TransportBulkAction.resolveFailureInternal(dsTemplateWithFailureStore + "-1", metadata, testTime), is(true)); // Data stream without failure store should not - assertThat(TransportBulkAction.shouldStoreFailureInternal(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false)); + assertThat(TransportBulkAction.resolveFailureInternal(dsTemplateWithoutFailureStore + "-1", metadata, testTime), is(false)); // An index template should not be considered for failure storage - assertThat(TransportBulkAction.shouldStoreFailureInternal(indexTemplate + "-1", metadata, testTime), is(false)); + assertThat(TransportBulkAction.resolveFailureInternal(indexTemplate + "-1", metadata, testTime), is(nullValue())); } private BulkRequest buildBulkRequest(List indices) { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index 09513351652b8..626f07fe61216 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -254,7 +254,8 @@ static class TestTransportBulkAction extends TransportBulkAction { 
indexNameExpressionResolver, new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE, - relativeTimeProvider + relativeTimeProvider, + FailureStoreMetrics.NOOP ); } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index aea3359e18bf6..b620495472e28 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -90,7 +91,8 @@ public void setup() { Collections.singletonList(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); Map factories = ingestService.getProcessorFactories(); assertTrue(factories.containsKey("set")); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index bc81614c9e237..5c07c2344cf13 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -88,9 +89,9 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntConsumer; import java.util.function.LongSupplier; -import java.util.function.Predicate; import java.util.stream.Collectors; import static org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils.executeAndAssertSuccessful; @@ -152,7 +153,8 @@ public void testIngestPlugin() { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); Map factories = ingestService.getProcessorFactories(); assertTrue(factories.containsKey("foo")); @@ -172,7 +174,8 @@ public void testIngestPluginDuplicate() { List.of(DUMMY_PLUGIN, DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ) ); assertTrue(e.getMessage(), e.getMessage().contains("already registered")); @@ -189,7 +192,8 @@ public void testExecuteIndexPipelineDoesNotExist() { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); final IndexRequest indexRequest = new IndexRequest("_index").id("_id") .source(Map.of()) @@ -1665,7 +1669,7 @@ public void testExecuteFailureRedirection() throws Exception { .setFinalPipeline("_id2"); doThrow(new 
RuntimeException()).when(processor) .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any()); - final Predicate redirectCheck = (idx) -> indexRequest.index().equals(idx); + final Function redirectCheck = (idx) -> indexRequest.index().equals(idx); @SuppressWarnings("unchecked") final TriConsumer redirectHandler = mock(TriConsumer.class); @SuppressWarnings("unchecked") @@ -1722,7 +1726,7 @@ public void testExecuteFailureRedirectionWithNestedOnFailure() throws Exception .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any()); doThrow(new RuntimeException()).when(processor) .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Map.of()), any()); - final Predicate redirectPredicate = (idx) -> indexRequest.index().equals(idx); + final Function redirectCheck = (idx) -> indexRequest.index().equals(idx); @SuppressWarnings("unchecked") final TriConsumer redirectHandler = mock(TriConsumer.class); @SuppressWarnings("unchecked") @@ -1733,7 +1737,7 @@ public void testExecuteFailureRedirectionWithNestedOnFailure() throws Exception 1, List.of(indexRequest), indexReq -> {}, - redirectPredicate, + redirectCheck, redirectHandler, failureHandler, completionHandler, @@ -1826,9 +1830,9 @@ public void testBulkRequestExecution() throws Exception { for (int i = 0; i < numRequest; i++) { IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none"); indexRequest.source(xContentType, "field1", "value1"); - boolean shouldListExecutedPipelines = randomBoolean(); - executedPipelinesExpected.add(shouldListExecutedPipelines); - indexRequest.setListExecutedPipelines(shouldListExecutedPipelines); + boolean shouldListExecutedPiplines = randomBoolean(); + executedPipelinesExpected.add(shouldListExecutedPiplines); + indexRequest.setListExecutedPipelines(shouldListExecutedPiplines); bulkRequest.add(indexRequest); } @@ -2320,7 +2324,8 @@ public Map getProcessors(Processor.Parameters paramet List.of(testPlugin), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); ingestService.addIngestClusterStateListener(ingestClusterStateListener); @@ -2675,7 +2680,8 @@ private void testUpdatingPipeline(String pipelineString) throws Exception { List.of(DUMMY_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, clusterState)); @@ -2974,7 +2980,8 @@ public Map getProcessors(final Processor.Parameters p }), client, null, - documentParsingProvider + documentParsingProvider, + FailureStoreMetrics.NOOP ); if (randomBoolean()) { /* diff --git a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java index 30145ab37c322..18f66676cfd1f 100644 --- a/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/SimulateIngestServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.SimulateBulkRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; @@ -115,11 +116,23 @@ private static IngestService createWithProcessors(Map ThreadPool threadPool = 
mock(ThreadPool.class); when(threadPool.generic()).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); - return new IngestService(mock(ClusterService.class), threadPool, null, null, null, List.of(new IngestPlugin() { + var ingestPlugin = new IngestPlugin() { @Override public Map getProcessors(final Processor.Parameters parameters) { return processors; } - }), client, null, DocumentParsingProvider.EMPTY_INSTANCE); + }; + return new IngestService( + mock(ClusterService.class), + threadPool, + null, + null, + null, + List.of(ingestPlugin), + client, + null, + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP + ); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index b54a786e05c9d..c6086a8259fbb 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.index.IndexRequest; @@ -2395,14 +2396,16 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { Collections.emptyList(), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ), mockFeatureService, client, actionFilters, indexNameExpressionResolver, new IndexingPressure(settings), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + FailureStoreMetrics.NOOP ) ); final TransportShardBulkAction transportShardBulkAction = new TransportShardBulkAction( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index f10df86cc23ae..9232d32e40a97 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -136,7 +137,8 @@ public void setUpVariables() { Collections.singletonList(SKINNY_INGEST_PLUGIN), client, null, - DocumentParsingProvider.EMPTY_INSTANCE + DocumentParsingProvider.EMPTY_INSTANCE, + FailureStoreMetrics.NOOP ); } From fe786b7b8f4a13e19751b1460b6f4fef90891156 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 22 Aug 2024 09:07:42 +0200 Subject: [PATCH 008/352] Force implementing bulk InputStream#read on StreamInput (#112072) We should enforce overriding here to avoid extremely slow byte-by-byte reads when using these instances as `InputStream`. 
I only found one case where this matters practically in the codebase but it's probably good to guard against it. --- .../common/io/stream/ByteArrayStreamInput.java | 7 +++++++ .../common/io/stream/FilterStreamInput.java | 5 +++++ .../org/elasticsearch/common/io/stream/StreamInput.java | 4 ++++ .../index/translog/BufferedChecksumStreamInput.java | 9 +++++++++ .../elasticsearch/common/io/stream/StreamInputTests.java | 9 +++++++++ .../xpack/eql/execution/sample/CircuitBreakerTests.java | 5 +++++ 6 files changed, 39 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java index 52eee5af3f6f5..838f2998d339f 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java @@ -117,4 +117,11 @@ public void readBytes(byte[] b, int offset, int len) { System.arraycopy(bytes, pos, b, offset, len); pos += len; } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int toRead = Math.min(len, available()); + readBytes(b, off, toRead); + return toRead; + } } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java index c0ef0e0abf39b..b84c67bd8c8a2 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java @@ -97,6 +97,11 @@ public int read() throws IOException { return delegate.read(); } + @Override + public int read(byte[] b, int off, int len) throws IOException { + return delegate.read(b, off, len); + } + @Override public void close() throws IOException { delegate.close(); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index c4c18cfd376ad..ec0edb2d07e5a 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -104,6 +104,10 @@ public void setTransportVersion(TransportVersion version) { */ public abstract void readBytes(byte[] b, int offset, int len) throws IOException; + // force implementing bulk reads to avoid accidentally slow implementations + @Override + public abstract int read(byte[] b, int off, int len) throws IOException; + /** * Reads a bytes reference from this stream, copying any bytes read to a new {@code byte[]}. Use {@link #readReleasableBytesReference()} * when reading large bytes references where possible top avoid needless allocations and copying. 
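To see why the enforced override matters: `InputStream#read(byte[], int, int)` falls back to calling the single-byte `read()` in a loop when a subclass does not override it, so a wrapper that only implements `read()` pays one virtual call per byte on bulk reads. A minimal sketch of the pattern this patch enforces — the class and field names are illustrative, not part of the change:

    import java.io.IOException;
    import java.io.InputStream;

    // Illustrative delegating stream, not from the patch.
    class DelegatingInputStream extends InputStream {
        private final InputStream delegate;

        DelegatingInputStream(InputStream delegate) {
            this.delegate = delegate;
        }

        @Override
        public int read() throws IOException {
            return delegate.read();
        }

        // Without this override, InputStream's default read(byte[], int, int)
        // loops over read() and moves the buffer one byte per call.
        @Override
        public int read(byte[] b, int off, int len) throws IOException {
            return delegate.read(b, off, len);
        }
    }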
diff --git a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java index 6d1456040c8fa..9420d923107e1 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java @@ -66,6 +66,15 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { digest.update(b, offset, len); } + @Override + public int read(byte[] b, int off, int len) throws IOException { + int read = delegate.read(b, off, len); + if (read > 0) { + digest.update(b, off, read); + } + return read; + } + private static final ThreadLocal buffer = ThreadLocal.withInitial(() -> new byte[8]); @Override diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java index 645461778f637..cda1f9b0e29de 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/StreamInputTests.java @@ -24,6 +24,15 @@ public class StreamInputTests extends ESTestCase { private StreamInput in = Mockito.spy(StreamInput.class); + + { + try { + Mockito.when(in.skip(anyLong())).thenAnswer(a -> a.getArguments()[0]); + } catch (IOException e) { + throw new AssertionError(e); + } + } + byte[] bytes = "0123456789".getBytes(UTF_8); public void testCalculateByteLengthOfAscii() throws IOException { diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index 943d1275364fb..1652495197fc0 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -260,6 +260,11 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { } + @Override + public int read(byte[] b, int off, int len) throws IOException { + return 0; + } + @Override public void close() throws IOException { From 967af10d58ac673645da64d9b37e23645fe45daf Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 22 Aug 2024 00:08:38 -0700 Subject: [PATCH 009/352] Fix DocValuesCodecDuelTests testDuel (#112084) We need to check the returned doc id from advance() before accessing the values of the current document. 
Closes #112082 --- muted-tests.yml | 3 --- .../index/codec/tsdb/DocValuesCodecDuelTests.java | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 96fa1c674a27e..bb13c2fc9a571 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -176,9 +176,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT method: testForceSleepsProfile {ASYNC} issue: https://github.com/elastic/elasticsearch/issues/112049 -- class: org.elasticsearch.index.codec.tsdb.DocValuesCodecDuelTests - method: testDuel - issue: https://github.com/elastic/elasticsearch/issues/112082 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} issue: https://github.com/elastic/elasticsearch/issues/111999 diff --git a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java index 9b58e785131c9..20ae59e113c33 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/tsdb/DocValuesCodecDuelTests.java @@ -141,6 +141,9 @@ private void assertSortedDocValues(LeafReader baselineReader, LeafReader contend for (int i = 0; i < docIdsToAdvanceTo.length; i++) { int docId = docIdsToAdvanceTo[i]; int baselineTarget = assertAdvance(docId, baselineReader, contenderReader, baseline, contender); + if (baselineTarget == NO_MORE_DOCS) { + break; + } assertEquals(baseline.ordValue(), contender.ordValue()); assertEquals(baseline.lookupOrd(baseline.ordValue()), contender.lookupOrd(contender.ordValue())); i = shouldSkipDocIds(i, docId, baselineTarget, docIdsToAdvanceTo); From 31cdc432869f4b53890d939c942270cb9eb77030 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 22 Aug 2024 18:08:41 +1000 Subject: [PATCH 010/352] Mute org.elasticsearch.xpack.ml.integration.MlJobIT testDeleteJobAfterMissingIndex #112088 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index bb13c2fc9a571..cd484b1c46867 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -179,6 +179,9 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} issue: https://github.com/elastic/elasticsearch/issues/111999 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testDeleteJobAfterMissingIndex + issue: https://github.com/elastic/elasticsearch/issues/112088 # Examples: # From fb6c5a55dd7a7973841307545a204b4bfd35672a Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 22 Aug 2024 12:15:22 +0200 Subject: [PATCH 011/352] Update Gradle wrapper to 8.10 (#111736) --- .../gradle/wrapper/gradle-wrapper.properties | 4 ++-- .../src/main/resources/minimumGradleVersion | 2 +- gradle/wrapper/gradle-wrapper.jar | Bin 43504 -> 43583 bytes gradle/wrapper/gradle-wrapper.properties | 4 ++-- .../gradle/wrapper/gradle-wrapper.properties | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index efe2ff3449216..9036682bf0f0c 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ 
b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
+distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion
index f7b1c8ff61774..8d04a0f38fab0 100644
--- a/build-tools-internal/src/main/resources/minimumGradleVersion
+++ b/build-tools-internal/src/main/resources/minimumGradleVersion
@@ -1 +1 @@
-8.9
\ No newline at end of file
+8.10
\ No newline at end of file
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index 2c3521197d7c4586c843d1d3e9090525f1898cde..a4b76b9530d66f5e68d973ea569d8e19de379189 100644
GIT binary patch
delta 3990
[base85-encoded binary delta data omitted: gradle-wrapper.jar updated to the Gradle 8.10 wrapper]
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index efe2ff3449216..9036682bf0f0c 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,7 +1,7 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip
+distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip
 networkTimeout=10000
 validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties index efe2ff3449216..9036682bf0f0c 100644 --- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties +++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=258e722ec21e955201e31447b0aed14201765a3bfbae296a46cf60b70e66db70 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.9-all.zip +distributionSha256Sum=682b4df7fe5accdca84a4d1ef6a3a6ab096b3efd5edf7de2bd8c758d95a93703 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME From 62305f018be973676fb65c26254b489dec3b6c89 Mon Sep 17 00:00:00 2001 From: kosabogi <105062005+kosabogi@users.noreply.github.com> Date: Thu, 22 Aug 2024 12:22:32 +0200 Subject: [PATCH 012/352] Updates-warning-about-mounting-snapshots (#112057) * Updates-warning-about-mounting-snapshots * Update docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> --------- Co-authored-by: shainaraskas <58563081+shainaraskas@users.noreply.github.com> --- .../searchable-snapshots/apis/mount-snapshot.asciidoc | 5 ++++- docs/reference/searchable-snapshots/index.asciidoc | 9 ++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc index 5d838eb86dcf3..b47bc2370ab10 100644 --- a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc +++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc @@ -24,7 +24,10 @@ For more information, see <>. ==== {api-description-title} This API mounts a snapshot as a searchable snapshot index. -Note that manually mounting {ilm-init}-managed snapshots can <> with <>. + +Don't use this API for snapshots managed by {ilm-init}. Manually mounting +{ilm-init}-managed snapshots can <> with +<>. [[searchable-snapshots-api-mount-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc index a8a9ef36dc9a6..a38971a0bae6a 100644 --- a/docs/reference/searchable-snapshots/index.asciidoc +++ b/docs/reference/searchable-snapshots/index.asciidoc @@ -176,9 +176,12 @@ nodes that have a shared cache. ==== Manually mounting snapshots captured by an Index Lifecycle Management ({ilm-init}) policy can interfere with {ilm-init}'s automatic management. This may lead to issues such as data loss -or complications with snapshot handling. For optimal results, allow {ilm-init} to manage -snapshots automatically. If manual mounting is necessary, be aware of its potential -impact on {ilm-init} processes. For more information, learn about <>. +or complications with snapshot handling. + +For optimal results, allow {ilm-init} to manage +snapshots automatically. + +<>. 
==== [[searchable-snapshots-shared-cache]] From 585bd64695b01b0aac37c0bb00bf53898a4ce358 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 22 Aug 2024 12:56:31 +0200 Subject: [PATCH 013/352] Add H3 Benchmarks (#111359) Microbenchmarks for H3 --- benchmarks/build.gradle | 1 + .../benchmark/h3/H3Benchmark.java | 46 +++++++++++++++++++ .../elasticsearch/benchmark/h3/H3State.java | 35 ++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java create mode 100644 benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 49e81a67e85f9..3f7ee8b60b53c 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -37,6 +37,7 @@ dependencies { // us to invoke the JMH uberjar as usual. exclude group: 'net.sf.jopt-simple', module: 'jopt-simple' } + api(project(':libs:elasticsearch-h3')) api(project(':modules:aggregations')) api(project(':x-pack:plugin:esql-core')) api(project(':x-pack:plugin:esql')) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java new file mode 100644 index 0000000000000..2441acab7d405 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3Benchmark.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.benchmark.h3; + +import org.elasticsearch.h3.H3; +import org.openjdk.jmh.Main; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.TimeUnit; + +@OutputTimeUnit(TimeUnit.SECONDS) +@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 25, time = 1, timeUnit = TimeUnit.SECONDS) +@Fork(1) +public class H3Benchmark { + + @Benchmark + public void pointToH3(H3State state, Blackhole bh) { + for (int i = 0; i < state.points.length; i++) { + for (int res = 0; res <= 15; res++) { + bh.consume(H3.geoToH3(state.points[i][0], state.points[i][1], res)); + } + } + } + + @Benchmark + public void h3Boundary(H3State state, Blackhole bh) { + for (int i = 0; i < state.h3.length; i++) { + bh.consume(H3.h3ToGeoBoundary(state.h3[i])); + } + } + + public static void main(String[] args) throws Exception { + Main.main(args); + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java new file mode 100644 index 0000000000000..5707e692a0750 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/h3/H3State.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.benchmark.h3; + +import org.elasticsearch.h3.H3; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; + +import java.io.IOException; +import java.util.Random; + +@State(Scope.Benchmark) +public class H3State { + + double[][] points = new double[1000][2]; + long[] h3 = new long[1000]; + + @Setup(Level.Trial) + public void setupTrial() throws IOException { + Random random = new Random(1234); + for (int i = 0; i < points.length; i++) { + points[i][0] = random.nextDouble() * 180 - 90; // lat + points[i][1] = random.nextDouble() * 360 - 180; // lon + int res = random.nextInt(16); // resolution + h3[i] = H3.geoToH3(points[i][0], points[i][1], res); + } + } +} From 1362d56865db488fb9e084ff3b3fe88c6f597b86 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 22 Aug 2024 15:13:52 +0300 Subject: [PATCH 014/352] Introduce mode `subobjects=auto` for objects (#110524) * Introduce mode `subobjects=auto` for objects * Update docs/changelog/110524.yaml * compilation error * tests and fixes * refactor * spotless * more tests * fix nested objects * fix test * update fetch test * add QA coverage * update tests * update tests * update tests * fix nested --- docs/changelog/110524.yaml | 5 + ...ogsIndexModeRandomDataChallengeRestIT.java | 19 +- .../test/index/92_metrics_auto_subobjects.yml | 250 ++++++++++++++ .../indices.create/20_synthetic_source.yml | 89 +++++ .../15_composition.yml | 109 ++++++ .../test/search/330_fetch_fields.yml | 52 +++ .../index/mapper/DocumentParser.java | 11 +- .../index/mapper/DynamicFieldsBuilder.java | 2 +- .../index/mapper/MapperFeatures.java | 3 +- .../index/mapper/NestedObjectMapper.java | 7 +- .../index/mapper/ObjectMapper.java | 131 ++++++-- .../index/mapper/PassThroughObjectMapper.java | 5 +- .../index/mapper/RootObjectMapper.java | 7 +- .../mapper/DynamicFieldsBuilderTests.java | 4 +- .../index/mapper/DynamicTemplatesTests.java | 310 +++++++++++++++++- .../FieldAliasMapperValidationTests.java | 3 +- .../index/mapper/MappingLookupTests.java | 3 +- .../index/mapper/ObjectMapperMergeTests.java | 74 ++--- .../index/mapper/ObjectMapperTests.java | 180 +++++++++- .../query/SearchExecutionContextTests.java | 4 +- .../index/mapper/MapperServiceTestCase.java | 7 +- .../mapper/SemanticTextFieldMapper.java | 4 +- 22 files changed, 1154 insertions(+), 125 deletions(-) create mode 100644 docs/changelog/110524.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml diff --git a/docs/changelog/110524.yaml b/docs/changelog/110524.yaml new file mode 100644 index 0000000000000..6274c99b09998 --- /dev/null +++ b/docs/changelog/110524.yaml @@ -0,0 +1,5 @@ +pr: 110524 +summary: Introduce mode `subobjects=auto` for objects +area: Mapping +type: enhancement +issues: [] diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java index 4e123c1630457..f53fdcb6e8600 100644 --- 
a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java
+++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.common.time.FormatNames;
+import org.elasticsearch.index.mapper.ObjectMapper;
 import org.elasticsearch.logsdb.datageneration.DataGenerator;
 import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification;
 import org.elasticsearch.logsdb.datageneration.FieldType;
@@ -32,17 +33,17 @@
  */
 public class StandardVersusLogsIndexModeRandomDataChallengeRestIT extends StandardVersusLogsIndexModeChallengeRestIT {
     private final boolean fullyDynamicMapping;
-    private final boolean subobjectsDisabled;
+    private final ObjectMapper.Subobjects subobjects;
     private final DataGenerator dataGenerator;
 
     public StandardVersusLogsIndexModeRandomDataChallengeRestIT() {
         super();
         this.fullyDynamicMapping = randomBoolean();
-        this.subobjectsDisabled = randomBoolean();
+        this.subobjects = randomFrom(ObjectMapper.Subobjects.values());
 
         var specificationBuilder = DataGeneratorSpecification.builder();
-        if (subobjectsDisabled) {
+        if (subobjects != ObjectMapper.Subobjects.ENABLED) {
             specificationBuilder = specificationBuilder.withNestedFieldsLimit(0);
         }
         this.dataGenerator = new DataGenerator(specificationBuilder.withDataSourceHandlers(List.of(new DataSourceHandler() {
@@ -60,7 +61,7 @@ public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeG
         }
 
         public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequest.ObjectMappingParametersGenerator request) {
-            if (subobjectsDisabled == false) {
+            if (subobjects == ObjectMapper.Subobjects.ENABLED) {
                 // Use default behavior
                 return null;
             }
@@ -71,13 +72,13 @@ public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequ
             // "dynamic: false/strict/runtime" is not compatible with subobjects: false
             return new DataSourceResponse.ObjectMappingParametersGenerator(() -> {
                 var parameters = new HashMap<String, Object>();
+                parameters.put("subobjects", subobjects.toString());
                 if (ESTestCase.randomBoolean()) {
                     parameters.put("dynamic", "true");
                 }
                 if (ESTestCase.randomBoolean()) {
                     parameters.put("enabled", "true");
                 }
-
                 return parameters;
             });
         }
@@ -106,15 +107,15 @@ public void baselineMappings(XContentBuilder builder) throws IOException {
     @Override
     public void contenderMappings(XContentBuilder builder) throws IOException {
         if (fullyDynamicMapping == false) {
-            if (subobjectsDisabled) {
-                dataGenerator.writeMapping(builder, b -> builder.field("subobjects", false));
+            if (subobjects != ObjectMapper.Subobjects.ENABLED) {
+                dataGenerator.writeMapping(builder, b -> builder.field("subobjects", subobjects.toString()));
             } else {
                 dataGenerator.writeMapping(builder);
             }
         } else {
             builder.startObject();
-            if (subobjectsDisabled) {
-                builder.field("subobjects", false);
+            if (subobjects != ObjectMapper.Subobjects.ENABLED) {
+                builder.field("subobjects", subobjects.toString());
             }
             builder.endObject();
         }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
new file mode 100644
index 0000000000000..984c1c22b2177
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
@@ -0,0 +1,250 @@
+---
+"Metrics object indexing":
+  - requires:
+      test_runner_features: allowed_warnings_regex
+      cluster_features: ["mapper.subobjects_auto"]
+      reason: requires supporting subobjects auto setting
+
+  - do:
+      indices.put_template:
+        name: test
+        body:
+          index_patterns: test-*
+          mappings:
+            dynamic_templates:
+              - no_subobjects:
+                  match: metrics
+                  mapping:
+                    type: object
+                    subobjects: auto
+                    properties:
+                      host.name:
+                        type: keyword
+
+  - do:
+      allowed_warnings_regex:
+        - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template"
+      index:
+        index: test-1
+        id: 1
+        refresh: true
+        body:
+          { metrics.host.name: localhost, metrics.host.id: 1, metrics.time: 10, metrics.time.max: 100, metrics.time.min: 1 }
+
+  - do:
+      field_caps:
+        index: test-1
+        fields: metrics*
+  - match: {fields.metrics\.host\.id.long.searchable: true}
+  - match: {fields.metrics\.host\.id.long.aggregatable: true}
+  - match: {fields.metrics\.host\.name.keyword.searchable: true}
+  - match: {fields.metrics\.host\.name.keyword.aggregatable: true}
+  - match: {fields.metrics\.time.long.searchable: true}
+  - match: {fields.metrics\.time.long.aggregatable: true}
+  - match: {fields.metrics\.time\.max.long.searchable: true}
+  - match: {fields.metrics\.time\.max.long.aggregatable: true}
+  - match: {fields.metrics\.time\.min.long.searchable: true}
+  - match: {fields.metrics\.time\.min.long.aggregatable: true}
+
+  - do:
+      get:
+        index: test-1
+        id: 1
+  - match: {_index: "test-1"}
+  - match: {_id: "1"}
+  - match: {_version: 1}
+  - match: {found: true}
+  - match:
+      _source:
+        metrics.host.name: localhost
+        metrics.host.id: 1
+        metrics.time: 10
+        metrics.time.max: 100
+        metrics.time.min: 1
+
+---
+"Root with metrics":
+  - requires:
+      test_runner_features: allowed_warnings_regex
+      cluster_features: ["mapper.subobjects_auto"]
+      reason: requires supporting subobjects auto setting
+
+  - do:
+      indices.put_template:
+        name: test
+        body:
+          index_patterns: test-*
+          mappings:
+            subobjects: auto
+            properties:
+              host.name:
+                type: keyword
+
+  - do:
+      allowed_warnings_regex:
+        - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template"
+      index:
+        index: test-1
+        id: 1
+        refresh: true
+        body:
+          { host.name: localhost, host.id: 1, time: 10, time.max: 100, time.min: 1 }
+
+  - do:
+      field_caps:
+        index: test-1
+        fields: [host*, time*]
+  - match: {fields.host\.name.keyword.searchable: true}
+  - match: {fields.host\.name.keyword.aggregatable: true}
+  - match: {fields.host\.id.long.searchable: true}
+  - match: {fields.host\.id.long.aggregatable: true}
+  - match: {fields.time.long.searchable: true}
+  - match: {fields.time.long.aggregatable: true}
+  - match: {fields.time\.max.long.searchable: true}
+  - match: {fields.time\.max.long.aggregatable: true}
+  - match: {fields.time\.min.long.searchable: true}
+  - match: {fields.time\.min.long.aggregatable: true}
+
+  - do:
+      get:
+        index: test-1
+        id: 1
+  - match: {_index: "test-1"}
+  - match: {_id: "1"}
+  - match: {_version: 1}
+  - match: {found: true}
+  - match:
+      _source:
+        host.name: localhost
+        host.id: 1
+        time: 10
+        time.max: 100
+        time.min: 1
+
+---
+"Metrics object indexing with synthetic source":
+  - requires:
+      test_runner_features: allowed_warnings_regex
+      cluster_features: ["mapper.subobjects_auto"]
+      reason: requires supporting subobjects auto setting
+
+  - do:
+      indices.put_template:
+        name: test
+        body:
+          index_patterns: test-*
+          mappings:
+            _source:
+              mode: synthetic
+            dynamic_templates:
+              - no_subobjects:
+                  match: metrics
+                  mapping:
+                    type: object
+                    subobjects: auto
+                    properties:
+                      host.name:
+                        type: keyword
+
+  - do:
+      allowed_warnings_regex:
+        - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template"
+      index:
+        index: test-1
+        id: 1
+        refresh: true
+        body:
+          { metrics.host.name: localhost, metrics.host.id: 1, metrics.time: 10, metrics.time.max: 100, metrics.time.min: 1 }
+
+  - do:
+      field_caps:
+        index: test-1
+        fields: metrics*
+  - match: {fields.metrics\.host\.id.long.searchable: true}
+  - match: {fields.metrics\.host\.id.long.aggregatable: true}
+  - match: {fields.metrics\.host\.name.keyword.searchable: true}
+  - match: {fields.metrics\.host\.name.keyword.aggregatable: true}
+  - match: {fields.metrics\.time.long.searchable: true}
+  - match: {fields.metrics\.time.long.aggregatable: true}
+  - match: {fields.metrics\.time\.max.long.searchable: true}
+  - match: {fields.metrics\.time\.max.long.aggregatable: true}
+  - match: {fields.metrics\.time\.min.long.searchable: true}
+  - match: {fields.metrics\.time\.min.long.aggregatable: true}
+
+  - do:
+      get:
+        index: test-1
+        id: 1
+  - match: {_index: "test-1"}
+  - match: {_id: "1"}
+  - match: {_version: 1}
+  - match: {found: true}
+  - match:
+      _source:
+        metrics:
+          host.name: localhost
+          host.id: 1
+          time: 10
+          time.max: 100
+          time.min: 1
+
+---
+"Root without subobjects with synthetic source":
+  - requires:
+      test_runner_features: allowed_warnings_regex
+      cluster_features: ["mapper.subobjects_auto"]
+      reason: requires supporting subobjects auto setting
+
+  - do:
+      indices.put_template:
+        name: test
+        body:
+          index_patterns: test-*
+          mappings:
+            _source:
+              mode: synthetic
+            subobjects: auto
+            properties:
+              host.name:
+                type: keyword
+
+  - do:
+      allowed_warnings_regex:
+        - "index \\[test-1\\] matches multiple legacy templates \\[global, test\\], composable templates will only match a single template"
+      index:
+        index: test-1
+        id: 1
+        refresh: true
+        body:
+          { host.name: localhost, host.id: 1, time: 10, time.max: 100, time.min: 1 }
+
+  - do:
+      field_caps:
+        index: test-1
+        fields: [host*, time*]
+  - match: {fields.host\.name.keyword.searchable: true}
+  - match: {fields.host\.name.keyword.aggregatable: true}
+  - match: {fields.host\.id.long.searchable: true}
+  - match: {fields.host\.id.long.aggregatable: true}
+  - match: {fields.time.long.searchable: true}
+  - match: {fields.time.long.aggregatable: true}
+  - match: {fields.time\.max.long.searchable: true}
+  - match: {fields.time\.max.long.aggregatable: true}
+  - match: {fields.time\.min.long.searchable: true}
+  - match: {fields.time\.min.long.aggregatable: true}
+
+  - do:
+      get:
+        index: test-1
+        id: 1
+  - match: {_index: "test-1"}
+  - match: {_id: "1"}
+  - match: {_version: 1}
+  - match: {found: true}
+  - match:
+      _source:
+        host.name: localhost
+        host.id: 1
+        time: 10
+        time.max: 100
+        time.min: 1
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
index e51074ee55270..1393d5454a9da 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
@@ -1250,3 +1250,92 @@ empty nested object sorted as a first document:
 
   - match: { hits.hits.1._source.name: B }
   - match: { hits.hits.1._source.nested.a: "b" }
+
+
+---
+subobjects auto:
+  - requires:
+      cluster_features: ["mapper.subobjects_auto"]
+      reason: requires tracking ignored source and supporting subobjects auto setting
+
+  - do:
+      indices.create:
+        index: test
+        body:
+          mappings:
+            _source:
+              mode: synthetic
+            subobjects: auto
+            properties:
+              id:
+                type: integer
+              regular:
+                properties:
+                  span:
+                    properties:
+                      id:
+                        type: keyword
+                  trace:
+                    properties:
+                      id:
+                        type: keyword
+              stored:
+                store_array_source: true
+                properties:
+                  span:
+                    properties:
+                      id:
+                        type: keyword
+                  trace:
+                    properties:
+                      id:
+                        type: keyword
+              nested:
+                type: nested
+              auto_obj:
+                type: object
+                subobjects: auto
+
+  - do:
+      bulk:
+        index: test
+        refresh: true
+        body:
+          - '{ "create": { } }'
+          - '{ "id": 1, "foo": 10, "foo.bar": 100, "regular": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }'
+          - '{ "create": { } }'
+          - '{ "id": 2, "foo": 20, "foo.bar": 200, "stored": [ { "trace": { "id": "a" }, "span": { "id": "1" } }, { "trace": { "id": "b" }, "span": { "id": "1" } } ] }'
+          - '{ "create": { } }'
+          - '{ "id": 3, "foo": 30, "foo.bar": 300, "nested": [ { "a": 10, "b": 20 }, { "a": 100, "b": 200 } ] }'
+          - '{ "create": { } }'
+          - '{ "id": 4, "auto_obj": { "foo": 40, "foo.bar": 400 } }'
+
+  - match: { errors: false }
+
+  - do:
+      search:
+        index: test
+        sort: id
+
+  - match: { hits.hits.0._source.id: 1 }
+  - match: { hits.hits.0._source.foo: 10 }
+  - match: { hits.hits.0._source.foo\.bar: 100 }
+  - match: { hits.hits.0._source.regular.span.id: "1" }
+  - match: { hits.hits.0._source.regular.trace.id: [ "a", "b" ] }
+  - match: { hits.hits.1._source.id: 2 }
+  - match: { hits.hits.1._source.foo: 20 }
+  - match: { hits.hits.1._source.foo\.bar: 200 }
+  - match: { hits.hits.1._source.stored.0.trace.id: a }
+  - match: { hits.hits.1._source.stored.0.span.id: "1" }
+  - match: { hits.hits.1._source.stored.1.trace.id: b }
+  - match: { hits.hits.1._source.stored.1.span.id: "1" }
+  - match: { hits.hits.2._source.id: 3 }
+  - match: { hits.hits.2._source.foo: 30 }
+  - match: { hits.hits.2._source.foo\.bar: 300 }
+  - match: { hits.hits.2._source.nested.0.a: 10 }
+  - match: { hits.hits.2._source.nested.0.b: 20 }
+  - match: { hits.hits.2._source.nested.1.a: 100 }
+  - match: { hits.hits.2._source.nested.1.b: 200 }
+  - match: { hits.hits.3._source.id: 4 }
+  - match: { hits.hits.3._source.auto_obj.foo: 40 }
+  - match: { hits.hits.3._source.auto_obj.foo\.bar: 400 }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
index 45bcf64f98945..3d82539944a97 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
@@ -449,6 +449,115 @@
         index: test-generic
 
   - match: { test-generic.mappings.properties.parent.properties.child\.grandchild.type: "keyword" }
+
+---
+"Composable index templates that include subobjects: auto at root":
+  - requires:
+      cluster_features: ["mapper.subobjects_auto"]
+      reason: requires supporting subobjects auto setting
+      test_runner_features: "allowed_warnings"
+
+  - do:
+      cluster.put_component_template:
+        name: test-subobjects
+        body:
+          template:
+            mappings:
+              subobjects: auto
+              properties:
+                message:
+                  enabled: false
+
+  - do:
+      cluster.put_component_template:
+        name: test-field
+        body:
+          template:
+            mappings:
+              properties:
+                parent.subfield:
+                  type: keyword
+
+  - do:
+      allowed_warnings:
+        - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation"
+      indices.put_index_template:
+        name: test-composable-template
+        body:
+          index_patterns:
+            - test-*
+          composed_of:
+            - test-subobjects
+            - test-field
+  - is_true: acknowledged
+
+  - do:
+      indices.create:
+        index: test-generic
+
+  - do:
+      indices.get_mapping:
+        index: test-generic
+  - match: { test-generic.mappings.properties.parent\.subfield.type: "keyword" }
+  - match: { test-generic.mappings.properties.message.type: "object" }
+
+---
+"Composable index templates that include subobjects: auto on arbitrary field":
+  - requires:
+      cluster_features: ["mapper.subobjects_auto"]
+      reason: requires supporting subobjects auto setting
+      test_runner_features: "allowed_warnings"
+
+  - do:
+      cluster.put_component_template:
+        name: test-subobjects
+        body:
+          template:
+            mappings:
+              properties:
+                parent:
+                  type: object
+                  subobjects: auto
+                  properties:
+                    message:
+                      enabled: false
+
+  - do:
+      cluster.put_component_template:
+        name: test-subfield
+        body:
+          template:
+            mappings:
+              properties:
+                parent:
+                  properties:
+                    child.grandchild:
+                      type: keyword
+
+  - do:
+      allowed_warnings:
+        - "index template [test-composable-template] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test-composable-template] will take precedence during new index creation"
+      indices.put_index_template:
+        name: test-composable-template
+        body:
+          index_patterns:
+            - test-*
+          composed_of:
+            - test-subobjects
+            - test-subfield
+  - is_true: acknowledged
+
+  - do:
+      indices.create:
+        index: test-generic
+
+  - do:
+      indices.get_mapping:
+        index: test-generic
+  - match: { test-generic.mappings.properties.parent.properties.child\.grandchild.type: "keyword" }
+  - match: { test-generic.mappings.properties.parent.properties.message.type: "object" }
+
+
 ---
 "Composition of component templates with different legal field mappings":
   - skip:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
index 703f2a0352fbd..c120bed2d369d 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
@@ -1125,3 +1125,55 @@ fetch geo_point:
   - match: { hits.hits.0.fields.root\.keyword.0: 'parent' }
   - match: { hits.hits.0.fields.root\.subfield.0: 'child' }
   - match: { hits.hits.0.fields.root\.subfield\.keyword.0: 'child' }
+
+---
+"Test with subobjects: auto":
+  - requires:
+      cluster_features: "mapper.subobjects_auto"
+      reason: requires support for subobjects auto setting
+
+  - do:
+      indices.create:
+        index: test
+        body:
+          mappings:
+            subobjects: auto
+            properties:
+              message:
+                type: object
+                subobjects: auto
+                enabled: false
+
+  - do:
+      index:
+        index: test
+        refresh: true
+        body: >
+          {
+            "root": "parent",
+            "root.subfield": "child",
+            "message": {
+              "foo": 10,
+              "foo.bar": 20
+            }
+          }
+  - match: {result: "created"}
+
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            term:
+
root.subfield: + value: 'child' + fields: + - field: 'root*' + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.root.0: 'parent' } + - match: { hits.hits.0.fields.root\.keyword.0: 'parent' } + - match: { hits.hits.0.fields.root\.subfield.0: 'child' } + - match: { hits.hits.0.fields.root\.subfield\.keyword.0: 'child' } + - is_false: hits.hits.0.fields.message + - match: { hits.hits.0._source.message.foo: 10 } + - match: { hits.hits.0._source.message.foo\.bar: 20 } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 2bf3668a3dabe..35f0130c58706 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -41,6 +41,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.Consumer; import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MAX_DIMS_COUNT; @@ -476,7 +477,7 @@ static void parseObjectOrField(DocumentParserContext context, Mapper mapper) thr private static boolean shouldFlattenObject(DocumentParserContext context, FieldMapper fieldMapper) { return context.parser().currentToken() == XContentParser.Token.START_OBJECT - && context.parent().subobjects() == false + && context.parent().subobjects() != ObjectMapper.Subobjects.ENABLED && fieldMapper.supportsParsingObject() == false; } @@ -517,7 +518,7 @@ private static void parseObject(final DocumentParserContext context, String curr private static void doParseObject(DocumentParserContext context, String currentFieldName, Mapper objectMapper) throws IOException { context.path().add(currentFieldName); boolean withinLeafObject = context.path().isWithinLeafObject(); - if (objectMapper instanceof ObjectMapper objMapper && objMapper.subobjects() == false) { + if (objectMapper instanceof ObjectMapper objMapper && objMapper.subobjects() != ObjectMapper.Subobjects.ENABLED) { context.path().setWithinLeafObject(true); } parseObjectOrField(context, objectMapper); @@ -563,7 +564,7 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur } else { dynamicObjectMapper = DynamicFieldsBuilder.createDynamicObjectMapper(context, currentFieldName); } - if (context.parent().subobjects() == false) { + if (context.parent().subobjects() == ObjectMapper.Subobjects.DISABLED) { if (dynamicObjectMapper instanceof NestedObjectMapper) { throw new DocumentParsingException( context.parser().getTokenLocation(), @@ -1012,7 +1013,7 @@ private static class NoOpObjectMapper extends ObjectMapper { name, fullPath, Explicit.IMPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, Dynamic.RUNTIME, Collections.emptyMap() @@ -1051,7 +1052,7 @@ private static class RootDocumentParserContext extends DocumentParserContext { mappingLookup.getMapping().getRoot(), ObjectMapper.Dynamic.getRootDynamic(mappingLookup) ); - if (mappingLookup.getMapping().getRoot().subobjects()) { + if (mappingLookup.getMapping().getRoot().subobjects() == ObjectMapper.Subobjects.ENABLED) { this.parser = DotExpandingXContentParser.expandDots(parser, this.path); } else { this.parser = parser; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java index d479cb97e3fd3..6eb1920df02c8 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java
@@ -161,7 +161,7 @@ static Mapper createDynamicObjectMapper(DocumentParserContext context, String na
         Mapper mapper = createObjectMapperFromTemplate(context, name);
         return mapper != null
             ? mapper
-            : new ObjectMapper.Builder(name, ObjectMapper.Defaults.SUBOBJECTS).enabled(ObjectMapper.Defaults.ENABLED)
+            : new ObjectMapper.Builder(name, context.parent().subobjects).enabled(ObjectMapper.Defaults.ENABLED)
                 .build(context.createDynamicMapperBuilderContext());
     }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java
index 15d77ba6d2229..7810fcdc64773 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java
@@ -31,7 +31,8 @@ public Set<NodeFeature> getFeatures() {
             KeywordFieldMapper.KEYWORD_DIMENSION_IGNORE_ABOVE,
             IndexModeFieldMapper.QUERYING_INDEX_MODE,
             NodeMappingStats.SEGMENT_LEVEL_FIELDS_STATS,
-            BooleanFieldMapper.BOOLEAN_DIMENSION
+            BooleanFieldMapper.BOOLEAN_DIMENSION,
+            ObjectMapper.SUBOBJECTS_AUTO
         );
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
index f3c438adcea09..d866b3c78173b 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
@@ -28,6 +28,7 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Optional;
 import java.util.function.Function;
 import java.util.function.Supplier;
 import java.util.stream.Stream;
@@ -49,7 +50,7 @@ public static class Builder extends ObjectMapper.Builder {
         private final Function<Query, BitSetProducer> bitSetProducer;
 
         public Builder(String name, IndexVersion indexCreatedVersion, Function<Query, BitSetProducer> bitSetProducer) {
-            super(name, Explicit.IMPLICIT_TRUE);
+            super(name, Optional.empty());
             this.indexCreatedVersion = indexCreatedVersion;
             this.bitSetProducer = bitSetProducer;
         }
@@ -121,7 +122,7 @@ public static class TypeParser extends ObjectMapper.TypeParser {
         @Override
         public Mapper.Builder parse(String name, Map<String, Object> node, MappingParserContext parserContext)
            throws MapperParsingException {
-            if (parseSubobjects(node).explicit()) {
+            if (parseSubobjects(node).isPresent()) {
                 throw new MapperParsingException("Nested type [" + name + "] does not support [subobjects] parameter");
             }
             NestedObjectMapper.Builder builder = new NestedObjectMapper.Builder(
@@ -209,7 +210,7 @@ public MapperBuilderContext createChildContext(String name, Dynamic dynamic) {
         Query nestedTypeFilter,
         Function<Query, BitSetProducer> bitsetProducer
     ) {
-        super(name, fullPath, enabled, Explicit.IMPLICIT_TRUE, storeArraySource, dynamic, mappers);
+        super(name, fullPath, enabled, Optional.empty(), storeArraySource, dynamic, mappers);
         this.parentTypeFilter = parentTypeFilter;
         this.nestedTypePath = nestedTypePath;
         this.nestedTypeFilter = nestedTypeFilter;
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
index e504702d84c1e..29ec0357d7c1e 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.core.Nullable;
+import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
@@ -33,6 +34,7 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.TreeMap;
 import java.util.stream.Stream;
 
@@ -41,10 +43,50 @@ public class ObjectMapper extends Mapper {
     public static final String CONTENT_TYPE = "object";
     static final String STORE_ARRAY_SOURCE_PARAM = "store_array_source";
+    static final NodeFeature SUBOBJECTS_AUTO = new NodeFeature("mapper.subobjects_auto");
+
+    /**
+     * Enhances the previously boolean option for subobjects support with an intermediate mode `auto` that uses
+     * any objects that are present in the mappings and flattens any fields defined outside the predefined objects.
+     */
+    public enum Subobjects {
+        ENABLED(Boolean.TRUE),
+        DISABLED(Boolean.FALSE),
+        AUTO("auto");
+
+        private final Object printedValue;
+
+        Subobjects(Object printedValue) {
+            this.printedValue = printedValue;
+        }
+
+        static Subobjects from(Object node) {
+            if (node instanceof Boolean value) {
+                return value ? Subobjects.ENABLED : Subobjects.DISABLED;
+            }
+            if (node instanceof String value) {
+                if (value.equalsIgnoreCase("true")) {
+                    return ENABLED;
+                }
+                if (value.equalsIgnoreCase("false")) {
+                    return DISABLED;
+                }
+                if (value.equalsIgnoreCase("auto")) {
+                    return AUTO;
+                }
+            }
+            throw new ElasticsearchParseException("unknown subobjects value: " + node);
+        }
+
+        @Override
+        public String toString() {
+            return printedValue.toString();
+        }
+    }
 
     public static class Defaults {
         public static final boolean ENABLED = true;
-        public static final Explicit<Boolean> SUBOBJECTS = Explicit.IMPLICIT_TRUE;
+        public static final Optional<Subobjects> SUBOBJECTS = Optional.empty();
         public static final Explicit<Boolean> STORE_ARRAY_SOURCE = Explicit.IMPLICIT_FALSE;
         public static final Dynamic DYNAMIC = Dynamic.TRUE;
     }
@@ -81,13 +123,13 @@ static Dynamic getRootDynamic(MappingLookup mappingLookup) {
     }
 
     public static class Builder extends Mapper.Builder {
-        protected final Explicit<Boolean> subobjects;
+        protected Optional<Subobjects> subobjects;
         protected Explicit<Boolean> enabled = Explicit.IMPLICIT_TRUE;
         protected Explicit<Boolean> storeArraySource = Defaults.STORE_ARRAY_SOURCE;
         protected Dynamic dynamic;
         protected final List<Mapper.Builder> mappersBuilders = new ArrayList<>();
 
-        public Builder(String name, Explicit<Boolean> subobjects) {
+        public Builder(String name, Optional<Subobjects> subobjects) {
             super(name);
             this.subobjects = subobjects;
         }
@@ -132,20 +174,27 @@ public Mapper build(MapperBuilderContext context) {
         public final void addDynamic(String name, String prefix, Mapper mapper, DocumentParserContext context) {
             // If the mapper to add has no dots, or the current object mapper has subobjects set to false,
             // we just add it as it is for sure a leaf mapper
-            if (name.contains(".") == false || subobjects.value() == false) {
+            if (name.contains(".") == false || (subobjects.isPresent() && (subobjects.get() == Subobjects.DISABLED))) {
                 add(name, mapper);
-            }
-            // otherwise we strip off the first object path of the mapper name, load or create
-            // the relevant object mapper, and then recurse down into it, passing the remainder
-            // of the mapper name. So for a mapper 'foo.bar.baz', we locate 'foo' and then
-            // call addDynamic on it with the name 'bar.baz', and next call addDynamic on 'bar' with the name 'baz'.
-            else {
+            } else {
+                // We strip off the first object path of the mapper name, load or create
+                // the relevant object mapper, and then recurse down into it, passing the remainder
+                // of the mapper name. So for a mapper 'foo.bar.baz', we locate 'foo' and then
+                // call addDynamic on it with the name 'bar.baz', and next call addDynamic on 'bar' with the name 'baz'.
                 int firstDotIndex = name.indexOf('.');
                 String immediateChild = name.substring(0, firstDotIndex);
                 String immediateChildFullName = prefix == null ? immediateChild : prefix + "." + immediateChild;
                 Builder parentBuilder = findObjectBuilder(immediateChildFullName, context);
-                parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context);
-                add(parentBuilder);
+                if (parentBuilder != null) {
+                    parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context);
+                    add(parentBuilder);
+                } else if (subobjects.isPresent() && subobjects.get() == Subobjects.AUTO) {
+                    // No matching parent object was found, the mapper is added as a leaf - similar to subobjects false.
+                    add(name, mapper);
+                } else {
+                    // Expected to find a matching parent object but got null.
+                    throw new IllegalStateException("Missing intermediate object " + immediateChildFullName);
+                }
             }
         }
 
@@ -160,7 +209,8 @@ private static Builder findObjectBuilder(String fullName, DocumentParserContext
             if (objectMapper != null) {
                 return objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated());
             }
-            throw new IllegalStateException("Missing intermediate object " + fullName);
+            // no object mapper found
+            return null;
         }
 
         protected final Map<String, Mapper> buildMappers(MapperBuilderContext mapperBuilderContext) {
@@ -176,7 +226,7 @@ protected final Map<String, Mapper> buildMappers(MapperBuilderContext mapperBuil
                     // mix of object notation and dot notation.
                     mapper = existing.merge(mapper, MapperMergeContext.from(mapperBuilderContext, Long.MAX_VALUE));
                 }
-                if (subobjects.value() == false && mapper instanceof ObjectMapper objectMapper) {
+                if (subobjects.isPresent() && subobjects.get() == Subobjects.DISABLED && mapper instanceof ObjectMapper objectMapper) {
                     // We're parsing a mapping that has set `subobjects: false` but has defined sub-objects
                     objectMapper.asFlattenedFieldMappers(mapperBuilderContext).forEach(m -> mappers.put(m.leafName(), m));
                 } else {
@@ -215,7 +265,7 @@ public boolean supportsVersion(IndexVersion indexCreatedVersion) {
         public Mapper.Builder parse(String name, Map<String, Object> node, MappingParserContext parserContext)
            throws MapperParsingException {
             parserContext.incrementMappingObjectDepth(); // throws MapperParsingException if depth limit is exceeded
-            Explicit<Boolean> subobjects = parseSubobjects(node);
+            Optional<Subobjects> subobjects = parseSubobjects(node);
             Builder builder = new Builder(name, subobjects);
             parseObjectFields(node, parserContext, builder);
             parserContext.decrementMappingObjectDepth();
@@ -277,10 +327,10 @@ protected static boolean parseObjectOrDocumentTypeProperties(
             return false;
         }
 
-        protected static Explicit<Boolean> parseSubobjects(Map<String, Object> node) {
+        protected static Optional<Subobjects> parseSubobjects(Map<String, Object> node) {
             Object subobjectsNode = node.remove("subobjects");
             if (subobjectsNode != null) {
-                return Explicit.explicitBoolean(XContentMapValues.nodeBooleanValue(subobjectsNode, "subobjects.subobjects"));
+                return Optional.of(Subobjects.from(subobjectsNode));
             }
             return Defaults.SUBOBJECTS;
         }
@@ -317,7 +367,9 @@ protected static void parseProperties(Builder objBuilder, Map<String, Object> pr
                 }
             }
 
-            if (objBuilder.subobjects.value() == false && type.equals(NestedObjectMapper.CONTENT_TYPE)) {
+            if (objBuilder.subobjects.isPresent()
+                && objBuilder.subobjects.get() == Subobjects.DISABLED
+                && type.equals(NestedObjectMapper.CONTENT_TYPE)) {
                 throw new MapperParsingException(
                     "Tried to add nested object ["
                         + fieldName
@@ -331,7 +383,7 @@
                 throw new MapperParsingException("No handler for type [" + type + "] declared on field [" + fieldName + "]");
             }
             Mapper.Builder fieldBuilder;
-            if (objBuilder.subobjects.value() == false) {
+            if (objBuilder.subobjects.isPresent() && objBuilder.subobjects.get() != Subobjects.ENABLED) {
                 fieldBuilder = typeParser.parse(fieldName, propNode, parserContext);
             } else {
                 String[] fieldNameParts = fieldName.split("\\.");
@@ -379,7 +431,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate
     private final String fullPath;
 
     protected final Explicit<Boolean> enabled;
-    protected final Explicit<Boolean> subobjects;
+    protected final Optional<Subobjects> subobjects;
     protected final Explicit<Boolean> storeArraySource;
     protected final Dynamic dynamic;
 
@@ -389,7 +441,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate
         String name,
         String fullPath,
         Explicit<Boolean> enabled,
-        Explicit<Boolean> subobjects,
+        Optional<Subobjects> subobjects,
         Explicit<Boolean> storeArraySource,
         Dynamic dynamic,
         Map<String, Mapper> mappers
@@ -407,7 +459,9 @@
         } else {
             this.mappers = Map.copyOf(mappers);
         }
-        assert subobjects.value() || this.mappers.values().stream().noneMatch(m -> m instanceof ObjectMapper)
+        assert subobjects.isEmpty()
+            || subobjects.get() != Subobjects.DISABLED
+            || this.mappers.values().stream().noneMatch(m -> m instanceof ObjectMapper)
             : "When subobjects is false, mappers must not contain an ObjectMapper";
     }
 
@@ -460,8 +514,8 @@ public final Dynamic dynamic()
{ return dynamic; } - public final boolean subobjects() { - return subobjects.value(); + public final Subobjects subobjects() { + return subobjects.orElse(Subobjects.ENABLED); } public final boolean storeArraySource() { @@ -502,7 +556,7 @@ public ObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContex protected record MergeResult( Explicit enabled, - Explicit subObjects, + Optional subObjects, Explicit trackArraySource, Dynamic dynamic, Map mappers @@ -523,11 +577,11 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma } else { enabled = existing.enabled; } - final Explicit subObjects; - if (mergeWithObject.subobjects.explicit()) { + final Optional subObjects; + if (mergeWithObject.subobjects.isPresent()) { if (reason == MergeReason.INDEX_TEMPLATE) { subObjects = mergeWithObject.subobjects; - } else if (existing.subobjects != mergeWithObject.subobjects) { + } else if (existing.subobjects() != mergeWithObject.subobjects()) { throw new MapperException( "the [subobjects] parameter can't be updated for the object mapping [" + existing.fullPath() + "]" ); @@ -552,7 +606,7 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma trackArraySource = existing.storeArraySource; } MapperMergeContext objectMergeContext = existing.createChildContext(parentMergeContext, existing.leafName()); - Map mergedMappers = buildMergedMappers(existing, mergeWithObject, objectMergeContext, subObjects.value()); + Map mergedMappers = buildMergedMappers(existing, mergeWithObject, objectMergeContext, subObjects); return new MergeResult( enabled, subObjects, @@ -566,11 +620,13 @@ private static Map buildMergedMappers( ObjectMapper existing, ObjectMapper mergeWithObject, MapperMergeContext objectMergeContext, - boolean subobjects + Optional subobjects ) { Map mergedMappers = new HashMap<>(); for (Mapper childOfExistingMapper : existing.mappers.values()) { - if (subobjects == false && childOfExistingMapper instanceof ObjectMapper objectMapper) { + if (subobjects.isPresent() + && subobjects.get() == Subobjects.DISABLED + && childOfExistingMapper instanceof ObjectMapper objectMapper) { // An existing mapping with sub-objects is merged with a mapping that has set `subobjects: false` objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) .forEach(m -> mergedMappers.put(m.leafName(), m)); @@ -581,7 +637,9 @@ private static Map buildMergedMappers( for (Mapper mergeWithMapper : mergeWithObject) { Mapper mergeIntoMapper = mergedMappers.get(mergeWithMapper.leafName()); if (mergeIntoMapper == null) { - if (subobjects == false && mergeWithMapper instanceof ObjectMapper objectMapper) { + if (subobjects.isPresent() + && subobjects.get() == Subobjects.DISABLED + && mergeWithMapper instanceof ObjectMapper objectMapper) { // An existing mapping that has set `subobjects: false` is merged with a mapping with sub-objects objectMapper.asFlattenedFieldMappers(objectMergeContext.getMapperBuilderContext()) .stream() @@ -593,7 +651,8 @@ private static Map buildMergedMappers( putMergedMapper(mergedMappers, truncateObjectMapper(objectMergeContext, om)); } } else if (mergeIntoMapper instanceof ObjectMapper objectMapper) { - assert subobjects : "existing object mappers are supposed to be flattened if subobjects is false"; + assert subobjects.isEmpty() || subobjects.get() != Subobjects.DISABLED + : "existing object mappers are supposed to be flattened if subobjects is false"; putMergedMapper(mergedMappers, objectMapper.merge(mergeWithMapper, 
objectMergeContext)); } else { assert mergeIntoMapper instanceof FieldMapper || mergeIntoMapper instanceof FieldAliasMapper; @@ -675,7 +734,7 @@ private void ensureFlattenable(MapperBuilderContext context, ContentPath path) { if (isEnabled() == false) { throwAutoFlatteningException(path, "the value of [enabled] is [false]"); } - if (subobjects.explicit() && subobjects()) { + if (subobjects.isPresent() && subobjects.get() == Subobjects.ENABLED) { throwAutoFlatteningException(path, "the value of [subobjects] is [true]"); } } @@ -710,8 +769,8 @@ void toXContent(XContentBuilder builder, Params params, ToXContent custom) throw if (isEnabled() != Defaults.ENABLED) { builder.field("enabled", enabled.value()); } - if (subobjects != Defaults.SUBOBJECTS) { - builder.field("subobjects", subobjects.value()); + if (subobjects.isPresent()) { + builder.field("subobjects", subobjects.get().printedValue); } if (storeArraySource != Defaults.STORE_ARRAY_SOURCE) { builder.field(STORE_ARRAY_SOURCE_PARAM, storeArraySource.value()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java index 0b7f4de157bdc..7370fe3c61772 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java @@ -18,6 +18,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Optional; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue; @@ -52,7 +53,7 @@ public static class Builder extends ObjectMapper.Builder { public Builder(String name) { // Subobjects are not currently supported. - super(name, Explicit.IMPLICIT_FALSE); + super(name, Optional.of(Subobjects.DISABLED)); } @Override @@ -103,7 +104,7 @@ public PassThroughObjectMapper build(MapperBuilderContext context) { int priority ) { // Subobjects are not currently supported. 
-        super(name, fullPath, enabled, Explicit.IMPLICIT_FALSE, Explicit.IMPLICIT_FALSE, dynamic, mappers);
+        super(name, fullPath, enabled, Optional.of(Subobjects.DISABLED), Explicit.IMPLICIT_FALSE, dynamic, mappers);
         this.timeSeriesDimensionSubFields = timeSeriesDimensionSubFields;
         this.priority = priority;
         if (priority < 0) {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
index 11aabd8726f4f..6c178330e5c9e 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java
@@ -34,6 +34,7 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.function.BiConsumer;
 
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
@@ -75,7 +76,7 @@ public static class Builder extends ObjectMapper.Builder {
         protected Explicit<Boolean> dateDetection = Defaults.DATE_DETECTION;
         protected Explicit<Boolean> numericDetection = Defaults.NUMERIC_DETECTION;
 
-        public Builder(String name, Explicit<Boolean> subobjects) {
+        public Builder(String name, Optional<Subobjects> subobjects) {
             super(name, subobjects);
         }
 
@@ -132,7 +133,7 @@ public RootObjectMapper build(MapperBuilderContext context) {
     RootObjectMapper(
         String name,
         Explicit<Boolean> enabled,
-        Explicit<Boolean> subobjects,
+        Optional<Subobjects> subobjects,
         Explicit<Boolean> trackArraySource,
         Dynamic dynamic,
         Map<String, Mapper> mappers,
@@ -442,7 +443,7 @@ protected boolean isRoot() {
     public static RootObjectMapper.Builder parse(String name, Map<String, Object> node, MappingParserContext parserContext)
         throws MapperParsingException {
-        Explicit<Boolean> subobjects = parseSubobjects(node);
+        Optional<Subobjects> subobjects = parseSubobjects(node);
         RootObjectMapper.Builder builder = new Builder(name, subobjects);
         Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator();
         while (iterator.hasNext()) {
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java
index a138f0910e6ec..878bdc91bba06 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java
@@ -8,7 +8,6 @@
 
 package org.elasticsearch.index.mapper;
 
-import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESTestCase;
@@ -19,6 +18,7 @@
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
 
@@ -69,7 +69,7 @@ public void testCreateDynamicStringFieldAsKeywordForDimension() throws IOExcepti
         SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON);
         SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false).setSynthetic().build();
-        RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(
+        RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add(
             new PassThroughObjectMapper.Builder("labels").setPriority(0).setContainsDimensions().dynamic(ObjectMapper.Dynamic.TRUE)
         ).build(MapperBuilderContext.root(false, false));
         Mapping mapping = new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of());
diff --git
a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 61926d72982d8..a5a5d9726f233 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -1132,6 +1132,14 @@ public void testDynamicRuntimeWithDynamicTemplate() throws IOException { } private MapperService createDynamicTemplateNoSubobjects() throws IOException { + return createDynamicTemplateWithSubobjects("false"); + } + + private MapperService createDynamicTemplateAutoSubobjects() throws IOException { + return createDynamicTemplateWithSubobjects("auto"); + } + + private MapperService createDynamicTemplateWithSubobjects(String subobjects) throws IOException { return createMapperService(topMapping(b -> { b.startArray("dynamic_templates"); { @@ -1141,7 +1149,7 @@ private MapperService createDynamicTemplateNoSubobjects() throws IOException { { b.field("match_mapping_type", "object"); b.field("match", "metric"); - b.startObject("mapping").field("type", "object").field("subobjects", false).endObject(); + b.startObject("mapping").field("type", "object").field("subobjects", subobjects).endObject(); } b.endObject(); } @@ -1388,7 +1396,7 @@ public void testDynamicSubobjectsFalseDynamicFalse() throws Exception { assertEquals(ObjectMapper.Dynamic.FALSE, metrics.dynamic()); assertEquals(1, metrics.mappers.size()); ObjectMapper service = (ObjectMapper) metrics.getMapper("service"); - assertFalse(service.subobjects()); + assertEquals(ObjectMapper.Subobjects.DISABLED, service.subobjects()); assertEquals(1, service.mappers.size()); assertNotNull(service.getMapper("time")); } @@ -1434,6 +1442,255 @@ public void testSubobjectsFalseWithInnerNestedFromDynamicTemplate() { ); } + public void testSubobjectsAutoFlatPaths() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.field("foo.metric.count", 10); + b.field("foo.bar.baz", 10); + b.field("foo.metric.count.min", 4); + b.field("foo.metric.count.max", 15); + })); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectsAutoStructuredPaths() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.startObject("foo"); + { + b.startObject("metric"); + { + b.field("count", 10); + b.field("count.min", 4); + b.field("count.max", 15); + } + b.endObject(); + b.startObject("bar"); + b.field("baz", 10); + b.endObject(); + } + b.endObject(); + })); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectsAutoArrayOfObjects() throws IOException { + MapperService mapperService = createDynamicTemplateAutoSubobjects(); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.startObject("foo"); + { + b.startArray("metric"); + { + b.startObject(); + { + b.field("count", 10); + b.field("count.min", 4); + b.field("count.max", 15); + } + b.endObject(); + b.startObject(); + { + b.field("count", 5); + b.field("count.min", 3); + b.field("count.max", 50); + } + b.endObject(); + } + b.endArray(); + b.startObject("bar"); + b.field("baz", 10); + b.endObject(); + } + b.endObject(); + })); + 
merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + assertNoSubobjects(mapperService); + } + + public void testSubobjectAutoDynamicNested() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("nested"); + { + b.field("match", "object"); + b.startObject("mapping"); + { + b.field("type", "nested"); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + b.startObject("properties"); + b.startObject("metrics").field("type", "object").field("subobjects", "auto").endObject(); + b.endObject(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics.object" : { + "foo" : "bar" + } + } + """)); + + assertNotNull(doc.docs().get(0).get("metrics.object.foo")); + assertThat( + ((ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics")).getMapper("object"), + instanceOf(NestedObjectMapper.class) + ); + } + + public void testRootSubobjectAutoDynamicNested() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("nested"); + { + b.field("match", "object"); + b.startObject("mapping"); + { + b.field("type", "nested"); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + b.field("subobjects", "auto"); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "object" : { + "foo" : "bar" + } + } + """)); + + assertNotNull(doc.docs().get(0).get("object.foo")); + assertThat(doc.dynamicMappingsUpdate().getRoot().getMapper("object"), instanceOf(NestedObjectMapper.class)); + } + + public void testDynamicSubobjectsAutoDynamicFalse() throws Exception { + // verify that we read the dynamic value properly from the parent mapper. DocumentParser#dynamicOrDefault splits the field + // name where dots are found, but it does that only for the parent prefix e.g. 
metrics.service and not for the leaf suffix time.max + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + b.startObject("metrics"); + { + b.field("match", "metrics"); + b.startObject("mapping"); + { + b.field("type", "object"); + b.field("dynamic", "false"); + b.startObject("properties"); + { + b.startObject("service"); + { + b.field("type", "object"); + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "keyword"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + b.endObject(); + } + b.endArray(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics": { + "service": { + "time" : 10, + "time.max" : 500 + } + } + } + """)); + + assertNotNull(doc.rootDoc().getField("metrics.service.time")); + assertNull(doc.rootDoc().getField("metrics.service.time.max")); + assertNotNull(doc.dynamicMappingsUpdate()); + ObjectMapper metrics = (ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics"); + assertEquals(ObjectMapper.Dynamic.FALSE, metrics.dynamic()); + assertEquals(1, metrics.mappers.size()); + ObjectMapper service = (ObjectMapper) metrics.getMapper("service"); + assertEquals(ObjectMapper.Subobjects.AUTO, service.subobjects()); + assertEquals(1, service.mappers.size()); + assertNotNull(service.getMapper("time")); + } + + public void testSubobjectsAutoWithInnerNestedFromDynamicTemplate() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + { + b.startObject(); + { + b.startObject("test"); + { + b.field("match", "metrics"); + b.startObject("mapping"); + { + b.field("type", "object").field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "nested"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + } + b.endArray(); + })); + + ParsedDocument doc = mapper.parse(source(""" + { + "metrics": { + "time" : { + "foo" : "bar" + }, + "time.max" : 500 + } + } + """)); + + assertNotNull(doc.rootDoc().get("metrics.time.max")); + assertNotNull(doc.docs().get(0).get("metrics.time.foo")); + assertThat( + ((ObjectMapper) doc.dynamicMappingsUpdate().getRoot().getMapper("metrics")).getMapper("time"), + instanceOf(NestedObjectMapper.class) + ); + } + public void testDynamicSubobject() throws IOException { MapperService mapperService = createMapperService(topMapping(b -> { b.startArray("dynamic_templates"); @@ -1803,7 +2060,7 @@ public void testSubobjectsFalseDocWithEmptyObject() throws IOException { Mapping mapping = doc.dynamicMappingsUpdate(); ObjectMapper artifacts = (ObjectMapper) mapping.getRoot().getMapper("artifacts"); ObjectMapper leaf = (ObjectMapper) artifacts.getMapper("leaf"); - assertFalse(leaf.subobjects()); + assertEquals(ObjectMapper.Subobjects.DISABLED, leaf.subobjects()); } public void testSubobjectsFalseFlattened() throws IOException { @@ -1853,6 +2110,53 @@ public void testSubobjectsFalseFlattened() throws IOException { assertEquals("flattened", fooStructuredMapper.typeName()); } + public void testSubobjectsAutoFlattened() throws IOException { + String mapping = """ + { + "_doc": { + "properties": { + "attributes": { + "type": "object", + "subobjects": "auto" + } + }, + "dynamic_templates": [ + { + "test": { + "path_match": "attributes.resource.*", + "match_mapping_type": "object", + 
"mapping": { + "type": "flattened" + } + } + } + ] + } + } + """; + String docJson = """ + { + "attributes.resource": { + "complex.attribute": { + "a": "b" + }, + "foo.bar": "baz" + } + } + """; + + MapperService mapperService = createMapperService(mapping); + ParsedDocument parsedDoc = mapperService.documentMapper().parse(source(docJson)); + merge(mapperService, dynamicMapping(parsedDoc.dynamicMappingsUpdate())); + + Mapper fooBarMapper = mapperService.documentMapper().mappers().getMapper("attributes.resource.foo.bar"); + assertNotNull(fooBarMapper); + assertEquals("text", fooBarMapper.typeName()); + Mapper fooStructuredMapper = mapperService.documentMapper().mappers().getMapper("attributes.resource.complex.attribute"); + assertNotNull(fooStructuredMapper); + assertEquals("flattened", fooStructuredMapper.typeName()); + } + public void testMatchWithArrayOfFieldNames() throws IOException { String mapping = """ { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index d913b86aed2d5..a8669a0befd0d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -16,6 +16,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -176,7 +177,7 @@ private static ObjectMapper createObjectMapper(String name) { name, name, Explicit.IMPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, ObjectMapper.Dynamic.FALSE, emptyMap() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java index 251b0ae62f3c5..6a790f7e91118 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -81,7 +82,7 @@ public void testSubfieldOverride() { "object", "object", Explicit.EXPLICIT_TRUE, - Explicit.IMPLICIT_TRUE, + Optional.empty(), Explicit.IMPLICIT_FALSE, ObjectMapper.Dynamic.TRUE, Collections.singletonMap("object.subfield", fieldMapper) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index b3bb8cbe697a5..ea6ddf0257d6f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -7,11 +7,11 @@ */ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.Explicit; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import java.util.Collections; +import java.util.Optional; import static org.elasticsearch.index.mapper.MapperService.MergeReason.INDEX_TEMPLATE; import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; @@ -26,9 +26,9 @@ private RootObjectMapper createMapping( boolean includeBarField, boolean includeBazField ) { - 
RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("type1", Explicit.IMPLICIT_TRUE); - rootBuilder.add(new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE).enabled(disabledFieldEnabled)); - ObjectMapper.Builder fooBuilder = new ObjectMapper.Builder("foo", Explicit.IMPLICIT_TRUE).enabled(fooFieldEnabled); + RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("type1", Optional.empty()); + rootBuilder.add(new ObjectMapper.Builder("disabled", Optional.empty()).enabled(disabledFieldEnabled)); + ObjectMapper.Builder fooBuilder = new ObjectMapper.Builder("foo", Optional.empty()).enabled(fooFieldEnabled); if (includeBarField) { fooBuilder.add(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers(), false)); } @@ -77,8 +77,8 @@ public void testMergeWhenDisablingField() { public void testMergeDisabledField() { // GIVEN a mapping with "foo" field disabled // the field is disabled, and we are not trying to re-enable it, hence merge should work - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE) + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("disabled", Optional.empty()) ).build(MapperBuilderContext.root(false, false)); RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -100,10 +100,8 @@ public void testMergeEnabled() { public void testMergeEnabledForRootMapper() { String type = MapperService.SINGLE_MAPPING_NAME; - ObjectMapper firstMapper = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).build( - MapperBuilderContext.root(false, false) - ); - ObjectMapper secondMapper = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).enabled(false) + ObjectMapper firstMapper = new RootObjectMapper.Builder("_doc", Optional.empty()).build(MapperBuilderContext.root(false, false)); + ObjectMapper secondMapper = new RootObjectMapper.Builder("_doc", Optional.empty()).enabled(false) .build(MapperBuilderContext.root(false, false)); MapperException e = expectThrows( @@ -144,12 +142,10 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalseAtRoot() { } public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - createObjectSubobjectsFalseLeafWithDots() - ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - createObjectSubobjectsFalseLeafWithDots() - ).build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createObjectSubobjectsFalseLeafWithDots()) + .build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createObjectSubobjectsFalseLeafWithDots()) + .build(MapperBuilderContext.root(false, false)); final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -161,9 +157,9 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { } public void testMergedFieldNamesMultiFields() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(createTextKeywordMultiField("text")) + RootObjectMapper mergeInto = new 
RootObjectMapper.Builder("_doc", Optional.empty()).add(createTextKeywordMultiField("text")) .build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(createTextKeywordMultiField("text")) + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add(createTextKeywordMultiField("text")) .build(MapperBuilderContext.root(false, false)); final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); @@ -177,10 +173,10 @@ public void testMergedFieldNamesMultiFields() { } public void testMergedFieldNamesMultiFieldsWithinSubobjectsFalse() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createObjectSubobjectsFalseLeafWithMultiField() ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createObjectSubobjectsFalseLeafWithMultiField() ).build(MapperBuilderContext.root(false, false)); @@ -212,9 +208,9 @@ public void testMergeWithLimit() { } public void testMergeWithLimitTruncatedObjectField() { - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_FALSE).add( + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).build(MapperBuilderContext.root(false, false)); + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)).add( new KeywordFieldMapper.Builder("child1", IndexVersion.current()) ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); @@ -243,11 +239,11 @@ public void testMergeWithLimitTruncatedObjectField() { } public void testMergeSameObjectDifferentFields() { - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add(new KeywordFieldMapper.Builder("child1", IndexVersion.current())) + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add(new KeywordFieldMapper.Builder("child1", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add( new KeywordFieldMapper.Builder("child1", IndexVersion.current()).ignoreAbove(42) ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); @@ -270,10 +266,10 @@ public void testMergeSameObjectDifferentFields() { } public void testMergeWithLimitMultiField() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + 
RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createTextKeywordMultiField("text", "keyword1") ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( createTextKeywordMultiField("text", "keyword2") ).build(MapperBuilderContext.root(false, false)); @@ -287,10 +283,10 @@ public void testMergeWithLimitMultiField() { } public void testMergeWithLimitRuntimeField() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).addRuntimeField( + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).addRuntimeField( new TestRuntimeField("existing_runtime_field", "keyword") ).add(createTextKeywordMultiField("text", "keyword1")).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).addRuntimeField( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).addRuntimeField( new TestRuntimeField("existing_runtime_field", "keyword") ).addRuntimeField(new TestRuntimeField("new_runtime_field", "keyword")).build(MapperBuilderContext.root(false, false)); @@ -304,12 +300,12 @@ public void testMergeWithLimitRuntimeField() { } public void testMergeSubobjectsFalseWithObject() { - RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_FALSE) + RootObjectMapper mergeInto = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Optional.empty()).add( + new ObjectMapper.Builder("parent", Optional.empty()).add( + new ObjectMapper.Builder("child", Optional.empty()).add( new KeywordFieldMapper.Builder("grandchild", IndexVersion.current()) ) ) @@ -326,7 +322,7 @@ private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { FieldMapper fieldMapper = fieldBuilder.build(MapperBuilderContext.root(false, false)); assertEquals("host.name", fieldMapper.leafName()); assertEquals("host.name", fieldMapper.fullPath()); - return new RootObjectMapper.Builder("_doc", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + return new RootObjectMapper.Builder("_doc", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) .build(MapperBuilderContext.root(false, false)); } @@ -346,7 +342,7 @@ private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { assertEquals("host.name", fieldMapper.leafName()); assertEquals("foo.metrics.host.name", fieldMapper.fullPath()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( - new ObjectMapper.Builder("metrics", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + new ObjectMapper.Builder("metrics", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) ); } @@ -369,7 +365,7 @@ private ObjectMapper.Builder createObjectSubobjectsFalseLeafWithMultiField() { assertEquals("keyword", fieldMapper.leafName()); 
assertEquals("foo.metrics.host.name.keyword", fieldMapper.fullPath()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( - new ObjectMapper.Builder("metrics", Explicit.EXPLICIT_FALSE).add(fieldBuilder) + new ObjectMapper.Builder("metrics", Optional.of(ObjectMapper.Subobjects.DISABLED)).add(fieldBuilder) ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 3c81f833985dd..49d8ba9c2ca29 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -21,9 +20,11 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.core.IsInstanceOf; import java.io.IOException; import java.util.List; +import java.util.Optional; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -164,7 +165,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { ObjectMapper objectMapper = mapper.mappers().objectMappers().get("object"); assertNotNull(objectMapper); assertFalse(objectMapper.isEnabled()); - assertTrue(objectMapper.subobjects()); + assertEquals(ObjectMapper.Subobjects.ENABLED, objectMapper.subobjects()); assertFalse(objectMapper.storeArraySource()); // Setting 'enabled' to true is allowed, and updates the mapping. 
@@ -175,7 +176,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { .startObject("object") .field("type", "object") .field("enabled", true) - .field("subobjects", false) + .field("subobjects", "auto") .field(ObjectMapper.STORE_ARRAY_SOURCE_PARAM, true) .endObject() .endObject() @@ -186,7 +187,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { objectMapper = mapper.mappers().objectMappers().get("object"); assertNotNull(objectMapper); assertTrue(objectMapper.isEnabled()); - assertFalse(objectMapper.subobjects()); + assertEquals(ObjectMapper.Subobjects.AUTO, objectMapper.subobjects()); assertTrue(objectMapper.storeArraySource()); } @@ -500,6 +501,141 @@ public void testSubobjectsCannotBeUpdatedOnRoot() throws IOException { assertEquals("the [subobjects] parameter can't be updated for the object mapping [_doc]", exception.getMessage()); } + public void testSubobjectsAuto() throws Exception { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "long"); + b.endObject(); + b.startObject("time.max"); + b.field("type", "long"); + b.endObject(); + b.startObject("attributes"); + { + b.field("type", "object"); + b.field("enabled", "false"); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertNotNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.attributes")); + } + + public void testSubobjectsAutoWithInnerObject() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + { + b.startObject("properties"); + { + b.startObject("max"); + b.field("type", "long"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + b.startObject("foo"); + b.field("type", "keyword"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.fieldType("metrics.service.foo")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time")); + assertNotNull(mapperService.documentMapper().mappers().getMapper("metrics.service.foo")); + } + + public void testSubobjectsAutoWithInnerNested() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("metrics.service"); + { + b.field("subobjects", "auto"); + b.startObject("properties"); + { + b.startObject("time"); + b.field("type", "nested"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + assertThat( + mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time"), + IsInstanceOf.instanceOf(NestedObjectMapper.class) + ); + } + + public void testSubobjectsAutoRoot() throws Exception { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service.time"); + b.field("type", "long"); + b.endObject(); + b.startObject("metrics.service.time.max"); + b.field("type", "long"); + b.endObject(); + b.startObject("metrics.attributes"); + { + b.field("type", "object"); + b.field("enabled", 
"false"); + } + b.endObject(); + }, "auto")); + assertNotNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.attributes")); + } + + public void testSubobjectsAutoRootWithInnerObject() throws IOException { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service.time"); + { + b.startObject("properties"); + { + b.startObject("max"); + b.field("type", "long"); + b.endObject(); + } + b.endObject(); + } + b.endObject(); + }, "auto")); + assertNull(mapperService.fieldType("metrics.service.time")); + assertNotNull(mapperService.fieldType("metrics.service.time.max")); + assertNotNull(mapperService.documentMapper().mappers().objectMappers().get("metrics.service.time")); + assertNotNull(mapperService.documentMapper().mappers().getMapper("metrics.service.time.max")); + } + + public void testSubobjectsAutoRootWithInnerNested() throws IOException { + MapperService mapperService = createMapperService(mappingWithSubobjects(b -> { + b.startObject("metrics.service"); + b.field("type", "nested"); + b.endObject(); + }, "auto")); + assertThat( + mapperService.documentMapper().mappers().objectMappers().get("metrics.service"), + IsInstanceOf.instanceOf(NestedObjectMapper.class) + ); + } + /** * Makes sure that an empty object mapper returns {@code null} from * {@link SourceLoader.SyntheticFieldLoader#docValuesLoader}. This @@ -554,8 +690,8 @@ public void testStoreArraySourceNoopInNonSyntheticSourceMode() throws IOExceptio } public void testNestedObjectWithMultiFieldsgetTotalFieldsCount() { - ObjectMapper.Builder mapperBuilder = new ObjectMapper.Builder("parent_size_1", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child_size_2", Explicit.IMPLICIT_TRUE).add( + ObjectMapper.Builder mapperBuilder = new ObjectMapper.Builder("parent_size_1", Optional.empty()).add( + new ObjectMapper.Builder("child_size_2", Optional.empty()).add( new TextFieldMapper.Builder("grand_child_size_3", createDefaultIndexAnalyzers(), false).addMultiField( new KeywordFieldMapper.Builder("multi_field_size_4", IndexVersion.current()) ) @@ -602,10 +738,26 @@ private ObjectMapper createObjectMapperWithAllParametersSet(CheckedConsumer fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); + assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); + } + + public void testFlattenSubobjectsAuto() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.AUTO)).add( + new ObjectMapper.Builder("child", Optional.empty()).add(new KeywordFieldMapper.Builder("keyword2", IndexVersion.current())) + ).add(new KeywordFieldMapper.Builder("keyword1", IndexVersion.current())).build(rootContext); + List fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); + assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); + } + + public void testFlattenSubobjectsFalse() { + MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.DISABLED)).add( + new ObjectMapper.Builder("child", Optional.empty()).add(new KeywordFieldMapper.Builder("keyword2", 
IndexVersion.current())) ).add(new KeywordFieldMapper.Builder("keyword1", IndexVersion.current())).build(rootContext); List fields = objectMapper.asFlattenedFieldMappers(rootContext).stream().map(FieldMapper::fullPath).toList(); assertThat(fields, containsInAnyOrder("parent.keyword1", "parent.child.keyword2")); @@ -613,8 +765,8 @@ public void testFlatten() { public void testFlattenDynamicIncompatible() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).add( - new ObjectMapper.Builder("child", Explicit.IMPLICIT_TRUE).dynamic(Dynamic.FALSE) + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.empty()).add( + new ObjectMapper.Builder("child", Optional.empty()).dynamic(Dynamic.FALSE) ).build(rootContext); IllegalArgumentException exception = expectThrows( @@ -631,7 +783,7 @@ public void testFlattenDynamicIncompatible() { public void testFlattenEnabledFalse() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.IMPLICIT_TRUE).enabled(false).build(rootContext); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.empty()).enabled(false).build(rootContext); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, @@ -646,7 +798,7 @@ public void testFlattenEnabledFalse() { public void testFlattenExplicitSubobjectsTrue() { MapperBuilderContext rootContext = MapperBuilderContext.root(false, false); - ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Explicit.EXPLICIT_TRUE).build(rootContext); + ObjectMapper objectMapper = new ObjectMapper.Builder("parent", Optional.of(ObjectMapper.Subobjects.ENABLED)).build(rootContext); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 9cd1df700a618..ffca4352f0ae6 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -27,7 +27,6 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -90,6 +89,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.BiFunction; import java.util.function.Function; @@ -384,7 +384,7 @@ public void testSearchRequestRuntimeFieldsAndMultifieldDetection() { public void testSyntheticSourceSearchLookup() throws IOException { // Build a mapping using synthetic source SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false).setSynthetic().build(); - RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( + RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( new KeywordFieldMapper.Builder("cat", IndexVersion.current()).ignoreAbove(100) ).build(MapperBuilderContext.root(true, false)); Mapping mapping = 
new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of()); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index c5aa03d5548f6..272901eb19351 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -426,8 +426,13 @@ protected static XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { + return mappingWithSubobjects(buildFields, "false"); + } + + protected static XContentBuilder mappingWithSubobjects(CheckedConsumer buildFields, String subobjects) + throws IOException { return topMapping(xContentBuilder -> { - xContentBuilder.field("subobjects", false); + xContentBuilder.field("subobjects", subobjects); xContentBuilder.startObject("properties"); buildFields.accept(xContentBuilder); xContentBuilder.endObject(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index a8c3de84572a7..71906a720e969 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -13,7 +13,6 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; -import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -61,6 +60,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.function.Function; @@ -475,7 +475,7 @@ private static ObjectMapper createInferenceField( @Nullable SemanticTextField.ModelSettings modelSettings, Function bitSetProducer ) { - return new ObjectMapper.Builder(INFERENCE_FIELD, Explicit.EXPLICIT_TRUE).dynamic(ObjectMapper.Dynamic.FALSE) + return new ObjectMapper.Builder(INFERENCE_FIELD, Optional.of(ObjectMapper.Subobjects.ENABLED)).dynamic(ObjectMapper.Dynamic.FALSE) .add(createChunksField(indexVersionCreated, modelSettings, bitSetProducer)) .build(context); } From c1daf18bf5f5178d43dc17d3a3d1f5db9773098e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 22 Aug 2024 08:40:35 -0400 Subject: [PATCH 015/352] ESQL: Support INLINESTATS grouped on expressions (#111690) This adds support for grouping `INLINESTATS` on an expression: ``` | INLINESTATS MAX(avg_worked_seconds) BY SUBSTRING(last_name, 0, 1) ``` This functions *exactly* as though you had run: ``` | EVAL `SUBSTRING(last_name, 0, 1)` = SUBSTRING(last_name, 0, 1) | INLINESTATS MAX(avg_worked_seconds) BY `SUBSTRING(last_name, 0, 1)` ``` The calculated field is retained in the results. This works by running the `LogicalPlanOptimizer` before forking off plan phases. If we get sub-phases then we rerun the `LogicalPlanOptimizer` on each phase so we can fuse *stuff*. Then I had to modify the optimizer rule that implements expressions in the `BY` position on `STATS` so it worked on `INLINESTATS`. And that's it? That's it?! Really?
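To make the rewrite concrete, here is a minimal, self-contained Java sketch of the idea — toy string types and a hypothetical class name, not the real `LogicalPlan`/`Eval` machinery — showing how an expression in the `BY` position gets hoisted into a synthetic `EVAL` and the grouping becomes a reference to it:

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class GroupingRewriteSketch {
    public static void main(String[] args) {
        // The grouping of: INLINESTATS MAX(avg_worked_seconds) BY SUBSTRING(last_name, 0, 1)
        List<String> groupings = List.of("SUBSTRING(last_name, 0, 1)");

        Map<String, String> evals = new LinkedHashMap<>(); // synthetic alias -> hoisted expression
        List<String> rewritten = new ArrayList<>();        // groupings after the rewrite
        for (String g : groupings) {
            if (g.matches("\\w+")) {
                rewritten.add(g);                          // bare column reference: keep as-is
            } else {
                String alias = "`" + g + "`";              // synthetic name; duplicate aliases overwrite, last one wins
                evals.put(alias, g);
                rewritten.add(alias);                      // group on a reference to the eval
            }
        }

        evals.forEach((alias, expr) -> System.out.println("| EVAL " + alias + " = " + expr));
        System.out.println("| INLINESTATS MAX(avg_worked_seconds) BY " + String.join(", ", rewritten));
    }
}
```

Running it prints exactly the two rewritten lines from the example above.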
--- docs/changelog/111690.yaml | 5 + .../xpack/esql/ccq/MultiClusterSpecIT.java | 1 + .../src/main/resources/inlinestats.csv-spec | 164 +++++++++++++++++- .../src/main/resources/stats.csv-spec | 31 ++++ .../src/main/resources/union_types.csv-spec | 27 ++- .../xpack/esql/action/EsqlCapabilities.java | 5 + .../xpack/esql/analysis/Analyzer.java | 4 +- .../esql/optimizer/LogicalPlanOptimizer.java | 1 + .../xpack/esql/optimizer/OptimizerRules.java | 4 +- .../optimizer/rules/RemoveStatsOverride.java | 49 +++--- .../ReplaceStatsAggExpressionWithEval.java | 2 +- .../ReplaceStatsNestedExpressionWithEval.java | 27 ++- .../xpack/esql/plan/logical/Aggregate.java | 4 +- .../xpack/esql/plan/logical/InlineStats.java | 12 +- .../xpack/esql/plan/logical/Phased.java | 4 +- .../xpack/esql/plan/logical/Stats.java | 18 +- .../xpack/esql/session/EsqlSession.java | 39 +++-- .../elasticsearch/xpack/esql/CsvTests.java | 4 +- .../optimizer/LogicalPlanOptimizerTests.java | 26 +++ .../xpack/esql/plan/logical/PhasedTests.java | 9 +- 20 files changed, 360 insertions(+), 76 deletions(-) create mode 100644 docs/changelog/111690.yaml diff --git a/docs/changelog/111690.yaml b/docs/changelog/111690.yaml new file mode 100644 index 0000000000000..36e715744ad88 --- /dev/null +++ b/docs/changelog/111690.yaml @@ -0,0 +1,5 @@ +pr: 111690 +summary: "ESQL: Support INLINESTATS grouped on expressions" +area: ES|QL +type: enhancement +issues: [] diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index d6ab99f0b21ac..3e799730f7269 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -111,6 +111,7 @@ protected void shouldSkipTest(String testName) throws IOException { isEnabled(testName, instructions, Clusters.oldVersion()) ); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats")); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats_v2")); } private TestFeatureService remoteFeaturesService() throws IOException { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec index e52f1e45cead8..3f2e14f74174b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec @@ -67,11 +67,70 @@ emp_no:integer | avg_worked_seconds:long | gender:keyword | max_avg_worked_secon 10030 | 394597613 | M | 394597613 ; -// TODO allow inline calculation like BY l = SUBSTRING( maxOfLongByCalculatedKeyword -required_capability: inlinestats +required_capability: inlinestats_v2 // tag::longest-tenured-by-first[] +FROM employees +| KEEP emp_no, avg_worked_seconds, last_name +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +// end::longest-tenured-by-first[] +; + +// tag::longest-tenured-by-first-result[] +emp_no:integer | avg_worked_seconds:long | last_name:keyword | SUBSTRING(last_name, 0, 1):keyword | 
max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | A | 372660279 + 10074 | 382397583 | Bernatsky | B | 382397583 + 10044 | 387408356 | Casley | C | 387408356 + 10030 | 394597613 | Demeyer | D | 394597613 + 10087 | 305782871 | Eugenio | E | 305782871 +// end::longest-tenured-by-first-result[] +; + +maxOfLongByCalculatedNamedKeyword +required_capability: inlinestats_v2 + +FROM employees +| KEEP emp_no, avg_worked_seconds, last_name +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY l = SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +; + +emp_no:integer | avg_worked_seconds:long | last_name:keyword | l:keyword | max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | A | 372660279 + 10074 | 382397583 | Bernatsky | B | 382397583 + 10044 | 387408356 | Casley | C | 387408356 + 10030 | 394597613 | Demeyer | D | 394597613 + 10087 | 305782871 | Eugenio | E | 305782871 +; + +maxOfLongByCalculatedDroppedKeyword +required_capability: inlinestats_v2 + +FROM employees +| INLINESTATS max_avg_worked_seconds = MAX(avg_worked_seconds) BY l = SUBSTRING(last_name, 0, 1) +| WHERE max_avg_worked_seconds == avg_worked_seconds +| KEEP emp_no, avg_worked_seconds, last_name, max_avg_worked_seconds +| SORT last_name ASC +| LIMIT 5 +; + +emp_no:integer | avg_worked_seconds:long | last_name:keyword | max_avg_worked_seconds:long + 10065 | 372660279 | Awdeh | 372660279 + 10074 | 382397583 | Bernatsky | 382397583 + 10044 | 387408356 | Casley | 387408356 + 10030 | 394597613 | Demeyer | 394597613 + 10087 | 305782871 | Eugenio | 305782871 +; + +maxOfLongByEvaledKeyword +required_capability: inlinestats + FROM employees | EVAL l = SUBSTRING(last_name, 0, 1) | KEEP emp_no, avg_worked_seconds, l @@ -79,17 +138,14 @@ FROM employees | WHERE max_avg_worked_seconds == avg_worked_seconds | SORT l ASC | LIMIT 5 -// end::longest-tenured-by-first[] ; -// tag::longest-tenured-by-first-result[] emp_no:integer | avg_worked_seconds:long | l:keyword | max_avg_worked_seconds:long 10065 | 372660279 | A | 372660279 10074 | 382397583 | B | 382397583 10044 | 387408356 | C | 387408356 10030 | 394597613 | D | 394597613 10087 | 305782871 | E | 305782871 -// end::longest-tenured-by-first-result[] ; maxOfLongByInt @@ -499,3 +555,101 @@ emp_no:integer | salary:integer | ninety_fifth_salary:double 10029 | 74999 | 73584.95 10045 | 74970 | 73584.95 ; + +byTwoCalculated +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + BY lat_10 = ROUND(ST_Y(location), -1) + , lon_10 = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | lat_10:double | lon_10:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 50 | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | -10 | 40 | 4 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | 20 | -100 | 2 +; + +byTwoCalculatedSecondOverwrites +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + BY x = ROUND(ST_Y(location), -1) + , x = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | x:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | 40 | 2 + ZLO | 7 | POINT 
(-104.560095200097 19.1480860285854) | -100 | 2 +; + +byTwoCalculatedSecondOverwritesReferencingFirst +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| EVAL x = ST_X(location) +| INLINESTATS min_sl=MIN(scalerank) + BY x = ROUND(x, -1) + , x = ROUND(x, -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | x:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | 40 | 2 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | -100 | 2 +; + + +groupShadowsAgg +required_capability: inlinestats_v2 + +FROM airports +| WHERE abbrev IS NOT NULL +| KEEP abbrev, scalerank, location +| INLINESTATS min_sl=MIN(scalerank) + , lat_10 = ROUND(ST_Y(location), -1) + BY lat_10 = ROUND(ST_Y(location), -1) + , lon_10 = ROUND(ST_X(location), -1) +| SORT abbrev DESC +| LIMIT 3 +; + +abbrev:keyword | scalerank:integer | location:geo_point | lat_10:double | lon_10:double | min_sl:integer + ZRH | 3 | POINT(8.56221279534765 47.4523895064915) | 50 | 10 | 2 + ZNZ | 4 | POINT (39.2223319841558 -6.21857034620282) | -10 | 40 | 4 + ZLO | 7 | POINT (-104.560095200097 19.1480860285854) | 20 | -100 | 2 +; + +groupShadowsField +required_capability: inlinestats_v2 + + FROM employees +| KEEP emp_no, salary, hire_date +| INLINESTATS avg_salary = AVG(salary) + BY hire_date = DATE_TRUNC(1 year, hire_date) +| WHERE salary > avg_salary +| SORT emp_no ASC +| LIMIT 4 +; + +emp_no:integer | salary:integer | hire_date:datetime | avg_salary:double + 10001 | 57305 | 1986-01-01T00:00:00Z | 43869.63636363636 + 10002 | 56371 | 1985-01-01T00:00:00Z | 51831.818181818184 + 10003 | 61805 | 1986-01-01T00:00:00Z | 43869.63636363636 + 10005 | 63528 | 1989-01-01T00:00:00Z | 53487.07692307692 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index fc607edf4d212..3be846630d5b8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -1618,6 +1618,37 @@ m:i | o:i | l:i | s:i 1 | 39729 | 1 | 39729 ; +byTwoCalculatedSecondOverwrites +FROM employees +| STATS m = MAX(salary) by l = salary + 1, l = languages + 1 +| SORT m +| LIMIT 5 +; + + m:i | l:i +66817 | 6 +73578 | 3 +73717 | 2 +74572 | 5 +74970 | 4 +; + +byTwoCalculatedSecondOverwritesReferencingFirst +FROM employees +| EVAL l = languages +| STATS m = MAX(salary) by l = l + 1, l = l + 1 +| SORT m +| LIMIT 5 +; + + m:i | l:i +66817 | 6 +73578 | 3 +73717 | 2 +74572 | 5 +74970 | 4 +; + nestedAggsOverGroupingWithAliasAndProjection#[skip:-8.13.99,reason:supported in 8.14] FROM employees | STATS e = length(f) + 1, c = count(*) by f = first_name diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index 6d1d4c7892886..6819727be0131 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -977,7 +977,25 @@ event_duration:long | _index:keyword | ts:date | ts_str:k ; -inlineStatsUnionGroup +inlineStatsUnionGroup-Ignore +required_capability: union_types +required_capability: inlinestats + +FROM sample_data, sample_data_ts_long +| INLINESTATS count = COUNT(*) + BY @timestamp = 
SUBSTRING(TO_STRING(@timestamp), 0, 7) +| SORT client_ip ASC, @timestamp ASC +| LIMIT 4 +; + +client_ip:ip | event_duration:long | message:keyword | @timestamp:keyword | count:long + 172.21.0.5 | 1232382 | Disconnected | 1698068 | 1 + 172.21.0.5 | 1232382 | Disconnected | 2023-10 | 7 +172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 1698064 | 1 +172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 2023-10 | 7 +; + +inlineStatsUnionGroupWithEval-Ignore required_capability: union_types required_capability: inlinestats @@ -993,16 +1011,15 @@ client_ip:ip | event_duration:long | message:keyword | @timestamp:keyword 172.21.0.5 | 1232382 | Disconnected | 2023-10 | 7 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 1698064 | 1 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | 2023-10 | 7 - ; -inlineStatsUnionGroupTogether +inlineStatsUnionGroupTogether-Ignore required_capability: union_types required_capability: inlinestats FROM sample_data, sample_data_ts_long -| EVAL @timestamp = TO_STRING(TO_DATETIME(@timestamp)) -| INLINESTATS count = COUNT(*) BY @timestamp +| INLINESTATS count = COUNT(*) + BY @timestamp = TO_STRING(TO_DATETIME(@timestamp)) | SORT client_ip ASC, @timestamp ASC | LIMIT 4 ; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index b60701fe19365..8d478408e8781 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -57,6 +57,11 @@ public enum Cap { */ INLINESTATS(EsqlPlugin.INLINESTATS_FEATURE_FLAG), + /** + * Support for the expressions in grouping in {@code INLINESTATS} syntax. + */ + INLINESTATS_V2(EsqlPlugin.INLINESTATS_FEATURE_FLAG), + /** * Support for aggregation function {@code TOP}. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 3ffb4acbe6455..5b59117ad356b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -454,7 +454,7 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { } groupings = newGroupings; if (changed.get()) { - stats = stats.with(newGroupings, stats.aggregates()); + stats = stats.with(stats.child(), newGroupings, stats.aggregates()); changed.set(false); } } @@ -483,7 +483,7 @@ private LogicalPlan resolveStats(Stats stats, List childrenOutput) { newAggregates.add(agg); } - stats = changed.get() ? stats.with(groupings, newAggregates) : stats; + stats = changed.get() ? 
stats.with(stats.child(), groupings, newAggregates) : stats; } return (LogicalPlan) stats; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index e55b090bbb35f..282f46e0de7bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -155,6 +155,7 @@ public LogicalPlan optimize(LogicalPlan verified) { if (failures.hasFailures()) { throw new VerificationException(failures); } + optimized.setOptimized(); return optimized; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 4d3134db34a0d..733fe2e8762bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.InlineStats; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.Row; @@ -99,7 +100,8 @@ protected AttributeSet generates(LogicalPlan logicalPlan) { if (logicalPlan instanceof EsRelation || logicalPlan instanceof LocalRelation || logicalPlan instanceof Row - || logicalPlan instanceof Aggregate) { + || logicalPlan instanceof Aggregate + || logicalPlan instanceof InlineStats) { return logicalPlan.outputSet(); } if (logicalPlan instanceof GeneratingPlan generating) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java index 5592a04e2f813..0f8e0f450e585 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/RemoveStatsOverride.java @@ -11,26 +11,30 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerRules; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Stats; import java.util.ArrayList; import java.util.List; /** - * Rule that removes Aggregate overrides in grouping, aggregates and across them inside. - * The overrides appear when the same alias is used multiple times in aggregations and/or groupings: - * STATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10 + * Removes {@link Stats} overrides in grouping, aggregates and across them inside. 
+ * The overrides appear when the same alias is used multiple times in aggregations + * and/or groupings: + * {@code STATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10} * becomes - * STATS BY x = c + 10 - * That is the last declaration for a given alias, overrides all the other declarations, with - * groups having priority vs aggregates. + * {@code STATS BY x = c + 10} + * and + * {@code INLINESTATS x = COUNT(*), x = MIN(a) BY x = b + 1, x = c + 10} + * becomes + * {@code INLINESTATS BY x = c + 10} + * This is "last one wins", with groups having priority over aggregates. * Separately, it replaces expressions used as group keys inside the aggregates with references: - * STATS max(a + b + 1) BY a + b + * {@code STATS max(a + b + 1) BY a + b} * becomes - * STATS max($x + 1) BY $x = a + b + * {@code STATS max($x + 1) BY $x = a + b} */ -public final class RemoveStatsOverride extends AnalyzerRules.AnalyzerRule { +public final class RemoveStatsOverride extends AnalyzerRules.AnalyzerRule { @Override protected boolean skipResolved() { @@ -38,19 +42,18 @@ protected boolean skipResolved() { } @Override - protected LogicalPlan rule(Aggregate agg) { - return agg.resolved() ? removeAggDuplicates(agg) : agg; - } - - private static Aggregate removeAggDuplicates(Aggregate agg) { - var groupings = agg.groupings(); - var aggregates = agg.aggregates(); - - groupings = removeDuplicateNames(groupings); - aggregates = removeDuplicateNames(aggregates); - - // replace EsqlAggregate with Aggregate - return new Aggregate(agg.source(), agg.child(), agg.aggregateType(), groupings, aggregates); + protected LogicalPlan rule(LogicalPlan p) { + if (p.resolved() == false) { + return p; + } + if (p instanceof Stats stats) { + return (LogicalPlan) stats.with( + stats.child(), + removeDuplicateNames(stats.groupings()), + removeDuplicateNames(stats.aggregates()) + ); + } + return p; } private static List removeDuplicateNames(List list) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java index 1746931f9a63e..ea0a302f7131d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsAggExpressionWithEval.java @@ -34,7 +34,7 @@ * becomes * stats a1 = sum(a), a2 = min(b) by x | eval a = a1 + a2 | keep a, x * The rule also considers expressions applied over groups: - * stats a = x + 1 by x becomes stats by x | eval a = x + 1 | keep a, x + * {@code STATS a = x + 1 BY x} becomes {@code STATS BY x | EVAL a = x + 1 | KEEP a, x} * And to combine the two: * stats a = x + count(*) by x * becomes diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java index 206bd6d3d1c76..02b39f6babef0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/ReplaceStatsNestedExpressionWithEval.java @@ -15,9 +15,9 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import 
org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Stats; import java.util.ArrayList; import java.util.HashMap; @@ -25,15 +25,26 @@ import java.util.Map; /** - * Replace nested expressions inside an aggregate with synthetic eval (which end up being projected away by the aggregate). - * stats sum(a + 1) by x % 2 + * Replace nested expressions inside a {@link Stats} with synthetic eval. + * {@code STATS SUM(a + 1) BY x % 2} * becomes - * eval `a + 1` = a + 1, `x % 2` = x % 2 | stats sum(`a+1`_ref) by `x % 2`_ref + * {@code EVAL `a + 1` = a + 1, `x % 2` = x % 2 | STATS SUM(`a+1`_ref) BY `x % 2`_ref} + * and + * {@code INLINESTATS SUM(a + 1) BY x % 2} + * becomes + * {@code EVAL `a + 1` = a + 1, `x % 2` = x % 2 | INLINESTATS SUM(`a+1`_ref) BY `x % 2`_ref} */ -public final class ReplaceStatsNestedExpressionWithEval extends OptimizerRules.OptimizerRule { +public final class ReplaceStatsNestedExpressionWithEval extends OptimizerRules.OptimizerRule { @Override - protected LogicalPlan rule(Aggregate aggregate) { + protected LogicalPlan rule(LogicalPlan p) { + if (p instanceof Stats stats) { + return rule(stats); + } + return p; + } + + private LogicalPlan rule(Stats aggregate) { List evals = new ArrayList<>(); Map evalNames = new HashMap<>(); Map groupingAttributes = new HashMap<>(); @@ -134,10 +145,10 @@ protected LogicalPlan rule(Aggregate aggregate) { var aggregates = aggsChanged.get() ? newAggs : aggregate.aggregates(); var newEval = new Eval(aggregate.source(), aggregate.child(), evals); - aggregate = new Aggregate(aggregate.source(), newEval, aggregate.aggregateType(), groupings, aggregates); + aggregate = aggregate.with(newEval, groupings, aggregates); } - return aggregate; + return (LogicalPlan) aggregate; } static String syntheticName(Expression expression, AggregateFunction af, int counter) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java index 01132425df11f..5b6fe8c0112c6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Aggregate.java @@ -108,8 +108,8 @@ public Aggregate replaceChild(LogicalPlan newChild) { } @Override - public Aggregate with(List newGroupings, List newAggregates) { - return new Aggregate(source(), child(), aggregateType(), newGroupings, newAggregates); + public Aggregate with(LogicalPlan child, List newGroupings, List newAggregates) { + return new Aggregate(source(), child, aggregateType(), newGroupings, newAggregates); } public AggregateType aggregateType() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java index 187b3542e0607..b37976c00ad06 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java @@ -98,8 +98,8 @@ public InlineStats replaceChild(LogicalPlan newChild) { } @Override - 
public InlineStats with(List newGroupings, List newAggregates) { - return new InlineStats(source(), child(), newGroupings, newAggregates); + public InlineStats with(LogicalPlan child, List newGroupings, List newAggregates) { + return new InlineStats(source(), child, newGroupings, newAggregates); } @Override @@ -121,11 +121,13 @@ public boolean expressionsResolved() { public List output() { if (this.lazyOutput == null) { List addedFields = new ArrayList<>(); - AttributeSet childOutput = child().outputSet(); + AttributeSet set = child().outputSet(); for (NamedExpression agg : aggregates) { - if (childOutput.contains(agg) == false) { + Attribute att = agg.toAttribute(); + if (set.contains(att) == false) { addedFields.add(agg); + set.add(att); } } @@ -207,7 +209,7 @@ private LogicalPlan groupedNextPhase(List schema, List firstPha if (g instanceof Attribute a) { groupingAttributes.add(a); } else { - throw new UnsupportedOperationException("INLINESTATS doesn't support expressions in grouping position yet"); + throw new IllegalStateException("optimized plans should only have attributes in groups, but got [" + g + "]"); } } List leftFields = new ArrayList<>(groupingAttributes.size()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java index ba0f97cdfa30b..6923f9e137eab 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Phased.java @@ -91,8 +91,8 @@ public interface Phased { * Or {@code null} if there aren't any {@linkplain Phased} operations. */ static LogicalPlan extractFirstPhase(LogicalPlan plan) { - if (false == plan.analyzed()) { - throw new IllegalArgumentException("plan must be analyzed"); + if (false == plan.optimized()) { + throw new IllegalArgumentException("plan must be optimized"); } var firstPhase = new Holder(); plan.forEachUp(t -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java index 35d5229d4e52f..c46c735e7482e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Stats.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.List; @@ -16,10 +17,25 @@ * STATS-like operations. Like {@link Aggregate} and {@link InlineStats}. */ public interface Stats { + /** + * The user supplied text in the query for this command. + */ + Source source(); + /** * Rebuild this plan with new groupings and new aggregates. */ - Stats with(List newGroupings, List newAggregates); + Stats with(LogicalPlan child, List newGroupings, List newAggregates); + + /** + * Have all the expressions in this plan been resolved? + */ + boolean expressionsResolved(); + + /** + * The operation directly before this one in the plan. + */ + LogicalPlan child(); /** * List containing both the aggregate expressions and grouping expressions. 
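The practical effect of the new `Stats` interface is that analyzer and optimizer rules no longer need to know whether they are looking at an `Aggregate` (`STATS`) or an `InlineStats` (`INLINESTATS`). A minimal sketch of a rule body written against the interface, assuming only the methods shown in this patch (`rewriteGroupings` is a hypothetical placeholder for whatever transformation a rule applies):

[source,java]
----
import java.util.List;

import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.esql.plan.logical.Stats;

final class StatsRuleSketch {
    LogicalPlan apply(LogicalPlan p) {
        // Only rewrite fully resolved Stats-like nodes (Aggregate, InlineStats).
        if (p.resolved() && p instanceof Stats stats) {
            // Both implementations rebuild themselves through with(child, groupings, aggregates).
            return (LogicalPlan) stats.with(stats.child(), rewriteGroupings(stats.groupings()), stats.aggregates());
        }
        return p;
    }

    // Hypothetical helper standing in for a rule's actual transformation.
    private List<Expression> rewriteGroupings(List<Expression> groupings) {
        return groupings;
    }
}
----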
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index a6bc7befccc80..25d155ccfde07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -125,7 +125,9 @@ public void execute( LOGGER.debug("ESQL query:\n{}", request.query()); analyzedPlan( parse(request.query(), request.params()), - listener.delegateFailureAndWrap((next, analyzedPlan) -> executeAnalyzedPlan(request, runPhase, analyzedPlan, next)) + listener.delegateFailureAndWrap( + (next, analyzedPlan) -> executeOptimizedPlan(request, runPhase, optimizedPlan(analyzedPlan), next) + ) ); } @@ -133,17 +135,17 @@ public void execute( * Execute an analyzed plan. Most code should prefer calling {@link #execute} but * this is public for testing. See {@link Phased} for the sequence of operations. */ - public void executeAnalyzedPlan( + public void executeOptimizedPlan( EsqlQueryRequest request, BiConsumer> runPhase, - LogicalPlan analyzedPlan, + LogicalPlan optimizedPlan, ActionListener listener ) { - LogicalPlan firstPhase = Phased.extractFirstPhase(analyzedPlan); + LogicalPlan firstPhase = Phased.extractFirstPhase(optimizedPlan); if (firstPhase == null) { - runPhase.accept(logicalPlanToPhysicalPlan(analyzedPlan, request), listener); + runPhase.accept(logicalPlanToPhysicalPlan(optimizedPlan, request), listener); } else { - executePhased(new ArrayList<>(), analyzedPlan, request, firstPhase, runPhase, listener); + executePhased(new ArrayList<>(), optimizedPlan, request, firstPhase, runPhase, listener); } } @@ -155,11 +157,11 @@ private void executePhased( BiConsumer> runPhase, ActionListener listener ) { - PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(firstPhase, request); + PhysicalPlan physicalPlan = logicalPlanToPhysicalPlan(optimizedPlan(firstPhase), request); runPhase.accept(physicalPlan, listener.delegateFailureAndWrap((next, result) -> { try { profileAccumulator.addAll(result.profiles()); - LogicalPlan newMainPlan = Phased.applyResultsFromFirstPhase(mainPlan, physicalPlan.output(), result.pages()); + LogicalPlan newMainPlan = optimizedPlan(Phased.applyResultsFromFirstPhase(mainPlan, physicalPlan.output(), result.pages())); LogicalPlan newFirstPhase = Phased.extractFirstPhase(newMainPlan); if (newFirstPhase == null) { PhysicalPlan finalPhysicalPlan = logicalPlanToPhysicalPlan(newMainPlan, request); @@ -235,7 +237,7 @@ private void preAnalyze(LogicalPlan parsed, BiFunction void preAnalyzeIndices(LogicalPlan parsed, ActionListener listener, Set enrichPolicyMatchFields) { + private void preAnalyzeIndices(LogicalPlan parsed, ActionListener listener, Set enrichPolicyMatchFields) { PreAnalyzer.PreAnalysis preAnalysis = new PreAnalyzer().preAnalyze(parsed); // TODO we plan to support joins in the future when possible, but for now we'll just fail early if we see one if (preAnalysis.indices.size() > 1) { @@ -352,8 +354,8 @@ private static Set subfields(Set names) { return names.stream().filter(name -> name.endsWith(WILDCARD) == false).map(name -> name + ".*").collect(Collectors.toSet()); } - private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan logicalPlan, EsqlQueryRequest request) { - PhysicalPlan physicalPlan = optimizedPhysicalPlan(logicalPlan); + private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan optimizedPlan, EsqlQueryRequest request) { + 
PhysicalPlan physicalPlan = optimizedPhysicalPlan(optimizedPlan); physicalPlan = physicalPlan.transformUp(FragmentExec.class, f -> { QueryBuilder filter = request.filter(); if (filter != null) { @@ -371,20 +373,25 @@ private PhysicalPlan logicalPlanToPhysicalPlan(LogicalPlan logicalPlan, EsqlQuer } public LogicalPlan optimizedPlan(LogicalPlan logicalPlan) { - assert logicalPlan.analyzed(); + if (logicalPlan.analyzed() == false) { + throw new IllegalStateException("Expected analyzed plan"); + } var plan = logicalPlanOptimizer.optimize(logicalPlan); LOGGER.debug("Optimized logicalPlan plan:\n{}", plan); return plan; } - public PhysicalPlan physicalPlan(LogicalPlan logicalPlan) { - var plan = mapper.map(optimizedPlan(logicalPlan)); + public PhysicalPlan physicalPlan(LogicalPlan optimizedPlan) { + if (optimizedPlan.optimized() == false) { + throw new IllegalStateException("Expected optimized plan"); + } + var plan = mapper.map(optimizedPlan); LOGGER.debug("Physical plan:\n{}", plan); return plan; } - public PhysicalPlan optimizedPhysicalPlan(LogicalPlan logicalPlan) { - var plan = physicalPlanOptimizer.optimize(physicalPlan(logicalPlan)); + public PhysicalPlan optimizedPhysicalPlan(LogicalPlan optimizedPlan) { + var plan = physicalPlanOptimizer.optimize(physicalPlan(optimizedPlan)); LOGGER.debug("Optimized physical plan:\n{}", plan); return plan; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 76e0466af4da0..f30db1bf9bba2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -415,10 +415,10 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { PlainActionFuture listener = new PlainActionFuture<>(); - session.executeAnalyzedPlan( + session.executeOptimizedPlan( new EsqlQueryRequest(), runPhase(bigArrays, physicalOperationProviders), - analyzed, + session.optimizedPlan(analyzed), listener.delegateFailureAndWrap( // Wrap so we can capture the warnings in the calling thread (next, result) -> next.onResponse( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index a294f33ece5c3..74f95e3defbd3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -127,6 +127,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.InlineStats; import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; @@ -4542,6 +4543,31 @@ public void testReplaceSortByExpressionsWithStats() { as(aggregate.child(), EsRelation.class); } + /** + * Expects + * Limit[1000[INTEGER]] + * \_InlineStats[[emp_no % 2{r}#6],[COUNT(salary{f}#12) AS c, emp_no % 2{r}#6]] + * \_Eval[[emp_no{f}#7 % 2[INTEGER] AS emp_no % 2]] + * \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] 
+ */ + public void testInlinestatsNestedExpressionsInGroups() { + var plan = optimizedPlan(""" + FROM test + | INLINESTATS c = COUNT(salary) by emp_no % 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), InlineStats.class); + var groupings = agg.groupings(); + var aggs = agg.aggregates(); + var ref = as(groupings.get(0), ReferenceAttribute.class); + assertThat(aggs.get(1), is(ref)); + var eval = as(agg.child(), Eval.class); + assertThat(eval.fields(), hasSize(1)); + assertThat(eval.fields().get(0).toAttribute(), is(ref)); + assertThat(eval.fields().get(0).name(), is("emp_no % 2")); + } + /** * Expects * diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java index 9a0f1ba3efe1d..5e45de6c77c42 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/PhasedTests.java @@ -31,14 +31,14 @@ public class PhasedTests extends ESTestCase { public void testZeroLayers() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); - relation.setAnalyzed(); + relation.setOptimized(); assertThat(Phased.extractFirstPhase(relation), nullValue()); } public void testOneLayer() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); LogicalPlan orig = new Dummy(Source.synthetic("orig"), relation); - orig.setAnalyzed(); + orig.setOptimized(); assertThat(Phased.extractFirstPhase(orig), sameInstance(relation)); LogicalPlan finalPhase = Phased.applyResultsFromFirstPhase( orig, @@ -49,6 +49,7 @@ public void testOneLayer() { finalPhase, equalTo(new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD))))) ); + finalPhase.setOptimized(); assertThat(Phased.extractFirstPhase(finalPhase), nullValue()); } @@ -56,7 +57,7 @@ public void testTwoLayer() { EsRelation relation = new EsRelation(Source.synthetic("relation"), new EsIndex("foo", Map.of()), IndexMode.STANDARD, false); LogicalPlan inner = new Dummy(Source.synthetic("inner"), relation); LogicalPlan orig = new Dummy(Source.synthetic("outer"), inner); - orig.setAnalyzed(); + orig.setOptimized(); assertThat( "extractFirstPhase should call #firstPhase on the earliest child in the plan", Phased.extractFirstPhase(orig), @@ -67,6 +68,7 @@ public void testTwoLayer() { List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), List.of() ); + secondPhase.setOptimized(); assertThat( "applyResultsFromFirstPhase should call #nextPhase one th earliest child in the plan", secondPhase, @@ -84,6 +86,7 @@ public void testTwoLayer() { List.of(new ReferenceAttribute(Source.EMPTY, "foo", DataType.KEYWORD)), List.of() ); + finalPhase.setOptimized(); assertThat( finalPhase, equalTo(new Row(orig.source(), List.of(new Alias(orig.source(), "foo", new Literal(orig.source(), "foo", DataType.KEYWORD))))) From f481b0722bf47f03bd00ade814d3347f4555e9d0 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Thu, 22 Aug 2024 14:51:19 +0200 Subject: [PATCH 016/352] Always check crsType when folding spatial functions (#112090) * Always check crsType when folding spatial functions * Update docs/changelog/112090.yaml * Only require capability for fixed test The other tests passed on older versions anyway. 
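The shape of the fix is visible in the diff below: `crsType` becomes private and every `fold()` reads it through the `crsType()` accessor. As a rough sketch of why that matters, assuming the accessor resolves the CRS type on demand (illustrative names only, not the actual `BinarySpatialFunction` internals):

[source,java]
----
final class CrsHolderSketch {
    enum SpatialCrsType { GEO, CARTESIAN }

    private SpatialCrsType crsType; // may still be unset before type resolution runs

    // Reading through the accessor guarantees a resolved value; reading the
    // field directly could observe an unset value and fold a literal spatial
    // expression with the wrong coordinate-system handling.
    SpatialCrsType crsType() {
        if (crsType == null) {
            crsType = resolveFromArgumentTypes(); // assumption: resolved lazily
        }
        return crsType;
    }

    private SpatialCrsType resolveFromArgumentTypes() {
        return SpatialCrsType.GEO; // placeholder for the real resolution logic
    }
}
----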
--- docs/changelog/112090.yaml | 6 ++ .../src/main/resources/spatial.csv-spec | 77 +++++++++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 ++ .../scalar/spatial/BinarySpatialFunction.java | 2 +- .../scalar/spatial/SpatialContains.java | 6 +- .../scalar/spatial/SpatialDisjoint.java | 6 +- .../scalar/spatial/SpatialIntersects.java | 6 +- .../scalar/spatial/SpatialWithin.java | 6 +- .../function/scalar/spatial/StDistance.java | 2 +- 9 files changed, 102 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/112090.yaml diff --git a/docs/changelog/112090.yaml b/docs/changelog/112090.yaml new file mode 100644 index 0000000000000..6d6e4d0851523 --- /dev/null +++ b/docs/changelog/112090.yaml @@ -0,0 +1,6 @@ +pr: 112090 +summary: Always check `crsType` when folding spatial functions +area: Geo +type: bug +issues: + - 112089 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 02067e9dbe490..35416c7945128 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -487,6 +487,17 @@ POINT (42.97109629958868 14.7552534006536) | 1 ############################################### # Tests for ST_INTERSECTS on GEO_POINT type +literalGeoPointIntersectsLiteralPolygon +required_capability: st_intersects + +ROW pt = TO_GEOPOINT("POINT(0 85)"), polygon = TO_GEOSHAPE("POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70))") +| EVAL intersects = ST_INTERSECTS(pt, polygon) +; + +pt:geo_point | polygon:geo_shape | intersects:boolean +POINT(0 85) | POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70)) | true +; + pointIntersectsLiteralPolygon required_capability: st_intersects @@ -889,6 +900,34 @@ wkt:keyword | pt:geo_point | distance:double "POINT(1 -1)" | POINT(1 -1) | 157249.59498573805 ; +literalGeoPointDistanceOneDegree +required_capability: st_distance + +ROW wkt = ["POINT(1 0)", "POINT(-1 0)", "POINT(0 1)", "POINT(0 -1)"] +| MV_EXPAND wkt +| EVAL pt = TO_GEOPOINT(wkt) +| EVAL distance = ST_DISTANCE(pt, TO_GEOPOINT("POINT(0 0)")) +; + +wkt:keyword | pt:geo_point | distance:double +"POINT(1 0)" | POINT(1 0) | 111195.07310665186 +"POINT(-1 0)" | POINT(-1 0) | 111195.08242688453 +"POINT(0 1)" | POINT(0 1) | 111195.07776676829 +"POINT(0 -1)" | POINT(0 -1) | 111195.08242688453 +; + +twoCitiesPointDistanceGeo +required_capability: st_distance +required_capability: spatial_functions_fix_crstype_folding + +ROW p1 = TO_GEOPOINT("POINT(-90.82814 29.79511)"), p2 = TO_GEOPOINT("POINT(-90.79731509999999 29.8835389)") +| EVAL d = ST_DISTANCE(p1, p2) +; + +p1:geo_point | p2:geo_point | d:double +POINT (-90.82814 29.79511) | POINT (-90.79731509999999 29.8835389) | 10272.529272836206 +; + airportCityLocationPointDistance required_capability: st_distance @@ -1433,6 +1472,17 @@ POINT (726480.0130685265 3359566.331716279) | 849 ############################################### # Tests for ST_INTERSECTS on CARTESIAN_POINT type +literalCartesianPointIntersectsLiteralPolygon +required_capability: st_intersects + +ROW pt = TO_CARTESIANPOINT("POINT(0 85)"), polygon = TO_CARTESIANSHAPE("POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70))") +| EVAL intersects = ST_INTERSECTS(pt, polygon) +; + +pt:cartesian_point | polygon:cartesian_shape | intersects:boolean +POINT(0 85) | POLYGON((-10 70, 10 70, 10 85, -10 85, -10 70)) | true +; + cartesianCentroidFromAirportsAfterIntersectsPredicate required_capability: st_intersects @@ 
-1996,6 +2046,33 @@ wkt:keyword | pt:cartesian_point | distance:double "POINT(1 -1)" | POINT(1 -1) | 1.4142135623730951 ; +literalCartesianPointDistanceOneUnit +required_capability: st_distance + +ROW wkt = ["POINT(1 0)", "POINT(-1 0)", "POINT(0 1)", "POINT(0 -1)"] +| MV_EXPAND wkt +| EVAL pt = TO_CARTESIANPOINT(wkt) +| EVAL distance = ST_DISTANCE(pt, TO_CARTESIANPOINT("POINT(0 0)")) +; + +wkt:keyword | pt:cartesian_point | distance:double +"POINT(1 0)" | POINT(1 0) | 1.0 +"POINT(-1 0)" | POINT(-1 0) | 1.0 +"POINT(0 1)" | POINT(0 1) | 1.0 +"POINT(0 -1)" | POINT(0 -1) | 1.0 +; + +twoCitiesPointDistanceCartesian +required_capability: st_distance + +ROW p1 = TO_CARTESIANPOINT("POINT(-90.82814 29.79511)"), p2 = TO_CARTESIANPOINT("POINT(-90.79731509999999 29.8835389)") +| EVAL d = ST_DISTANCE(p1, p2) +; + +p1:cartesian_point | p2:cartesian_point | d:double +POINT (-90.82814 29.79511) | POINT (-90.79731509999999 29.8835389) | 0.09364744959271905 +; + airportCartesianCityLocationPointDistance required_capability: st_distance diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 8d478408e8781..afa8b6e1d06d7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -135,6 +135,11 @@ public enum Cap { */ ST_DISTANCE, + /** + * Fix determination of CRS types in spatial functions when folding. + */ + SPATIAL_FUNCTIONS_FIX_CRSTYPE_FOLDING, + /** * Fix to GROK and DISSECT that allows extracting attributes with the same name as the input * https://github.com/elastic/elasticsearch/issues/110184 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java index d34ff30d9b87b..84d776888c7ae 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java @@ -42,7 +42,7 @@ public static List getNamedWriteables() { } private final SpatialTypeResolver spatialTypeResolver; - protected SpatialCrsType crsType; + private SpatialCrsType crsType; protected final boolean leftDocValues; protected final boolean rightDocValues; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java index afa2ba833dcd1..6cb3c34ba8b1f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java @@ -176,10 +176,10 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); Geometry rightGeom = makeGeometryFromLiteral(right()); - Component2D[] components = asLuceneComponent2Ds(crsType, 
rightGeom); - return (crsType == SpatialCrsType.GEO) + Component2D[] components = asLuceneComponent2Ds(crsType(), rightGeom); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometries(docValueReader, components) : CARTESIAN.geometryRelatesGeometries(docValueReader, components); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java index 9e37bf4c8fa51..d04dc9e1a6b07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java @@ -131,9 +131,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java index b7aaededf76f5..48e99989c5699 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java @@ -129,9 +129,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? 
GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java index 297a6b40c2175..c204468ae17d1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java @@ -131,9 +131,9 @@ protected NodeInfo info() { @Override public Object fold() { try { - GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType, left()); - Component2D component2D = asLuceneComponent2D(crsType, right()); - return (crsType == SpatialCrsType.GEO) + GeometryDocValueReader docValueReader = asGeometryDocValueReader(crsType(), left()); + Component2D component2D = asLuceneComponent2D(crsType(), right()); + return (crsType() == SpatialCrsType.GEO) ? GEO.geometryRelatesGeometry(docValueReader, component2D) : CARTESIAN.geometryRelatesGeometry(docValueReader, component2D); } catch (IOException e) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java index 1fdd4241aa222..14bded51aa55f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StDistance.java @@ -173,7 +173,7 @@ protected NodeInfo info() { public Object fold() { var leftGeom = makeGeometryFromLiteral(left()); var rightGeom = makeGeometryFromLiteral(right()); - return (crsType == SpatialCrsType.GEO) ? GEO.distance(leftGeom, rightGeom) : CARTESIAN.distance(leftGeom, rightGeom); + return (crsType() == SpatialCrsType.GEO) ? GEO.distance(leftGeom, rightGeom) : CARTESIAN.distance(leftGeom, rightGeom); } @Override From 615e0846178ca92d19c5561cd737a2fa1fe2929b Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 22 Aug 2024 14:13:56 +0100 Subject: [PATCH 017/352] Add more cross-links about sniff/proxy modes (#112079) The info about remote cluster connection modes is a little disjointed. This commit adds some cross-links between the sections to help users find more relevant information. --- .../cluster/remote-clusters-settings.asciidoc | 14 ++++- .../modules/remote-clusters.asciidoc | 55 +++++++++++-------- 2 files changed, 44 insertions(+), 25 deletions(-) diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index 2308ec259da48..537783ef6ff01 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -6,7 +6,10 @@ mode are described separately. `cluster.remote..mode`:: The mode used for a remote cluster connection. The only supported modes are - `sniff` and `proxy`. + `sniff` and `proxy`. The default is `sniff`. See <> for + further information about these modes, and <> + and <> for further information about their + settings. 
`cluster.remote.initial_connect_timeout`:: @@ -97,6 +100,11 @@ you configure the remotes. [[remote-cluster-sniff-settings]] ==== Sniff mode remote cluster settings +To use <> to connect to a remote cluster, set +`cluster.remote..mode: sniff` and then configure the following +settings. You may also leave `cluster.remote..mode` unset since +`sniff` is the default mode. + `cluster.remote..seeds`:: The list of seed nodes used to sniff the remote cluster state. @@ -117,6 +125,10 @@ you configure the remotes. [[remote-cluster-proxy-settings]] ==== Proxy mode remote cluster settings +To use <> to connect to a remote cluster, set +`cluster.remote..mode: proxy` and then configure the following +settings. + `cluster.remote..proxy_address`:: The address used for all remote connections. diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 510ceb6ddb013..ca1c507aa4ed9 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -1,7 +1,7 @@ [[remote-clusters]] == Remote clusters You can connect a local cluster to other {es} clusters, known as _remote -clusters_. Remote clusters can be located in different datacenters or +clusters_. Remote clusters can be located in different datacenters or geographic regions, and contain indices or data streams that can be replicated with {ccr} or searched by a local cluster using {ccs}. @@ -30,9 +30,9 @@ capabilities, the local and remote cluster must be on the same [discrete] === Add remote clusters -NOTE: The instructions that follow describe how to create a remote connection from a -self-managed cluster. You can also set up {ccs} and {ccr} from an -link:https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html[{ess} deployment] +NOTE: The instructions that follow describe how to create a remote connection from a +self-managed cluster. You can also set up {ccs} and {ccr} from an +link:https://www.elastic.co/guide/en/cloud/current/ec-enable-ccs.html[{ess} deployment] or from an link:https://www.elastic.co/guide/en/cloud-enterprise/current/ece-enable-ccs.html[{ece} deployment]. To add remote clusters, you can choose between @@ -52,7 +52,7 @@ controls. <>. Certificate based security model:: Uses mutual TLS authentication for cross-cluster operations. User authentication -is performed on the local cluster and a user's role names are passed to the +is performed on the local cluster and a user's role names are passed to the remote cluster. In this model, a superuser on the local cluster gains total read access to the remote cluster, so it is only suitable for clusters that are in the same security domain. <>. @@ -63,15 +63,17 @@ the same security domain. <>. [[sniff-mode]] Sniff mode:: -In sniff mode, a cluster is registered with a name of your choosing and a list -of addresses of _seed_ nodes. When you register a remote cluster using sniff -mode, {es} retrieves from one of the seed nodes the addresses of up to three -_gateway nodes_. Each `remote_cluster_client` node in the local {es} cluster -then opens several TCP connections to the publish addresses of the gateway -nodes. This mode therefore requires that the gateway nodes' publish addresses -are accessible to nodes in the local cluster. +In sniff mode, a cluster alias is registered with a name of your choosing and a +list of addresses of _seed_ nodes specified with the +`cluster.remote..seeds` setting. 
When you register a remote +cluster using sniff mode, {es} retrieves from one of the seed nodes the +addresses of up to three _gateway nodes_. Each `remote_cluster_client` node in +the local {es} cluster then opens several TCP connections to the publish +addresses of the gateway nodes. This mode therefore requires that the gateway +nodes' publish addresses are accessible to nodes in the local cluster. + -Sniff mode is the default connection mode. +Sniff mode is the default connection mode. See <> +for more information about configuring sniff mode. + [[gateway-nodes-selection]] The _gateway nodes_ selection depends on the following criteria: @@ -86,18 +88,23 @@ However, such nodes still have to satisfy the two above requirements. [[proxy-mode]] Proxy mode:: -In proxy mode, a cluster is registered with a name of your choosing and the -address of a TCP (layer 4) reverse proxy which you must configure to route -connections to the nodes of the remote cluster. When you register a remote -cluster using proxy mode, {es} opens several TCP connections to the proxy -address and uses these connections to communicate with the remote cluster. In -proxy mode {es} disregards the publish addresses of the remote cluster nodes -which means that the publish addresses of the remote cluster nodes need not be -accessible to the local cluster. +In proxy mode, a cluster alias is registered with a name of your choosing and +the address of a TCP (layer 4) reverse proxy specified with the +`cluster.remote..proxy_address` setting. You must configure this +proxy to route connections to one or more nodes of the remote cluster. When you +register a remote cluster using proxy mode, {es} opens several TCP connections +to the proxy address and uses these connections to communicate with the remote +cluster. In proxy mode {es} disregards the publish addresses of the remote +cluster nodes which means that the publish addresses of the remote cluster +nodes need not be accessible to the local cluster. + -Proxy mode is not the default connection mode, so you must configure it -explicitly if desired. Proxy mode has the same <> as sniff mode. +Proxy mode is not the default connection mode, so you must set +`cluster.remote..mode: proxy` to use it. See +<> for more information about configuring proxy +mode. ++ +Proxy mode has the same <> as sniff mode. 
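To make the two modes concrete, here is a sketch of the equivalent node settings built programmatically (the alias names and addresses are invented for the example; in practice these keys are usually set in `elasticsearch.yml` or via the cluster settings API):

[source,java]
----
import org.elasticsearch.common.settings.Settings;

final class RemoteClusterSettingsSketch {
    static Settings example() {
        return Settings.builder()
            // Sniff mode is the default: list seed nodes and let Elasticsearch
            // discover up to three gateway nodes from them.
            .put("cluster.remote.cluster_one.seeds", "seed.cluster-one.example:9300")
            // Proxy mode must be selected explicitly; every connection goes to a
            // single layer-4 proxy address that routes to the remote nodes.
            .put("cluster.remote.cluster_two.mode", "proxy")
            .put("cluster.remote.cluster_two.proxy_address", "proxy.cluster-two.example:9400")
            .build();
    }
}
----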
include::cluster/remote-clusters-api-key.asciidoc[] From de14c1d3e3e96de0f899236cded9bb410d0b6cce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Thu, 22 Aug 2024 16:08:31 +0200 Subject: [PATCH 018/352] Fix testSuggestProfilesWithHint (#112010) --- .../elasticsearch/xpack/security/profile/ProfileIntegTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java index 963c42c55aa60..d057b7ce0be20 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java @@ -451,7 +451,7 @@ public void testSuggestProfilesWithHint() throws IOException { final List spaces = List.of("space1", "space2", "space3", "space4", "*"); final List profiles = spaces.stream().map(space -> { final PlainActionFuture future1 = new PlainActionFuture<>(); - final String lastName = randomAlphaOfLengthBetween(3, 8); + final String lastName = randomAlphaOfLengthBetween(3, 8) + space; final Authentication.RealmRef realmRef = randomBoolean() ? AuthenticationTestHelper.randomRealmRef(false) : new Authentication.RealmRef( From f37440f441c5a05d45f68c45faa6558b8690eb17 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Thu, 22 Aug 2024 08:16:36 -0600 Subject: [PATCH 019/352] (Doc+) Allocation Explain Examples: THROTTLED, MAX_RETRY (#111558) Adds [Allocation Explain examples](https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html#cluster-allocation-explain-api-examples) for `THROTTLED` and `MAX_RETRY`. Also formats sub TOC so that we can after link code message to those docs. --- .../cluster/allocation-explain.asciidoc | 101 +++++++++++++++++- 1 file changed, 100 insertions(+), 1 deletion(-) diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 809c9d74f1450..7547dd74c5ecd 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -81,6 +81,7 @@ you might expect otherwise. ===== Unassigned primary shard +====== Conflicting settings The following request gets an allocation explanation for an unassigned primary shard. @@ -158,6 +159,56 @@ node. <5> The decider which led to the `no` decision for the node. <6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision. In this example, a newly created index has <> that requires that it only be allocated to a node named `nonexistent_node`, which does not exist, so the index is unable to allocate. +====== Maximum number of retries exceeded + +The following response contains an allocation explanation for an unassigned +primary shard that has reached the maximum number of allocation retry attempts. 
+
+[source,js]
+----
+{
+  "index" : "my-index-000001",
+  "shard" : 0,
+  "primary" : true,
+  "current_state" : "unassigned",
+  "unassigned_info" : {
+    "at" : "2017-01-04T18:03:28.464Z",
+    "details" : "failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException",
+    "reason": "ALLOCATION_FAILED",
+    "failed_allocation_attempts": 5,
+    "last_allocation_status": "no"
+  },
+  "can_allocate": "no",
+  "allocate_explanation": "cannot allocate because allocation is not permitted to any of the nodes",
+  "node_allocation_decisions" : [
+    {
+      "node_id" : "3sULLVJrRneSg0EfBB-2Ew",
+      "node_name" : "node_t0",
+      "transport_address" : "127.0.0.1:9400",
+      "roles" : ["data_content", "data_hot"],
+      "node_decision" : "no",
+      "store" : {
+        "matching_size" : "4.2kb",
+        "matching_size_in_bytes" : 4325
+      },
+      "deciders" : [
+        {
+          "decider": "max_retry",
+          "decision" : "NO",
+          "explanation": "shard has exceeded the maximum number of retries [5] on failed allocation attempts - manually call [/_cluster/reroute?retry_failed=true] to retry, [unassigned_info[[reason=ALLOCATION_FAILED], at[2024-07-30T21:04:12.166Z], failed_attempts[5], failed_nodes[[mEKjwwzLT1yJVb8UxT6anw]], delayed=false, details[failed shard on node [mEKjwwzLT1yJVb8UxT6anw]: failed recovery, failure RecoveryFailedException], allocation_status[deciders_no]]]"
+        }
+      ]
+    }
+  ]
+}
+----
+// NOTCONSOLE
+
+If the decider message indicates a transient allocation issue, use
+<> to retry allocation.
+
+====== No valid shard copy
+
 The following response contains an allocation explanation for an unassigned
 primary shard that was previously allocated.

@@ -184,6 +235,8 @@ TIP: If a shard is unassigned with an allocation status of `no_valid_shard_copy`

 ===== Unassigned replica shard

+====== Allocation delayed
+
 The following response contains an allocation explanation for a replica that's
 unassigned due to <>.

@@ -241,8 +294,52 @@ unassigned due to <>.
 <2> The remaining delay before allocating the replica shard.
 <3> Information about the shard data found on a node.

+====== Allocation throttled
+
+The following response contains an allocation explanation for a replica that's
+queued to allocate but currently waiting on other queued shards.
+
+[source,js]
+----
+{
+  "index" : "my-index-000001",
+  "shard" : 0,
+  "primary" : false,
+  "current_state" : "unassigned",
+  "unassigned_info" : {
+    "reason" : "NODE_LEFT",
+    "at" : "2017-01-04T18:53:59.498Z",
+    "details" : "node_left[G92ZwuuaRY-9n8_tc-IzEg]",
+    "last_allocation_status" : "no_attempt"
+  },
+  "can_allocate": "throttled",
+  "allocate_explanation": "Elasticsearch is currently busy with other activities. It expects to be able to allocate this shard when those activities finish. Please wait.",
+  "node_allocation_decisions" : [
+    {
+      "node_id" : "3sULLVJrRneSg0EfBB-2Ew",
+      "node_name" : "node_t0",
+      "transport_address" : "127.0.0.1:9400",
+      "roles" : ["data_content", "data_hot"],
+      "node_decision" : "no",
+      "deciders" : [
+        {
+          "decider": "throttling",
+          "decision": "THROTTLE",
+          "explanation": "reached the limit of incoming shard recoveries [2], cluster setting [cluster.routing.allocation.node_concurrent_incoming_recoveries=2] (can also be set via [cluster.routing.allocation.node_concurrent_recoveries])"
+        }
+      ]
+    }
+  ]
+}
+----
+// NOTCONSOLE
+
+This is a transient message that might appear when a large number of shards are allocating.
+
 ===== Assigned shard

+====== Cannot remain on current node
+
 The following response contains an allocation explanation for an assigned shard.
The response indicates the shard is not allowed to remain on its current node and must be reallocated. @@ -295,6 +392,8 @@ and must be reallocated. <2> The deciders that factored into the decision of why the shard is not allowed to remain on its current node. <3> Whether the shard is allowed to be allocated to another node. +====== Must remain on current node + The following response contains an allocation explanation for a shard that must remain on its current node. Moving the shard to another node would not improve cluster balance. @@ -338,7 +437,7 @@ cluster balance. ===== No arguments If you call the API with no arguments, {es} retrieves an allocation explanation -for an arbitrary unassigned primary or replica shard. +for an arbitrary unassigned primary or replica shard, returning any unassigned primary shards first. [source,console] ---- From 6d076dfa17438c02fdd4e912549ff26e57334c72 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Thu, 22 Aug 2024 17:20:47 +0300 Subject: [PATCH 020/352] ESQL: fix for missing indices error message (#111797) Reverts a part of https://github.com/elastic/elasticsearch/pull/109483 by going back to the previous (more restrictive) way of dealing with missing indices or aliases. More specifically, if an index pattern used in a query refers to a missing index or alias name and doesn't use a wildcard for this name, then we error out. Our lack of testing in this area made the change in https://github.com/elastic/elasticsearch/pull/109483 to be invisible. Fixes https://github.com/elastic/elasticsearch/issues/111712 --- docs/changelog/111797.yaml | 6 + .../xpack/esql/EsqlSecurityIT.java | 103 +++++++++-- .../xpack/esql/ccq/Clusters.java | 8 +- .../xpack/esql/ccq/EsqlRestValidationIT.java | 81 +++++++++ .../qa/multi_node/EsqlRestValidationIT.java | 27 +++ .../qa/single_node/EsqlRestValidationIT.java | 27 +++ .../qa/rest/EsqlRestValidationTestCase.java | 170 ++++++++++++++++++ .../xpack/esql/plugin/ComputeService.java | 9 +- .../RemoteClusterSecurityEsqlIT.java | 83 ++++++--- 9 files changed, 469 insertions(+), 45 deletions(-) create mode 100644 docs/changelog/111797.yaml create mode 100644 x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java create mode 100644 x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java create mode 100644 x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java create mode 100644 x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java diff --git a/docs/changelog/111797.yaml b/docs/changelog/111797.yaml new file mode 100644 index 0000000000000..00b793a19d9c3 --- /dev/null +++ b/docs/changelog/111797.yaml @@ -0,0 +1,6 @@ +pr: 111797 +summary: "ESQL: fix for missing indices error message" +area: ES|QL +type: bug +issues: + - 111712 diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index e661ad1e742c9..2b162b4f18ead 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -18,6 +18,7 @@ import 
org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.MapMatcher; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -160,7 +161,7 @@ public void testAllowedIndices() throws Exception { .entry("values", List.of(List.of(72.0d))); assertMap(entityAsMap(resp), matcher); } - for (var index : List.of("index-user2", "index-user1,index-user2", "index-user*", "index*")) { + for (var index : List.of("index-user2", "index-user*", "index*")) { Response resp = runESQLCommand("metadata1_read2", "from " + index + " | stats sum=sum(value)"); assertOK(resp); MapMatcher matcher = responseMatcher().entry("columns", List.of(Map.of("name", "sum", "type", "double"))) @@ -170,7 +171,7 @@ public void testAllowedIndices() throws Exception { } public void testAliases() throws Exception { - for (var index : List.of("second-alias", "second-alias,index-user2", "second-*", "second-*,index*")) { + for (var index : List.of("second-alias", "second-*", "second-*,index*")) { Response resp = runESQLCommand( "alias_user2", "from " + index + " METADATA _index" + "| stats sum=sum(value), index=VALUES(_index)" @@ -185,7 +186,7 @@ public void testAliases() throws Exception { } public void testAliasFilter() throws Exception { - for (var index : List.of("first-alias", "first-alias,index-user1", "first-alias,index-*", "first-*,index-*")) { + for (var index : List.of("first-alias", "first-alias,index-*", "first-*,index-*")) { Response resp = runESQLCommand("alias_user1", "from " + index + " METADATA _index" + "| KEEP _index, org, value | LIMIT 10"); assertOK(resp); MapMatcher matcher = responseMatcher().entry( @@ -221,19 +222,97 @@ public void testInsufficientPrivilege() { assertThat(error.getMessage(), containsString("Unknown index [index-user1]")); } + public void testIndexPatternErrorMessageComparison_ESQL_SearchDSL() throws Exception { + // _search match_all query on the index-user1,index-user2 index pattern + XContentBuilder json = JsonXContent.contentBuilder(); + json.startObject(); + json.field("query", QueryBuilders.matchAllQuery()); + json.endObject(); + Request searchRequest = new Request("GET", "/index-user1,index-user2/_search"); + searchRequest.setJsonEntity(Strings.toString(json)); + searchRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", "metadata1_read2")); + + // ES|QL query on the same index pattern + var esqlResp = expectThrows(ResponseException.class, () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2")); + var srchResp = expectThrows(ResponseException.class, () -> client().performRequest(searchRequest)); + + for (ResponseException r : List.of(esqlResp, srchResp)) { + assertThat( + EntityUtils.toString(r.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + } + assertThat(esqlResp.getResponse().getStatusLine().getStatusCode(), equalTo(srchResp.getResponse().getStatusLine().getStatusCode())); + } + public void testLimitedPrivilege() throws Exception { - Response resp = runESQLCommand("metadata1_read2", """ - FROM index-user1,index-user2 METADATA _index - | STATS sum=sum(value), index=VALUES(_index) - """); - assertOK(resp); - Map respMap = 
entityAsMap(resp); + ResponseException resp = expectThrows( + ResponseException.class, + () -> runESQLCommand( + "metadata1_read2", + "FROM index-user1,index-user2 METADATA _index | STATS sum=sum(value), index=VALUES(_index)" + ) + ); assertThat( - respMap.get("columns"), - equalTo(List.of(Map.of("name", "sum", "type", "double"), Map.of("name", "index", "type", "keyword"))) + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2 METADATA _index | STATS index=VALUES(_index)") ); - assertThat(respMap.get("values"), equalTo(List.of(List.of(72.0, "index-user2")))); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("metadata1_read2", "FROM index-user1,index-user2 | STATS sum=sum(value)") + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [metadata1_read2] with effective roles [metadata1_read2] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand("alias_user1", "FROM first-alias,index-user1 METADATA _index | KEEP _index, org, value | LIMIT 10") + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [alias_user1] with effective roles [alias_user1] on indices [index-user1]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); + + resp = expectThrows( + ResponseException.class, + () -> runESQLCommand( + "alias_user2", + "from second-alias,index-user2 METADATA _index | stats sum=sum(value), index=VALUES(_index)" + ) + ); + assertThat( + EntityUtils.toString(resp.getResponse().getEntity()), + containsString( + "unauthorized for user [test-admin] run as [alias_user2] with effective roles [alias_user2] on indices [index-user2]" + ) + ); + assertThat(resp.getResponse().getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_FORBIDDEN)); } public void testDocumentLevelSecurity() throws Exception { diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java index f20d758132cbb..fa8cb49c59aed 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java @@ -12,9 +12,13 @@ import org.elasticsearch.test.cluster.util.Version; public class Clusters { + + static final String REMOTE_CLUSTER_NAME = "remote_cluster"; + static final String LOCAL_CLUSTER_NAME = 
"local_cluster"; + public static ElasticsearchCluster remoteCluster() { return ElasticsearchCluster.local() - .name("remote_cluster") + .name(REMOTE_CLUSTER_NAME) .distribution(DistributionType.DEFAULT) .version(Version.fromString(System.getProperty("tests.old_cluster_version"))) .nodes(2) @@ -28,7 +32,7 @@ public static ElasticsearchCluster remoteCluster() { public static ElasticsearchCluster localCluster(ElasticsearchCluster remoteCluster) { return ElasticsearchCluster.local() - .name("local_cluster") + .name(LOCAL_CLUSTER_NAME) .distribution(DistributionType.DEFAULT) .version(Version.CURRENT) .nodes(2) diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..21307c5362417 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/EsqlRestValidationIT.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.ccq; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.AfterClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.StringJoiner; + +import static org.elasticsearch.xpack.esql.ccq.Clusters.REMOTE_CLUSTER_NAME; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + static ElasticsearchCluster remoteCluster = Clusters.remoteCluster(); + static ElasticsearchCluster localCluster = Clusters.localCluster(remoteCluster); + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster); + private static RestClient remoteClient; + + @Override + protected String getTestRestCluster() { + return localCluster.getHttpAddresses(); + } + + @AfterClass + public static void closeRemoteClients() throws IOException { + try { + IOUtils.close(remoteClient); + } finally { + remoteClient = null; + } + } + + @Override + protected String clusterSpecificIndexName(String pattern) { + StringJoiner sj = new StringJoiner(","); + for (String index : pattern.split(",")) { + sj.add(remoteClusterIndex(index)); + } + return sj.toString(); + } + + private static String remoteClusterIndex(String indexName) { + return REMOTE_CLUSTER_NAME + ":" + indexName; + } + + @Override + protected RestClient provisioningClient() throws IOException { + return remoteClusterClient(); + } + + @Override + protected RestClient provisioningAdminClient() throws IOException { + return remoteClusterClient(); + } + + private RestClient remoteClusterClient() throws IOException { + if (remoteClient == null) { + var clusterHosts = parseClusterHosts(remoteCluster.getHttpAddresses()); + remoteClient = 
buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[0])); + } + return remoteClient; + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..0187bafe19fce --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlRestValidationIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(spec -> {}); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java new file mode 100644 index 0000000000000..5a31fc722eec1 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlRestValidationIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlRestValidationTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class EsqlRestValidationIT extends EsqlRestValidationTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java new file mode 100644 index 0000000000000..9ec4f60f4c843 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlRestValidationTestCase.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.rest; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public abstract class EsqlRestValidationTestCase extends ESRestTestCase { + + private static final String indexName = "test_esql"; + private static final String aliasName = "alias-test_esql"; + protected static final String[] existentIndexWithWildcard = new String[] { + indexName + ",inexistent*", + indexName + "*,inexistent*", + "inexistent*," + indexName }; + private static final String[] existentIndexWithoutWildcard = new String[] { indexName + ",inexistent", "inexistent," + indexName }; + protected static final String[] existentAliasWithWildcard = new String[] { + aliasName + ",inexistent*", + aliasName + "*,inexistent*", + "inexistent*," + aliasName }; + private static final String[] existentAliasWithoutWildcard = new String[] { aliasName + ",inexistent", "inexistent," + aliasName }; + private static final String[] inexistentIndexNameWithWildcard = new String[] { "inexistent*", "inexistent1*,inexistent2*" }; + private static final String[] inexistentIndexNameWithoutWildcard = new String[] { "inexistent", "inexistent1,inexistent2" }; + private static final String createAlias = "{\"actions\":[{\"add\":{\"index\":\"" + indexName + "\",\"alias\":\"" + aliasName + "\"}}]}"; + private static final String removeAlias = "{\"actions\":[{\"remove\":{\"index\":\"" + + indexName + + "\",\"alias\":\"" + + aliasName + + "\"}}]}"; + + @Before + @After + public void assertRequestBreakerEmpty() throws Exception { + EsqlSpecTestCase.assertRequestBreakerEmpty(); + } + + @Before + public void prepareIndices() throws IOException { + if (provisioningClient().performRequest(new Request("HEAD", "/" + indexName)).getStatusLine().getStatusCode() == 404) { + var request = new Request("PUT", "/" + indexName); + request.setJsonEntity("{\"mappings\": {\"properties\": {\"foo\":{\"type\":\"keyword\"}}}}"); + provisioningClient().performRequest(request); + } + assertOK(provisioningAdminClient().performRequest(new Request("POST", "/" + indexName + "/_refresh"))); + } + + @After + public void wipeTestData() throws IOException { + try { + var response = provisioningAdminClient().performRequest(new Request("DELETE", "/" + indexName)); + assertEquals(200, response.getStatusLine().getStatusCode()); + } catch (ResponseException re) { + assertEquals(404, re.getResponse().getStatusLine().getStatusCode()); + } + } + + private String getInexistentIndexErrorMessage() { + return "\"reason\" : \"Found 1 problem\\nline 1:1: Unknown index "; + } + + public void testInexistentIndexNameWithWildcard() throws IOException { + assertErrorMessages(inexistentIndexNameWithWildcard, getInexistentIndexErrorMessage(), 400); + } + + public void testInexistentIndexNameWithoutWildcard() throws IOException { + 
assertErrorMessages(inexistentIndexNameWithoutWildcard, getInexistentIndexErrorMessage(), 400); + } + + public void testExistentIndexWithoutWildcard() throws IOException { + for (String indexName : existentIndexWithoutWildcard) { + assertErrorMessage(indexName, "\"reason\" : \"no such index [inexistent]\"", 404); + } + } + + public void testExistentIndexWithWildcard() throws IOException { + assertValidRequestOnIndices(existentIndexWithWildcard); + } + + public void testAlias() throws IOException { + createAlias(); + + for (String indexName : existentAliasWithoutWildcard) { + assertErrorMessage(indexName, "\"reason\" : \"no such index [inexistent]\"", 404); + } + assertValidRequestOnIndices(existentAliasWithWildcard); + + deleteAlias(); + } + + private void assertErrorMessages(String[] indices, String errorMessage, int statusCode) throws IOException { + for (String indexName : indices) { + assertErrorMessage(indexName, errorMessage + "[" + clusterSpecificIndexName(indexName) + "]", statusCode); + } + } + + protected String clusterSpecificIndexName(String indexName) { + return indexName; + } + + private void assertErrorMessage(String indexName, String errorMessage, int statusCode) throws IOException { + var specificName = clusterSpecificIndexName(indexName); + final var request = createRequest(specificName); + ResponseException exc = expectThrows(ResponseException.class, () -> client().performRequest(request)); + + assertThat(exc.getResponse().getStatusLine().getStatusCode(), equalTo(statusCode)); + assertThat(exc.getMessage(), containsString(errorMessage)); + } + + private Request createRequest(String indexName) throws IOException { + final var request = new Request("POST", "/_query"); + request.addParameter("error_trace", "true"); + request.addParameter("pretty", "true"); + request.setJsonEntity( + Strings.toString(JsonXContent.contentBuilder().startObject().field("query", "from " + indexName).endObject()) + ); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + request.setOptions(options); + return request; + } + + private void assertValidRequestOnIndices(String[] indices) throws IOException { + for (String indexName : indices) { + final var request = createRequest(clusterSpecificIndexName(indexName)); + Response response = client().performRequest(request); + assertOK(response); + } + } + + // Returned client is used to load the test data, either in the local cluster or a remote one (for + // multi-clusters). 
The client()/adminClient() will always connect to the local cluster + protected RestClient provisioningClient() throws IOException { + return client(); + } + + protected RestClient provisioningAdminClient() throws IOException { + return adminClient(); + } + + private void createAlias() throws IOException { + var r = new Request("POST", "_aliases"); + r.setJsonEntity(createAlias); + assertOK(provisioningClient().performRequest(r)); + } + + private void deleteAlias() throws IOException { + var r = new Request("POST", "/_aliases/"); + r.setJsonEntity(removeAlias); + assertOK(provisioningAdminClient().performRequest(r)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 29d524fc664a8..fa8a5693c59bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -11,11 +11,11 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -68,7 +68,6 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.session.Configuration; -import org.elasticsearch.xpack.esql.session.IndexResolver; import org.elasticsearch.xpack.esql.session.Result; import java.util.ArrayList; @@ -98,8 +97,6 @@ public class ComputeService { private final EnrichLookupService enrichLookupService; private final ClusterService clusterService; - private static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndexResolver.FIELD_CAPS_INDICES_OPTIONS; - public ComputeService( SearchService searchService, TransportService transportService, @@ -152,7 +149,7 @@ public void execute( return; } Map clusterToConcreteIndices = transportService.getRemoteClusterService() - .groupIndices(DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); if (dataNodePlan == null) { if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0) == false) { @@ -188,7 +185,7 @@ public void execute( } } Map clusterToOriginalIndices = transportService.getRemoteClusterService() - .groupIndices(DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); final var exchangeSource = new 
ExchangeSourceHandler( diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 262e1340fb465..f5f9410a145cc 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.junit.RunnableTestRuleAdapter; @@ -347,21 +348,6 @@ public void testCrossClusterQuery() throws Exception { | LIMIT 10""")); assertRemoteAndLocalResults(response); - // query remote cluster only - but also include employees2 which the user does not have access to - response = performRequestWithRemoteSearchUser(esqlRequest(""" - FROM my_remote_cluster:employees,my_remote_cluster:employees2 - | SORT emp_id ASC - | LIMIT 2 - | KEEP emp_id, department""")); - assertRemoteOnlyResults(response); // same as above since the user only has access to employees - - // query remote and local cluster - but also include employees2 which the user does not have access to - response = performRequestWithRemoteSearchUser(esqlRequest(""" - FROM my_remote_cluster:employees,my_remote_cluster:employees2,employees,employees2 - | SORT emp_id ASC - | LIMIT 10""")); - assertRemoteAndLocalResults(response); // same as above since the user only has access to employees - // update role to include both employees and employees2 for the remote cluster final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); putRoleRequest.setJsonEntity(""" @@ -618,6 +604,37 @@ public void testCrossClusterQueryWithOnlyRemotePrivs() throws Exception { + "this action is granted by the index privileges [read,read_cross_cluster,all]" ) ); + + // query remote cluster only - but also include employees2 which the user does not have access to + error = expectThrows(ResponseException.class, () -> { performRequestWithRemoteSearchUser(esqlRequest(""" + FROM my_remote_cluster:employees,my_remote_cluster:employees2 + | SORT emp_id ASC + | LIMIT 2 + | KEEP emp_id, department""")); }); + + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString( + "action [indices:data/read/esql] is unauthorized for user [remote_search_user] with effective roles " + + "[remote_search], this action is granted by the index privileges [read,read_cross_cluster,all]" + ) + ); + + // query remote and local cluster - but also include employees2 which the user does not have access to + error = expectThrows(ResponseException.class, () -> { performRequestWithRemoteSearchUser(esqlRequest(""" + FROM my_remote_cluster:employees,my_remote_cluster:employees2,employees,employees2 + | SORT emp_id ASC + | LIMIT 10""")); }); + + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString( + "action [indices:data/read/esql] is unauthorized for user 
[remote_search_user] with effective roles " + + "[remote_search], this action is granted by the index privileges [read,read_cross_cluster,all]" + ) + ); } @SuppressWarnings("unchecked") @@ -841,7 +858,7 @@ public void testAlias() throws Exception { }"""); assertOK(adminClient().performRequest(putRoleRequest)); // query `employees2` - for (String index : List.of("*:employees2", "*:employee*", "*:employee*,*:alias-employees,*:employees3")) { + for (String index : List.of("*:employees2", "*:employee*")) { Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); Response response = performRequestWithRemoteSearchUser(request); assertOK(response); @@ -849,15 +866,7 @@ public void testAlias() throws Exception { List ids = (List) responseAsMap.get("values"); assertThat(ids, equalTo(List.of(List.of("11"), List.of("13")))); } - // query `alias-engineering` - for (var index : List.of("*:alias*", "*:alias*", "*:alias*,my*:employees1", "*:alias*,my*:employees3")) { - Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); - Response response = performRequestWithRemoteSearchUser(request); - assertOK(response); - Map responseAsMap = entityAsMap(response); - List ids = (List) responseAsMap.get("values"); - assertThat(ids, equalTo(List.of(List.of("1"), List.of("7")))); - } + // query `employees2` and `alias-engineering` for (var index : List.of("*:employees2,*:alias-engineering", "*:emp*,*:alias-engineering", "*:emp*,my*:alias*")) { Request request = esqlRequest("FROM " + index + " | KEEP emp_id | SORT emp_id | LIMIT 100"); @@ -874,6 +883,30 @@ public void testAlias() throws Exception { assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(400)); assertThat(error.getMessage(), containsString(" Unknown index [" + index + "]")); } + + for (var index : List.of( + Tuple.tuple("*:employee*,*:alias-employees,*:employees3", "alias-employees,employees3"), + Tuple.tuple("*:alias*,my*:employees1", "employees1"), + Tuple.tuple("*:alias*,my*:employees3", "employees3") + )) { + Request request = esqlRequest("FROM " + index.v1() + " | KEEP emp_id | SORT emp_id | LIMIT 100"); + ResponseException error = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(request)); + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat( + error.getMessage(), + containsString("unauthorized for user [remote_search_user] with assigned roles [remote_search]") + ); + assertThat(error.getMessage(), containsString("user [test_user] on indices [" + index.v2() + "]")); + } + + // query `alias-engineering` + Request request = esqlRequest("FROM *:alias* | KEEP emp_id | SORT emp_id | LIMIT 100"); + Response response = performRequestWithRemoteSearchUser(request); + assertOK(response); + Map responseAsMap = entityAsMap(response); + List ids = (List) responseAsMap.get("values"); + assertThat(ids, equalTo(List.of(List.of("1"), List.of("7")))); + removeAliases(); } From ed60470518131a26f387df32915448b40098db48 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Thu, 22 Aug 2024 17:42:49 +0300 Subject: [PATCH 021/352] Display effective retention in the relevant data stream APIs (#112019) --- docs/changelog/112019.yaml | 5 + .../lifecycle/apis/get-lifecycle.asciidoc | 8 +- .../data-streams/lifecycle/index.asciidoc | 18 +- ...rial-manage-data-stream-retention.asciidoc | 215 ++++++++++++++++++ .../tutorial-manage-new-data-stream.asciidoc | 13 +- ...grate-data-stream-from-ilm-to-dsl.asciidoc | 38 ++-- 
.../DataStreamGlobalRetentionIT.java | 190 ++++++++++++++++ .../RestExplainDataStreamLifecycleAction.java | 7 + .../RestGetDataStreamLifecycleAction.java | 7 + .../rest/RestGetDataStreamsAction.java | 7 + .../lifecycle/40_effective_retention.yml | 104 +++++++++ .../datastreams/GetDataStreamAction.java | 2 +- .../ExplainDataStreamLifecycleAction.java | 2 +- .../GetDataStreamLifecycleAction.java | 2 +- .../cluster/metadata/DataStreamLifecycle.java | 14 +- .../metadata/DataStreamLifecycleTests.java | 21 +- 16 files changed, 595 insertions(+), 58 deletions(-) create mode 100644 docs/changelog/112019.yaml create mode 100644 docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc create mode 100644 modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java create mode 100644 modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml diff --git a/docs/changelog/112019.yaml b/docs/changelog/112019.yaml new file mode 100644 index 0000000000000..7afb207864ed7 --- /dev/null +++ b/docs/changelog/112019.yaml @@ -0,0 +1,5 @@ +pr: 112019 +summary: Display effective retention in the relevant data stream APIs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index c83572a4e0795..6bac1c7f7cc75 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -128,14 +128,18 @@ The response will look like the following: "name": "my-data-stream-1", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" } }, { "name": "my-data-stream-2", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" } } ] diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index 16ccf2ef82391..e4d5acfb704d3 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -14,10 +14,11 @@ To achieve that, it supports: * Automatic <>, which chunks your incoming data in smaller pieces to facilitate better performance and backwards incompatible mapping changes. * Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. -{es} is allowed at a later time to delete data older than this time period. +{es} is allowed at a later time to delete data older than this time period. Retention can be configured on the data stream level +or on a global level. Read more about the different options in this <>. A data stream lifecycle also supports downsampling the data stream backing indices. -See <> for +See <> for more details. [discrete] @@ -33,16 +34,17 @@ each data stream and performs the following steps: 3. After an index is not the write index anymore (i.e. the data stream has been rolled over), automatically tail merges the index. Data stream lifecycle executes a merge operation that only targets the long tail of small segments instead of the whole shard. 
As the segments are organised
-into tiers of exponential sizes, merging the long tail of small segments is only a
+into tiers of exponential sizes, merging the long tail of small segments is only a
 fraction of the cost of force merging to a single segment. The small segments would
 usually hold the most recent data so tail merging will focus the merging resources on the higher-value data that is most
 likely to keep being queried.
-4. If <> is configured it will execute
+4. If <> is configured it will execute
 all the configured downsampling rounds.
 5. Applies retention to the remaining backing indices. This means deleting the backing indices whose
-`generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing
-indices and it is either the time since the backing index got rolled over, or the time optionally configured in the
-<> setting.
+`generation_time` is longer than the effective retention period (read more about the
+<>). The `generation_time` is only applicable to rolled
+over backing indices and it is either the time since the backing index got rolled over, or the time optionally configured
+in the <> setting.

 IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing
 index have passed the retention period. As a result, the retention period is not the exact time data gets deleted, but
@@ -75,4 +77,6 @@ include::tutorial-manage-new-data-stream.asciidoc[]

 include::tutorial-manage-existing-data-stream.asciidoc[]

+include::tutorial-manage-data-stream-retention.asciidoc[]
+
 include::tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc[]
diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc
new file mode 100644
index 0000000000000..83a587c250e73
--- /dev/null
+++ b/docs/reference/data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc
@@ -0,0 +1,215 @@
+[role="xpack"]
+[[tutorial-manage-data-stream-retention]]
+=== Tutorial: Data stream retention
+
+In this tutorial, we are going to go over the data stream lifecycle retention; we will define it, go over how it can be configured
+and how it gets applied. Keep in mind, the following options apply only to data streams that are managed by the data stream lifecycle.
+
+. <>
+. <>
+. <>
+. <>
+
+You can verify if a data stream is managed by the data stream lifecycle via the <>:
+
+////
+[source,console]
+----
+PUT /_index_template/template
+{
+  "index_patterns": ["my-data-stream*"],
+  "template": {
+    "lifecycle": {}
+  },
+  "data_stream": { }
+}
+
+PUT /_data_stream/my-data-stream
+----
+// TESTSETUP
+////
+
+////
+[source,console]
+----
+DELETE /_data_stream/my-data-stream*
+DELETE /_index_template/template
+PUT /_cluster/settings
+{
+  "persistent" : {
+    "data_streams.lifecycle.retention.*" : null
+  }
+}
+----
+// TEARDOWN
+////
+
+[source,console]
+--------------------------------------------------
+GET _data_stream/my-data-stream/_lifecycle
+--------------------------------------------------
+
+The result should look like this:
+
+[source,console-result]
+--------------------------------------------------
+{
+  "data_streams": [
+    {
+      "name": "my-data-stream", <1>
+      "lifecycle": {
+        "enabled": true <2>
+      }
+    }
+  ]
+}
+--------------------------------------------------
+// TESTRESPONSE[skip:the result is for illustrating purposes only]
+<1> The name of your data stream.
+<2> Ensure that the lifecycle is enabled, meaning this should be `true`.
+
+[discrete]
+[[what-is-retention]]
+==== What is data stream retention?
+
+We define retention as the least amount of time the data of a data stream are going to be kept in {es}. After this time period
+has passed, {es} is allowed to remove these data to free up space and/or manage costs.
+
+NOTE: Retention does not define the period after which the data will be removed, but the minimum time period they will be kept.
+
+We define 4 different types of retention:
+
+* The data stream retention, or `data_retention`, which is the retention configured on the data stream level. It can be
+set via an <> for future data streams or via the <> for an existing data stream. When the data stream retention is not set, it implies that the data
+need to be kept forever.
+* The global default retention, let's call it `default_retention`, which is a retention configured via the cluster setting
+<> and will be
+applied to all data streams managed by data stream lifecycle that do not have `data_retention` configured. Effectively,
+it ensures that there will be no data streams keeping their data forever. This can be set via the
+<>.
+* The global max retention, let's call it `max_retention`, which is a retention configured via the cluster setting
+<> and will be applied to
+all data streams managed by data stream lifecycle. Effectively, it ensures that there will be no data streams whose retention
+will exceed this time period. This can be set via the <>.
+* The effective retention, or `effective_retention`, which is the retention applied to a data stream at a given moment.
+Effective retention cannot be set directly; it is derived by taking into account all the configured retention listed above and is
+calculated as described <>.
+
+[discrete]
+[[retention-configuration]]
+==== How to configure retention?
+
+- By setting the `data_retention` on the data stream level. This retention can be configured in two ways:
++
+-- For new data streams, it can be defined in the index template that would be applied during the data stream's creation.
+You can use the <>, for example:
++
+[source,console]
+--------------------------------------------------
+PUT _index_template/template
+{
+  "index_patterns": ["my-data-stream*"],
+  "data_stream": { },
+  "priority": 500,
+  "template": {
+    "lifecycle": {
+      "data_retention": "7d"
+    }
+  },
+  "_meta": {
+    "description": "Template with data stream lifecycle"
+  }
+}
+--------------------------------------------------
+-- For an existing data stream, it can be set via the <>.
++
+[source,console]
+----
+PUT _data_stream/my-data-stream/_lifecycle
+{
+  "data_retention": "30d" <1>
+}
+----
+// TEST[continued]
+<1> The retention period of this data stream is set to 30 days.
+
+- By setting the global retention via the `data_streams.lifecycle.retention.default` and/or `data_streams.lifecycle.retention.max`
+that are set on a cluster level. These can be set via the <>. For example:
++
+[source,console]
+--------------------------------------------------
+PUT /_cluster/settings
+{
+  "persistent" : {
+    "data_streams.lifecycle.retention.default" : "7d",
+    "data_streams.lifecycle.retention.max" : "90d"
+  }
+}
+--------------------------------------------------
+// TEST[continued]
+
+[discrete]
+[[effective-retention-calculation]]
+==== How is the effective retention calculated?
+The effective retention is calculated in the following way:
+
+- The `effective_retention` is the `default_retention`, when `default_retention` is defined and the data stream does not
+have `data_retention`.
+- The `effective_retention` is the `data_retention`, when `data_retention` is defined and, if `max_retention` is defined,
+it is less than the `max_retention`.
+- The `effective_retention` is the `max_retention`, when `max_retention` is defined, and the data stream has either no
+`data_retention` or its `data_retention` is greater than the `max_retention`.
+
+The above is demonstrated in the examples below:
+
+|===
+|`default_retention` |`max_retention` |`data_retention` |`effective_retention` |Retention determined by

+|Not set |Not set |Not set |Infinite |N/A
+|Not relevant |12 months |**30 days** |30 days |`data_retention`
+|Not relevant |Not set |**30 days** |30 days |`data_retention`
+|**30 days** |12 months |Not set |30 days |`default_retention`
+|**30 days** |30 days |Not set |30 days |`default_retention`
+|Not relevant |**30 days** |12 months |30 days |`max_retention`
+|Not set |**30 days** |Not set |30 days |`max_retention`
+|===
+
+Considering our example, if we retrieve the lifecycle of `my-data-stream`:
+[source,console]
+----
+GET _data_stream/my-data-stream/_lifecycle
+----
+// TEST[continued]
+
+We see that it remains the same as what the user configured:
+[source,console-result]
+----
+{
+  "data_streams": [
+    {
+      "name": "my-data-stream",
+      "lifecycle": {
+        "enabled": true,
+        "data_retention": "30d",
+        "effective_retention": "30d",
+        "retention_determined_by": "data_stream_configuration"
+      }
+    }
+  ]
+}
+----
+
+[discrete]
+[[effective-retention-application]]
+==== How is the effective retention applied?
+
+Retention is applied to the remaining backing indices of a data stream as the last step of
+<>. Data stream lifecycle will retrieve the backing indices
+whose `generation_time` is longer than the effective retention period and delete them. The `generation_time` is only
+applicable to rolled over backing indices and it is either the time since the backing index got rolled over, or the time
+optionally configured in the <> setting.
+
+IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing
+index have passed the retention period. As a result, the retention period is not the exact time data get deleted, but
+the minimum time data will be stored.
diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
index c34340a096046..01d51cdde3167 100644
--- a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
+++ b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc
@@ -91,10 +91,12 @@ The result will look like this:
 {
   "data_streams": [
     {
-      "name": "my-data-stream",<1>
+      "name": "my-data-stream", <1>
       "lifecycle": {
-        "enabled": true, <2>
-        "data_retention": "7d" <3>
+        "enabled": true, <2>
+        "data_retention": "7d", <3>
+        "effective_retention": "7d", <4>
+        "retention_determined_by": "data_stream_configuration"
       }
     }
   ]
@@ -102,8 +104,9 @@ The result will look like this:
 --------------------------------------------------
 <1> The name of your data stream.
 <2> Shows if the data stream lifecycle is enabled for this data stream.
-<3> The retention period of the data indexed in this data stream, this means that the data in this data stream will -be kept at least for 7 days. After that {es} can delete it at its own discretion. +<3> The retention period of the data indexed in this data stream, as configured by the user. +<4> The retention period that will be applied by the data stream lifecycle. This means that the data in this data stream will + be kept at least for 7 days. After that {es} can delete it at its own discretion. If you want to see more information about how the data stream lifecycle is applied on individual backing indices use the <>: diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index 8d959d8f4ad84..a2c12466b7f2b 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -1,14 +1,14 @@ [role="xpack"] [[tutorial-migrate-data-stream-from-ilm-to-dsl]] -=== Tutorial: Migrate ILM managed data stream to data stream lifecycle +=== Tutorial: Migrate ILM managed data stream to data stream lifecycle In this tutorial we'll look at migrating an existing data stream from <> to -<>. The existing {ilm-init} managed backing indices will continue +<>. The existing {ilm-init} managed backing indices will continue to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however, -the new backing indices will be managed by data stream lifecycle. -This way, a data stream is gradually migrated away from being managed by {ilm-init} to +the new backing indices will be managed by data stream lifecycle. +This way, a data stream is gradually migrated away from being managed by {ilm-init} to being managed by data stream lifecycle. As we'll see, {ilm-init} and data stream lifecycle -can co-manage a data stream; however, an index can only be managed by one system at +can co-manage a data stream; however, an index can only be managed by one system at a time. [discrete] @@ -17,7 +17,7 @@ a time. To migrate a data stream from {ilm-init} to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the data stream to set <> +1. Update the index template that's backing the data stream to set <> to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ data stream using the <>. @@ -174,8 +174,8 @@ in the index template). To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute two steps: -1. Update the index template that's backing the data stream to set <> -to `false`, and to configure data stream lifecycle. +1. Update the index template that's backing the data stream to set <> +to `false`, and to configure data stream lifecycle. 2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using the <>. @@ -209,9 +209,9 @@ PUT _index_template/dsl-data-stream-template // TEST[continued] <1> The `prefer_ilm` setting will now be configured on the **new** backing indices -(created by rolling over the data stream) such that {ilm-init} does _not_ take +(created by rolling over the data stream) such that {ilm-init} does _not_ take precedence over data stream lifecycle. 
-<2> We're configuring the data stream lifecycle so _new_ data streams will be +<2> We're configuring the data stream lifecycle so _new_ data streams will be managed by data stream lifecycle. We've now made sure that new data streams will be managed by data stream lifecycle. @@ -227,7 +227,7 @@ PUT _data_stream/dsl-data-stream/_lifecycle ---- // TEST[continued] -We can inspect the data stream to check that the next generation will indeed be +We can inspect the data stream to check that the next generation will indeed be managed by data stream lifecycle: [source,console] @@ -266,7 +266,9 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", <3> @@ -292,7 +294,7 @@ GET _data_stream/dsl-data-stream <4> The `prefer_ilm` setting value we configured in the index template is reflected and will be configured accordingly for new backing indices. -We'll now rollover the data stream to see the new generation index being managed by +We'll now rollover the data stream to see the new generation index being managed by data stream lifecycle: [source,console] @@ -344,7 +346,9 @@ GET _data_stream/dsl-data-stream "template": "dsl-data-stream-template", "lifecycle": { "enabled": true, - "data_retention": "7d" + "data_retention": "7d", + "effective_retention": "7d", + "retention_determined_by": "data_stream_configuration" }, "ilm_policy": "pre-dsl-ilm-policy", "next_generation_managed_by": "Data stream lifecycle", @@ -375,9 +379,9 @@ in the index template [discrete] [[migrate-from-dsl-to-ilm]] ==== Migrate data stream back to ILM -We can easily change this data stream to be managed by {ilm-init} because we didn't remove -the {ilm-init} policy when we <>. +We can easily change this data stream to be managed by {ilm-init} because we didn't remove +the {ilm-init} policy when we <>. We can achieve this in two ways: diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java new file mode 100644 index 0000000000000..514eb6d8742ea --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamGlobalRetentionIT.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+package org.elasticsearch.datastreams.lifecycle;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.WarningFailureException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.datastreams.DisabledSecurityDataStreamTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DataStreamGlobalRetentionIT extends DisabledSecurityDataStreamTestCase {
+
+    @Before
+    public void setup() throws IOException {
+        updateClusterSettings(
+            Settings.builder()
+                .put("data_streams.lifecycle.poll_interval", "1s")
+                .put("cluster.lifecycle.default.rollover", "min_docs=1,max_docs=1")
+                .build()
+        );
+        // Create a template with the default lifecycle
+        Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1");
+        putComposableIndexTemplateRequest.setJsonEntity("""
+            {
+                "index_patterns": ["my-data-stream*"],
+                "data_stream": {},
+                "template": {
+                    "lifecycle": {}
+                }
+            }
+            """);
+        assertOK(client().performRequest(putComposableIndexTemplateRequest));
+
+        // Create a data stream with one doc
+        Request createDocRequest = new Request("POST", "/my-data-stream/_doc?refresh=true");
+        createDocRequest.setJsonEntity("{ \"@timestamp\": \"2022-12-12\"}");
+        assertOK(client().performRequest(createDocRequest));
+    }
+
+    @After
+    public void cleanUp() throws IOException {
+        adminClient().performRequest(new Request("DELETE", "_data_stream/*"));
+        updateClusterSettings(
+            Settings.builder().putNull("data_streams.lifecycle.retention.default").putNull("data_streams.lifecycle.retention.max").build()
+        );
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testDataStreamRetention() throws Exception {
+        // Set global retention and add retention to the data stream
+        {
+            updateClusterSettings(
+                Settings.builder()
+                    .put("data_streams.lifecycle.retention.default", "7d")
+                    .put("data_streams.lifecycle.retention.max", "90d")
+                    .build()
+            );
+            Request request = new Request("PUT", "_data_stream/my-data-stream/_lifecycle");
+            request.setJsonEntity("""
+                {
+                    "data_retention": "10s"
+                }""");
+            assertAcknowledged(client().performRequest(request));
+        }
+
+        // Verify that the effective retention matches the data stream retention
+        {
+            Request request = new Request("GET", "/_data_stream/my-data-stream");
+            Response response = client().performRequest(request);
+            List dataStreams = (List) entityAsMap(response).get("data_streams");
+            assertThat(dataStreams.size(), is(1));
+            Map dataStream = (Map) dataStreams.get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            Map lifecycle = (Map) dataStream.get("lifecycle");
+            assertThat(lifecycle.get("effective_retention"), is("10s"));
+            assertThat(lifecycle.get("retention_determined_by"), is("data_stream_configuration"));
+            assertThat(lifecycle.get("data_retention"), is("10s"));
+        }
+
+        // Verify that the first generation index was removed
+        assertBusy(() -> {
+            Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream"));
+            Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            List backingIndices = (List) dataStream.get("indices");
+            assertThat(backingIndices.size(), is(1));
+            // 2 backing indices created + 1 for the deleted index
+            assertThat(dataStream.get("generation"), is(3));
+        }, 20, TimeUnit.SECONDS);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testDefaultRetention() throws Exception {
+        // Set default global retention
+        updateClusterSettings(Settings.builder().put("data_streams.lifecycle.retention.default", "10s").build());
+
+        // Verify that the effective retention matches the default retention
+        {
+            Request request = new Request("GET", "/_data_stream/my-data-stream");
+            Response response = client().performRequest(request);
+            List dataStreams = (List) entityAsMap(response).get("data_streams");
+            assertThat(dataStreams.size(), is(1));
+            Map dataStream = (Map) dataStreams.get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            Map lifecycle = (Map) dataStream.get("lifecycle");
+            assertThat(lifecycle.get("effective_retention"), is("10s"));
+            assertThat(lifecycle.get("retention_determined_by"), is("default_global_retention"));
+            assertThat(lifecycle.get("data_retention"), nullValue());
+        }
+
+        // Verify that the first generation index was removed
+        assertBusy(() -> {
+            Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream"));
+            Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            List backingIndices = (List) dataStream.get("indices");
+            assertThat(backingIndices.size(), is(1));
+            // 2 backing indices created + 1 for the deleted index
+            assertThat(dataStream.get("generation"), is(3));
+        }, 20, TimeUnit.SECONDS);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testMaxRetention() throws Exception {
+        // Set max global retention
+        updateClusterSettings(Settings.builder().put("data_streams.lifecycle.retention.max", "10s").build());
+        boolean withDataStreamLevelRetention = randomBoolean();
+        if (withDataStreamLevelRetention) {
+            try {
+                Request request = new Request("PUT", "_data_stream/my-data-stream/_lifecycle");
+                request.setJsonEntity("""
+                    {
+                        "data_retention": "30d"
+                    }""");
+                assertAcknowledged(client().performRequest(request));
+                fail("Should have returned a warning about data retention exceeding the max retention");
+            } catch (WarningFailureException warningFailureException) {
+                assertThat(
+                    warningFailureException.getMessage(),
+                    containsString("The retention provided [30d] is exceeding the max allowed data retention of this project [10s]")
+                );
+            }
+        }
+
+        // Verify that the effective retention matches the max retention
+        {
+            Request request = new Request("GET", "/_data_stream/my-data-stream");
+            Response response = client().performRequest(request);
+            List dataStreams = (List) entityAsMap(response).get("data_streams");
+            assertThat(dataStreams.size(), is(1));
+            Map dataStream = (Map) dataStreams.get(0);
+            assertThat(dataStream.get("name"), is("my-data-stream"));
+            Map lifecycle = (Map) dataStream.get("lifecycle");
+            assertThat(lifecycle.get("effective_retention"), is("10s"));
+            assertThat(lifecycle.get("retention_determined_by"), is("max_global_retention"));
+            if (withDataStreamLevelRetention) {
+                assertThat(lifecycle.get("data_retention"), is("30d"));
+            } else {
+                assertThat(lifecycle.get("data_retention"), nullValue());
+            }
+        }
+
+        // Verify that the first generation index was removed
+        assertBusy(() -> {
+            Response response = client().performRequest(new Request("GET", "/_data_stream/my-data-stream"));
+            Map dataStream = ((List>) entityAsMap(response).get("data_streams")).get(0);
assertThat(dataStream.get("name"), is("my-data-stream")); + List backingIndices = (List) dataStream.get("indices"); + assertThat(backingIndices.size(), is(1)); + // 2 backing indices created + 1 for the deleted index + assertThat(dataStream.get("generation"), is(3)); + }, 20, TimeUnit.SECONDS); + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java index f44e59d0278c3..82350130e57af 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +20,7 @@ import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @@ -56,4 +58,9 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java index 94724f6778013..00f9d4da88301 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +20,7 @@ import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -54,4 +56,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index 
5acb59841d6a6..c3178208d51c2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +20,7 @@ import org.elasticsearch.rest.action.RestToXContentListener; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -50,4 +52,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli public boolean allowSystemIndexAccessByDefault() { return true; } + + @Override + public Set supportedCapabilities() { + return Set.of(DataStreamLifecycle.EFFECTIVE_RETENTION_REST_API_CAPABILITY); + } } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml new file mode 100644 index 0000000000000..ef36f283fe237 --- /dev/null +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/40_effective_retention.yml @@ -0,0 +1,104 @@ +setup: + - requires: + cluster_features: [ "gte_v8.11.0" ] + reason: "Data stream lifecycle was released as tech preview in 8.11" + test_runner_features: allowed_warnings + - do: + allowed_warnings: + - "index template [template-with-lifecycle] has index patterns [managed-data-stream] matching patterns from existing older templates [global] with patterns (global => [*]); this template [template-with-lifecycle] will take precedence during new index creation" + indices.put_index_template: + name: template-with-lifecycle + body: + index_patterns: [ managed-data-stream ] + template: + settings: + index.number_of_replicas: 0 + lifecycle: + data_retention: "30d" + data_stream: { } + - do: + indices.create_data_stream: + name: managed-data-stream +--- +teardown: + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.max: null + data_streams.lifecycle.retention.default: null + +--- +"Retrieve effective retention via the data stream API": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_data_stream/{index} + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + indices.get_data_stream: + name: "managed-data-stream" + - match: { data_streams.0.name: managed-data-stream } + - match: { data_streams.0.lifecycle.data_retention: '30d' } + - match: { data_streams.0.lifecycle.effective_retention: '30d'} + - match: { data_streams.0.lifecycle.retention_determined_by: 'data_stream_configuration'} + +--- +"Retrieve effective retention with explain": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /{index}/_lifecycle/explain + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.max: "7d" + - is_true: acknowledged + 
- do: + indices.get_data_stream: + name: "managed-data-stream" + - match: { data_streams.0.name: managed-data-stream } + - set: + data_streams.0.indices.0.index_name: backing_index + + - do: + indices.explain_data_lifecycle: + index: managed-data-stream + include_defaults: true + - match: { indices.$backing_index.managed_by_lifecycle: true } + - match: { indices.$backing_index.lifecycle.data_retention: '30d' } + - match: { indices.$backing_index.lifecycle.effective_retention: '7d' } + - match: { indices.$backing_index.lifecycle.retention_determined_by: 'max_global_retention' } + +--- +"Retrieve effective retention with data stream lifecycle": + - requires: + reason: "Effective retention was exposed in 8.16+" + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_data_stream/{index}/_lifecycle + capabilities: [ 'data_stream_lifecycle_effective_retention' ] + - do: + indices.put_data_lifecycle: + name: "managed-data-stream" + body: {} + - is_true: acknowledged + - do: + cluster.put_settings: + body: + persistent: + data_streams.lifecycle.retention.default: "7d" + - do: + indices.get_data_lifecycle: + name: "managed-data-stream" + - length: { data_streams: 1} + - match: { data_streams.0.name: managed-data-stream } + - match: { data_streams.0.lifecycle.effective_retention: '7d' } + - match: { data_streams.0.lifecycle.retention_determined_by: 'default_global_retention' } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 89282b8db3646..2fcc5ce3702c1 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -556,7 +556,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws for (DataStreamInfo dataStream : dataStreams) { dataStream.toXContent( builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + DataStreamLifecycle.addEffectiveRetentionParams(params), rolloverConfiguration, globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java index 4dc9ada5dc01f..d51f00681bb5e 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java @@ -217,7 +217,7 @@ public Iterator toXContentChunked(ToXContent.Params outerP builder.field(explainIndexDataLifecycle.getIndex()); explainIndexDataLifecycle.toXContent( builder, - DataStreamLifecycle.maybeAddEffectiveRetentionParams(outerParams), + DataStreamLifecycle.addEffectiveRetentionParams(outerParams), rolloverConfiguration, globalRetention ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index e038763169ef8..39427efbac4fd 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -187,7 +187,7 @@ public XContentBuilder toXContent( 
builder.field(LIFECYCLE_FIELD.getPreferredName());
 lifecycle.toXContent(
 builder,
- org.elasticsearch.cluster.metadata.DataStreamLifecycle.maybeAddEffectiveRetentionParams(params),
+ org.elasticsearch.cluster.metadata.DataStreamLifecycle.addEffectiveRetentionParams(params),
 rolloverConfiguration,
 isSystemDataStream ? null : globalRetention
 );
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
index de9d615022975..cb09fb6108049 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
-import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.xcontent.AbstractObjectParser;
 import org.elasticsearch.xcontent.ConstructingObjectParser;
@@ -55,6 +54,7 @@ public class DataStreamLifecycle implements SimpleDiffable,
 // Versions over the wire
 public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_10_X;
+ public static final String EFFECTIVE_RETENTION_REST_API_CAPABILITY = "data_stream_lifecycle_effective_retention";
 public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode";
 // The following XContent params are used to enrich the DataStreamLifecycle json with effective retention information
@@ -367,14 +367,12 @@ public static DataStreamLifecycle fromXContent(XContentParser parser) throws IOE
 }
 /**
- * Adds a retention param to signal that this serialisation should include the effective retention metadata
+ * Adds a retention param to signal that this serialisation should include the effective retention metadata.
+ * @param params the XContent params to be extended with the new flag
+ * @return XContent params with `include_effective_retention` set to true. If the flag is already present, it will be overridden.
*/ - public static ToXContent.Params maybeAddEffectiveRetentionParams(ToXContent.Params params) { - boolean shouldAddEffectiveRetention = Objects.equals(params.param(RestRequest.PATH_RESTRICTED), "serverless"); - return new DelegatingMapParams( - Map.of(INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, Boolean.toString(shouldAddEffectiveRetention)), - params - ); + public static ToXContent.Params addEffectiveRetentionParams(ToXContent.Params params) { + return new DelegatingMapParams(INCLUDE_EFFECTIVE_RETENTION_PARAMS, params); } public static Builder newBuilder(DataStreamLifecycle lifecycle) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java index 50ab76ed794d8..a6ced9185dbad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java @@ -39,7 +39,6 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DATA_STREAM_CONFIGURATION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DEFAULT_GLOBAL_RETENTION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.MAX_GLOBAL_RETENTION; -import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -348,21 +347,11 @@ public void testEffectiveRetention() { } public void testEffectiveRetentionParams() { - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams(new ToXContent.MapParams(Map.of())); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); - } - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( - new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "not-serverless")) - ); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); - } - { - ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( - new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "serverless")) - ); - assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + Map initialParams = randomMap(0, 10, () -> Tuple.tuple(randomAlphaOfLength(10), randomAlphaOfLength(10))); + ToXContent.Params params = DataStreamLifecycle.addEffectiveRetentionParams(new ToXContent.MapParams(initialParams)); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + for (String key : initialParams.keySet()) { + assertThat(initialParams.get(key), equalTo(params.param(key))); } } From 0f176e1779b2869f8cb2b788aae5509bbbcf3725 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 22 Aug 2024 07:57:15 -0700 Subject: [PATCH 022/352] Remove leftover libsystemd references (#112078) Systemd notification now happens by directly communicating with the systemd socket. This commit removes the native access to libsystemd, which is no longer used. 
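
For reference, the direct socket approach amounts to sending a plain
datagram such as "READY=1" to the unix socket that systemd advertises via
the NOTIFY_SOCKET environment variable. The following is a minimal sketch
of that idea, not the code this commit refers to: the class name is
hypothetical, it assumes the JDK's unix-domain DatagramChannel support
(available in recent JDKs), and it does not handle abstract-namespace
sockets (NOTIFY_SOCKET values starting with '@'):

    import java.io.IOException;
    import java.net.StandardProtocolFamily;
    import java.net.UnixDomainSocketAddress;
    import java.nio.ByteBuffer;
    import java.nio.channels.DatagramChannel;
    import java.nio.charset.StandardCharsets;

    final class SdNotifySketch {
        // Sends a single sd_notify(3)-style state datagram, e.g. "READY=1".
        static void notify(String state) throws IOException {
            String socketPath = System.getenv("NOTIFY_SOCKET");
            if (socketPath == null || socketPath.startsWith("@")) {
                return; // not running under systemd, or an abstract socket this sketch cannot address
            }
            var address = UnixDomainSocketAddress.of(socketPath);
            try (DatagramChannel channel = DatagramChannel.open(StandardProtocolFamily.UNIX)) {
                channel.send(ByteBuffer.wrap(state.getBytes(StandardCharsets.UTF_8)), address);
            }
        }
    }

Calling something like SdNotifySketch.notify("READY=1") once startup
completes is the whole protocol for the readiness case, which is why the
libsystemd dependency could be dropped.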
--- .../jna/JnaNativeLibraryProvider.java | 3 - .../nativeaccess/jna/JnaSystemdLibrary.java | 31 ----- .../nativeaccess/lib/NativeLibrary.java | 4 +- .../nativeaccess/lib/SystemdLibrary.java | 13 -- .../jdk/JdkNativeLibraryProvider.java | 3 - .../nativeaccess/jdk/JdkSystemdLibrary.java | 111 ------------------ 6 files changed, 2 insertions(+), 163 deletions(-) delete mode 100644 libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java delete mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java delete mode 100644 libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 454581ae70b51..79caf04c97246 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -15,7 +15,6 @@ import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; @@ -38,8 +37,6 @@ public JnaNativeLibraryProvider() { JnaMacCLibrary::new, Kernel32Library.class, JnaKernel32Library::new, - SystemdLibrary.class, - JnaSystemdLibrary::new, ZstdLibrary.class, JnaZstdLibrary::new, VectorLibrary.class, diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java deleted file mode 100644 index f06361e8807c5..0000000000000 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaSystemdLibrary.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.jna; - -import com.sun.jna.Library; -import com.sun.jna.Native; - -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; - -class JnaSystemdLibrary implements SystemdLibrary { - private interface NativeFunctions extends Library { - int sd_notify(int unset_environment, String state); - } - - private final NativeFunctions functions; - - JnaSystemdLibrary() { - this.functions = Native.load("libsystemd.so.0", NativeFunctions.class); - } - - @Override - public int sd_notify(int unset_environment, String state) { - return functions.sd_notify(unset_environment, state); - } -} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java index faa0e861dc63f..cdd0a56c52a90 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/NativeLibrary.java @@ -9,5 +9,5 @@ package org.elasticsearch.nativeaccess.lib; /** A marker interface for libraries that can be loaded by {@link org.elasticsearch.nativeaccess.lib.NativeLibraryProvider} */ -public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, SystemdLibrary, - VectorLibrary, ZstdLibrary {} +public sealed interface NativeLibrary permits JavaLibrary, PosixCLibrary, LinuxCLibrary, MacCLibrary, Kernel32Library, VectorLibrary, + ZstdLibrary {} diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java deleted file mode 100644 index 3c4ffefb6e41f..0000000000000 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/SystemdLibrary.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.nativeaccess.lib; - -public non-sealed interface SystemdLibrary extends NativeLibrary { - int sd_notify(int unset_environment, String state); -} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java index cbd43a394379b..1ac7d6c6f897d 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkNativeLibraryProvider.java @@ -14,7 +14,6 @@ import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; @@ -36,8 +35,6 @@ public JdkNativeLibraryProvider() { JdkMacCLibrary::new, Kernel32Library.class, JdkKernel32Library::new, - SystemdLibrary.class, - JdkSystemdLibrary::new, ZstdLibrary.class, JdkZstdLibrary::new, VectorLibrary.class, diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java deleted file mode 100644 index c34c8c070edc5..0000000000000 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkSystemdLibrary.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.nativeaccess.jdk; - -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.lang.foreign.Arena; -import java.lang.foreign.FunctionDescriptor; -import java.lang.foreign.MemorySegment; -import java.lang.invoke.MethodHandle; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.SimpleFileVisitor; -import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static java.lang.foreign.ValueLayout.ADDRESS; -import static java.lang.foreign.ValueLayout.JAVA_INT; -import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; - -class JdkSystemdLibrary implements SystemdLibrary { - - static { - // Find and load libsystemd. We attempt all instances of - // libsystemd in case of multiarch systems, and stop when - // one is successfully loaded. If none can be loaded, - // UnsatisfiedLinkError will be thrown. 
- List paths = findLibSystemd(); - if (paths.isEmpty()) { - String libpath = System.getProperty("java.library.path"); - throw new UnsatisfiedLinkError("Could not find libsystemd in java.library.path: " + libpath); - } - UnsatisfiedLinkError last = null; - for (String path : paths) { - try { - System.load(path); - last = null; - break; - } catch (UnsatisfiedLinkError e) { - last = e; - } - } - if (last != null) { - throw last; - } - } - - // findLibSystemd returns a list of paths to instances of libsystemd - // found within java.library.path. - static List findLibSystemd() { - // Note: on some systems libsystemd does not have a non-versioned symlink. - // System.loadLibrary only knows how to find non-versioned library files, - // so we must manually check the library path to find what we need. - final Path libsystemd = Paths.get("libsystemd.so.0"); - final String libpath = System.getProperty("java.library.path"); - final List foundPaths = new ArrayList<>(); - Arrays.stream(libpath.split(":")).map(Paths::get).filter(Files::exists).forEach(rootPath -> { - try { - Files.walkFileTree(rootPath, new SimpleFileVisitor<>() { - @Override - public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) { - if (Files.isReadable(dir)) { - return FileVisitResult.CONTINUE; - } - return FileVisitResult.SKIP_SUBTREE; - } - - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { - if (file.getFileName().equals(libsystemd)) { - foundPaths.add(file.toAbsolutePath().toString()); - } - return FileVisitResult.CONTINUE; - } - - @Override - public FileVisitResult visitFileFailed(Path file, IOException exc) { - return FileVisitResult.CONTINUE; - } - }); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - return foundPaths; - } - - private static final MethodHandle sd_notify$mh = downcallHandle("sd_notify", FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS)); - - @Override - public int sd_notify(int unset_environment, String state) { - try (Arena arena = Arena.ofConfined()) { - MemorySegment nativeState = MemorySegmentUtil.allocateString(arena, state); - return (int) sd_notify$mh.invokeExact(unset_environment, nativeState); - } catch (Throwable t) { - throw new AssertionError(t); - } - } -} From 322d319a83a140c4af3e151598351ee06f1d7496 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 01:39:25 +1000 Subject: [PATCH 023/352] Mute org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT testLimitedPrivilege #112110 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index cd484b1c46867..581cbe6bd6025 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -182,6 +182,9 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testDeleteJobAfterMissingIndex issue: https://github.com/elastic/elasticsearch/issues/112088 +- class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT + method: testLimitedPrivilege + issue: https://github.com/elastic/elasticsearch/issues/112110 # Examples: # From 5534a1f8565f0be86930bd196df5b2e3d94f4eb3 Mon Sep 17 00:00:00 2001 From: Ankita Kumar Date: Thu, 22 Aug 2024 12:34:45 -0400 Subject: [PATCH 024/352] Fix Test Failure in SplitIndexIT (#112070) This PR fixes the testSplitIndexPrimaryTerm() test inside SplitIndexIT. 
Fixes #111282 --- muted-tests.yml | 3 --- .../action/admin/indices/create/SplitIndexIT.java | 1 + 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 581cbe6bd6025..fe2bb3d37dcd6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -85,9 +85,6 @@ tests: - class: org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectAuthIT method: testAuthenticateWithImplicitFlow issue: https://github.com/elastic/elasticsearch/issues/111191 -- class: org.elasticsearch.action.admin.indices.create.SplitIndexIT - method: testSplitIndexPrimaryTerm - issue: https://github.com/elastic/elasticsearch/issues/111282 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT issue: https://github.com/elastic/elasticsearch/issues/111319 - class: org.elasticsearch.xpack.ml.integration.InferenceIngestInputConfigIT diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 27fd54c39cc95..22549a1562dcd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -276,6 +276,7 @@ public void testSplitIndexPrimaryTerm() throws Exception { .put(indexSettings()) .put("number_of_shards", numberOfShards) .put("index.number_of_routing_shards", numberOfTargetShards) + .put("index.routing.rebalance.enable", EnableAllocationDecider.Rebalance.NONE) ).get(); ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards From 16a12ff43163f3d2f56febe5c57414b0bc313943 Mon Sep 17 00:00:00 2001 From: Vishal Raj Date: Thu, 22 Aug 2024 19:03:32 +0100 Subject: [PATCH 025/352] Revert "[plugin/apm-data] Set fallback to legacy ILM policies" (#112112) * Revert "[plugin/apm-data] Set fallback to legacy ILM policies (#112028)" This reverts commit fd37ef88c28744181d4628a05baed57098884bd9. 
--- .../resources/index-templates/logs-apm.app@template.yaml | 3 --- .../resources/index-templates/logs-apm.error@template.yaml | 3 --- .../resources/index-templates/metrics-apm.app@template.yaml | 3 --- .../index-templates/metrics-apm.internal@template.yaml | 3 --- .../metrics-apm.service_destination.10m@template.yaml | 3 --- .../metrics-apm.service_destination.1m@template.yaml | 3 --- .../metrics-apm.service_destination.60m@template.yaml | 3 --- .../metrics-apm.service_summary.10m@template.yaml | 3 --- .../metrics-apm.service_summary.1m@template.yaml | 3 --- .../metrics-apm.service_summary.60m@template.yaml | 3 --- .../metrics-apm.service_transaction.10m@template.yaml | 3 --- .../metrics-apm.service_transaction.1m@template.yaml | 3 --- .../metrics-apm.service_transaction.60m@template.yaml | 3 --- .../metrics-apm.transaction.10m@template.yaml | 3 --- .../index-templates/metrics-apm.transaction.1m@template.yaml | 3 --- .../metrics-apm.transaction.60m@template.yaml | 3 --- .../resources/index-templates/traces-apm.rum@template.yaml | 3 --- .../index-templates/traces-apm.sampled@template.yaml | 5 ----- .../main/resources/index-templates/traces-apm@template.yaml | 3 --- x-pack/plugin/apm-data/src/main/resources/resources.yaml | 2 +- 20 files changed, 1 insertion(+), 60 deletions(-) diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml index f74f1aa2e900e..21cad50f3fe90 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml @@ -23,6 +23,3 @@ template: index: default_pipeline: logs-apm.app@default-pipeline final_pipeline: apm@pipeline - lifecycle: - name: logs-apm.app_logs-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml index 0ab9f01a76c5c..2cfa7b454722f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml @@ -30,6 +30,3 @@ template: index: default_pipeline: logs-apm.error@default-pipeline final_pipeline: apm@pipeline - lifecycle: - name: logs-apm.error_logs-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml index 5659a5c2cbd55..a3c7ab7c05193 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml @@ -24,6 +24,3 @@ template: index: default_pipeline: metrics-apm.app@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.app_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml index 8e5fca051aaeb..4c7df377a6cfa 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml +++ 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml @@ -25,9 +25,6 @@ template: index: default_pipeline: metrics-apm.internal@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.internal_metrics-default_policy - prefer_ilm: false mappings: properties: data_stream.dataset: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml index 23db583d3a30f..63c9ff9c3b988 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_destination_10m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml index 4cbeb5053d072..6995a2d09b12e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml @@ -26,9 +26,6 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_destination_1m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml index d29f953cb73a1..b39d0beca3740 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_destination@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_destination_60m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml index 57f63b9ed7dcc..8d92b21866bb8 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_summary_10m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git 
a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml index 6b8e604e3f03e..de19df330aa0e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml @@ -26,9 +26,6 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_summary_1m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml index 1c16e20a34f51..002676eb08cc1 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_summary@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_summary_60m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml index db85407599f67..549af3942dcd3 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_transaction_10m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml index 9e3220b2c4c3a..9bdacfc337663 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml @@ -26,9 +26,6 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_transaction_1m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml index c10435b2b50a6..8bcbeb53c74fe 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml +++ 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.service_transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.service_transaction_60m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml index 92c6a430a377d..68c1dc0f31c1e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.transaction_10m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml index 78ed0959f270f..6065f6e12f999 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml @@ -26,9 +26,6 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.transaction_1m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml index 3625ecfc1458b..d8889ceb63f87 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml @@ -27,9 +27,6 @@ template: index: default_pipeline: metrics-apm.transaction@default-pipeline final_pipeline: metrics-apm@pipeline - lifecycle: - name: metrics-apm.transaction_60m_metrics-default_policy - prefer_ilm: false mappings: properties: metricset.interval: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml index 53647284d2b91..d299481ff6e21 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml @@ -25,9 +25,6 @@ template: index: default_pipeline: traces-apm.rum@default-pipeline final_pipeline: traces-apm@pipeline - lifecycle: - name: traces-apm.rum_traces-default_policy - prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml index 9cffe241e0979..81457e2f204cb 100644 --- 
a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml @@ -20,11 +20,6 @@ ignore_missing_component_templates: template: lifecycle: data_retention: 1h - settings: - index: - lifecycle: - name: traces-apm.sampled_traces-default_policy - prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml index bcf406faa71da..fda953171b793 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml @@ -24,9 +24,6 @@ template: index: default_pipeline: traces-apm@default-pipeline final_pipeline: traces-apm@pipeline - lifecycle: - name: traces-apm.traces-default_policy - prefer_ilm: false mappings: properties: data_stream.type: diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index cd2111ffb9f83..3e66769d939ad 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 7 +version: 8 component-templates: # Data lifecycle. From d802e6fd11114a2a425d6dd8023c3a8b17144513 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 22 Aug 2024 16:04:32 -0400 Subject: [PATCH 026/352] Test infra: Catch and report errors building error (#112109) This modifies our `matchesMap` and `matchesList` infrastructure to report when it encounters an error building the description of an error. This looks something like: ``` a map containing foo: expected "val" but error describing bar: ``` This preserves the original error message while also giving you the context for the actual failure. 
Relates to #112039 Relates to #112049 --- muted-tests.yml | 6 --- .../org/elasticsearch/test/MapMatcher.java | 21 ++++++--- .../elasticsearch/test/MapMatcherTests.java | 43 +++++++++++++++++++ 3 files changed, 58 insertions(+), 12 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index fe2bb3d37dcd6..e23763ba2cdc0 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -167,12 +167,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT method: testScaledFloat issue: https://github.com/elastic/elasticsearch/issues/112003 -- class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT - method: testForceSleepsProfile {SYNC} - issue: https://github.com/elastic/elasticsearch/issues/112039 -- class: org.elasticsearch.xpack.esql.qa.single_node.RestEsqlIT - method: testForceSleepsProfile {ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/112049 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} issue: https://github.com/elastic/elasticsearch/issues/111999 diff --git a/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java index 7a788eaacc6d4..b702809de5bed 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MapMatcher.java @@ -14,6 +14,8 @@ import org.hamcrest.StringDescription; import org.hamcrest.TypeSafeMatcher; +import java.io.PrintWriter; +import java.io.StringWriter; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; @@ -292,17 +294,24 @@ static void describeEntryUnexepectedButOk(Object value, Description description) } static void describeEntryValue(int keyWidth, Matcher matcher, Object v, Description description) { - if (v instanceof Map && matcher instanceof MapMatcher) { - ((MapMatcher) matcher).describePotentialMismatch(keyWidth + INDENT, (Map) v, description); + if (v instanceof Map && matcher instanceof MapMatcher mm) { + mm.describePotentialMismatch(keyWidth + INDENT, (Map) v, description); return; } - if (v instanceof List && matcher instanceof ListMatcher) { - ((ListMatcher) matcher).describePotentialMismatch(keyWidth + INDENT, (List) v, description); + if (v instanceof List && matcher instanceof ListMatcher lm) { + lm.describePotentialMismatch(keyWidth + INDENT, (List) v, description); return; } if (false == matcher.matches(v)) { - description.appendText("expected ").appendDescriptionOf(matcher).appendText(" but "); - matcher.describeMismatch(v, description); + try { + description.appendText("expected ").appendDescriptionOf(matcher).appendText(" but "); + matcher.describeMismatch(v, description); + } catch (Exception e) { + description.appendText("error describing "); + StringWriter trace = new StringWriter(); + e.printStackTrace(new PrintWriter(trace)); + description.appendValue(trace); + } return; } description.appendValue(v); diff --git a/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java b/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java index 48c9fcab3898a..3822c0d93d28d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/MapMatcherTests.java @@ -11,8 +11,10 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import 
org.elasticsearch.xcontent.json.JsonXContent; +import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.StringDescription; +import org.hamcrest.TypeSafeMatcher; import java.io.IOException; import java.io.InputStream; @@ -24,7 +26,9 @@ import static org.elasticsearch.test.ListMatcher.matchesList; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -395,6 +399,45 @@ public void testSubMapDescribeTo() { baz: <0>""")); } + public void testSubMatcherDescribeFails() { + assertMismatch(Map.of("foo", 2.0, "bar", 2), matchesMap().entry("foo", new TypeSafeMatcher() { + @Override + public void describeTo(Description description) { + throw new IllegalStateException("intentional failure"); + } + + @Override + protected boolean matchesSafely(Object o) { + return false; + } + }).entry("bar", 2), both(containsString(""" + a map containing + foo: expected error describing """))); + } + + public void testSubMatcherMismatchFails() { + assertMismatch(Map.of("foo", 2.0, "bar", 2), matchesMap().entry("foo", new TypeSafeMatcher() { + @Override + protected void describeMismatchSafely(Object item, Description mismatchDescription) { + throw new IllegalStateException("intentional failure"); + } + + @Override + public void describeTo(Description description) { + description.appendValue("foo"); + } + + @Override + protected boolean matchesSafely(Object o) { + return false; + } + }).entry("bar", 2), both(containsString(""" + a map containing + foo: expected "foo" but error describing """))); + } + static void assertMismatch(T v, Matcher matcher, Matcher mismatchDescriptionMatcher) { assertMap(v, not(matcher)); StringDescription description = new StringDescription(); From 5dcdc34927f0a0b87820250f2b6d7cf982dc13cf Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 06:43:13 +1000 Subject: [PATCH 027/352] Mute org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT test {stats.ByTwoCalculatedSecondOverwrites SYNC} #112117 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index e23763ba2cdc0..cc459b7cccdf2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -176,6 +176,9 @@ tests: - class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT method: testLimitedPrivilege issue: https://github.com/elastic/elasticsearch/issues/112110 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByTwoCalculatedSecondOverwrites SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112117 # Examples: # From 7b1d2a254341701f0a783826894089c99e0b96d3 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 06:44:31 +1000 Subject: [PATCH 028/352] Mute org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} #112118 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index cc459b7cccdf2..ec097616c2af6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -179,6 +179,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT 
method: test {stats.ByTwoCalculatedSecondOverwrites SYNC} issue: https://github.com/elastic/elasticsearch/issues/112117 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112118 # Examples: # From 14b7170921f2f0e4109255b83cb9af175385d87f Mon Sep 17 00:00:00 2001 From: Nick Tindall Date: Fri, 23 Aug 2024 09:13:41 +1000 Subject: [PATCH 029/352] Don't fail retention lease sync actions due to capacity constraints (#109414) Closes #105926 --- docs/changelog/109414.yaml | 6 ++ .../BackgroundRetentionLeaseSyncActionIT.java | 75 ++++++++++++++ .../seqno/RetentionLeaseSyncActionIT.java | 98 +++++++++++++++++++ .../RetentionLeaseBackgroundSyncAction.java | 2 +- .../index/seqno/RetentionLeaseSyncAction.java | 4 +- 5 files changed, 182 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/109414.yaml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java diff --git a/docs/changelog/109414.yaml b/docs/changelog/109414.yaml new file mode 100644 index 0000000000000..81b7541bde35b --- /dev/null +++ b/docs/changelog/109414.yaml @@ -0,0 +1,6 @@ +pr: 109414 +summary: Don't fail retention lease sync actions due to capacity constraints +area: CRUD +type: bug +issues: + - 105926 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java new file mode 100644 index 0000000000000..0bab5be245ecf --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/BackgroundRetentionLeaseSyncActionIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.stream.Stream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class BackgroundRetentionLeaseSyncActionIT extends ESIntegTestCase { + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() throws Exception { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (var ignored = fullyAllocateCircuitBreakerOnNode(replica, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + final ClusterState state = internalCluster().clusterService().state(); + final Index testIndex = resolveIndex("test"); + final ShardId testIndexShardZero = new ShardId(testIndex, 0); + final String testLeaseId = "test-lease/123"; + RetentionLeases newLeases = addTestLeaseToRetentionLeases(primary, testIndex, testLeaseId); + internalCluster().getInstance(RetentionLeaseSyncer.class, primary) + .backgroundSync( + testIndexShardZero, + state.routingTable().shardRoutingTable(testIndexShardZero).primaryShard().allocationId().getId(), + state.term(), + newLeases + ); + + // Wait for test lease to appear on replica + IndicesService replicaIndicesService = internalCluster().getInstance(IndicesService.class, replica); + assertBusy(() -> { + RetentionLeases retentionLeases = replicaIndicesService.indexService(testIndex).getShard(0).getRetentionLeases(); + assertTrue(retentionLeases.contains(testLeaseId)); + }); + } + } + + private static RetentionLeases addTestLeaseToRetentionLeases(String primaryNodeName, Index index, String leaseId) { + IndicesService primaryIndicesService = internalCluster().getInstance(IndicesService.class, primaryNodeName); + RetentionLeases currentLeases = primaryIndicesService.indexService(index).getShard(0).getRetentionLeases(); + RetentionLease newLease = new RetentionLease(leaseId, 0, System.currentTimeMillis(), "test source"); + return new RetentionLeases( + currentLeases.primaryTerm(), + currentLeases.version() + 1, + Stream.concat(currentLeases.leases().stream(), Stream.of(newLease)).toList() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java new file mode 100644 index 0000000000000..2d8f455792172 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionIT.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RetentionLeaseSyncActionIT extends ESIntegTestCase { + + public void testActionCompletesWhenReplicaCircuitBreakersAreAtCapacity() { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (var ignored = fullyAllocateCircuitBreakerOnNode(replica, CircuitBreaker.IN_FLIGHT_REQUESTS)) { + assertThatRetentionLeaseSyncCompletesSuccessfully(primary); + } + } + + public void testActionCompletesWhenPrimaryIndexingPressureIsAtCapacity() { + internalCluster().startMasterOnlyNodes(1); + String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); + + String replica = internalCluster().startDataOnlyNode(); + ensureGreen("test"); + + try (Releasable ignored = fullyAllocatePrimaryIndexingCapacityOnNode(primary)) { + assertThatRetentionLeaseSyncCompletesSuccessfully(primary); + } + } + + private static void assertThatRetentionLeaseSyncCompletesSuccessfully(String primaryNodeName) { + RetentionLeaseSyncer instance = internalCluster().getInstance(RetentionLeaseSyncer.class, primaryNodeName); + PlainActionFuture retentionLeaseSyncResult = new PlainActionFuture<>(); + ClusterState state = internalCluster().clusterService().state(); + ShardId testIndexShardZero = new ShardId(resolveIndex("test"), 0); + ShardRouting primaryShard = state.routingTable().shardRoutingTable(testIndexShardZero).primaryShard(); + instance.sync( + testIndexShardZero, + primaryShard.allocationId().getId(), + state.term(), + RetentionLeases.EMPTY, + retentionLeaseSyncResult + ); + safeGet(retentionLeaseSyncResult); + } + + /** + * Fully allocate primary indexing capacity on a node + * + * @param targetNode The name of the node on which to allocate + * @return A {@link Releasable} which will release the capacity when closed + */ + private static Releasable fullyAllocatePrimaryIndexingCapacityOnNode(String targetNode) { + return internalCluster().getInstance(IndexingPressure.class, targetNode) + .markPrimaryOperationStarted( + 1, + 
IndexingPressure.MAX_INDEXING_BYTES.get(internalCluster().getInstance(Settings.class, targetNode)).getBytes() + 1,
+                true
+            );
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java
index a7fa88633b806..f90d8945857b7 100644
--- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java
+++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java
@@ -84,7 +84,7 @@ public RetentionLeaseBackgroundSyncAction(
             threadPool.executor(ThreadPool.Names.MANAGEMENT),
             SyncGlobalCheckpointAfterOperation.DoNotSync,
             PrimaryActionExecution.RejectOnOverload,
-            ReplicaActionExecution.SubjectToCircuitBreaker
+            ReplicaActionExecution.BypassCircuitBreaker
         );
     }
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java
index b7d632eab3bc5..67ed7c6e4c191 100644
--- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java
+++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java
@@ -91,10 +91,10 @@ public RetentionLeaseSyncAction(
             RetentionLeaseSyncAction.Request::new,
             RetentionLeaseSyncAction.Request::new,
             new ManagementOnlyExecutorFunction(threadPool),
-            PrimaryActionExecution.RejectOnOverload,
+            PrimaryActionExecution.Force,
             indexingPressure,
             systemIndices,
-            ReplicaActionExecution.SubjectToCircuitBreaker
+            ReplicaActionExecution.BypassCircuitBreaker
         );
     }

From 1072f2bbab64d49244d9592239c4c28a514c2237 Mon Sep 17 00:00:00 2001
From: Parker Timmins
Date: Thu, 22 Aug 2024 21:15:29 -0500
Subject: [PATCH 030/352] Add interval based SLM scheduling (#110847)

Add the ability to schedule SLM policies with a time unit interval
schedule rather than a cron job schedule. For example, an SLM policy can
be created with the argument "schedule":"30m". This will create a policy
that will run 30 minutes after the policy modification_date. It will then
run again every time another 30 minutes has passed. Every time the policy
is changed, the next snapshot will be re-scheduled to run one interval
after the new modification date.
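For reference, the interval arithmetic described above keeps runs anchored to the policy's modification_date: the next execution is the first whole interval after "now", counted from that fixed start. A rough sketch of the calculation (illustrative method name, not the actual TimeValueSchedule source):

```java
// Next run time for an interval schedule anchored at the policy modification
// date. With modifiedDate = 12:00 and a 30m interval, calling at 12:47
// returns 13:00, and calling at 12:00 sharp returns 12:30.
static long nextScheduledTimeAfter(long modifiedDateMillis, long nowMillis, long intervalMillis) {
    if (nowMillis < modifiedDateMillis) {
        return modifiedDateMillis + intervalMillis; // first run is one interval after modification
    }
    long completedIntervals = (nowMillis - modifiedDateMillis) / intervalMillis;
    return modifiedDateMillis + (completedIntervals + 1) * intervalMillis;
}
```

Anchoring on a fixed start time is also why the change below threads a `fixedStartTime` through `SchedulerEngine.Job`: deriving the basis time from cluster state keeps run times stable across master failovers and restarts, instead of re-anchoring at whatever moment a new SchedulerEngine happens to be built.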
--- docs/changelog/110847.yaml | 5 + docs/reference/slm/apis/slm-put.asciidoc | 31 +++- .../lifecycle/DataStreamLifecycleService.java | 8 +- .../common/scheduler/SchedulerEngine.java | 60 +++---- .../health/HealthPeriodicLogger.java | 2 +- .../scheduler/SchedulerEngineTests.java | 2 +- .../license/ClusterStateLicenseService.java | 6 +- .../core/slm/SnapshotLifecyclePolicy.java | 79 ++++++-- .../core/slm/SnapshotLifecyclePolicyItem.java | 3 +- .../slm/SnapshotLifecyclePolicyMetadata.java | 5 + .../slm/SnapshotLifecyclePolicyItemTests.java | 2 +- .../SnapshotLifecyclePolicyMetadataTests.java | 41 ++++- .../IndexLifecycleInitialisationTests.java | 4 +- .../xpack/ilm/IndexLifecycleService.java | 4 +- .../xpack/rollup/job/RollupJobTask.java | 4 +- .../xpack/slm/SnapshotLifecycleRestIT.java | 41 ++++- .../xpack/slm/SnapshotLifecycle.java | 6 +- .../xpack/slm/SnapshotLifecycleFeatures.java | 6 + .../xpack/slm/SnapshotLifecycleService.java | 25 ++- .../xpack/slm/SnapshotLifecycleTask.java | 8 +- .../xpack/slm/SnapshotRetentionTask.java | 8 +- .../slm/action/ReservedSnapshotAction.java | 8 +- .../TransportPutSnapshotLifecycleAction.java | 8 +- .../slm/SnapshotLifecyclePolicyTests.java | 170 +++++++++++++++++- .../slm/SnapshotLifecycleServiceTests.java | 143 +++++++++++---- .../slm/SnapshotRetentionServiceTests.java | 5 +- ...vedSnapshotLifecycleStateServiceTests.java | 64 ++++++- .../history/SnapshotHistoryStoreTests.java | 13 +- 28 files changed, 611 insertions(+), 150 deletions(-) create mode 100644 docs/changelog/110847.yaml diff --git a/docs/changelog/110847.yaml b/docs/changelog/110847.yaml new file mode 100644 index 0000000000000..214adc97ac7cb --- /dev/null +++ b/docs/changelog/110847.yaml @@ -0,0 +1,5 @@ +pr: 110847 +summary: SLM Interval based scheduling +area: ILM+SLM +type: feature +issues: [] diff --git a/docs/reference/slm/apis/slm-put.asciidoc b/docs/reference/slm/apis/slm-put.asciidoc index be265554deef5..51ad571ee12e7 100644 --- a/docs/reference/slm/apis/slm-put.asciidoc +++ b/docs/reference/slm/apis/slm-put.asciidoc @@ -100,13 +100,19 @@ Minimum number of snapshots to retain, even if the snapshots have expired. ==== `schedule`:: -(Required, <>) +(Required, <> or <>) Periodic or absolute schedule at which the policy creates snapshots. {slm-init} applies `schedule` changes immediately. +Schedule may be either a Cron schedule or a time unit describing the interval between snapshots. +When using a time unit interval, the first snapshot is scheduled one interval after the policy modification time, and then again every interval after. 
+ [[slm-api-put-example]] ==== {api-examples-title} + +[[slm-api-put-daily-policy]] +===== Create a policy Create a `daily-snapshots` lifecycle policy: [source,console] @@ -138,4 +144,25 @@ PUT /_slm/policy/daily-snapshots <6> Optional retention configuration <7> Keep snapshots for 30 days <8> Always keep at least 5 successful snapshots, even if they're more than 30 days old -<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old \ No newline at end of file +<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old + + +[[slm-api-put-hourly-policy]] +===== Use Interval Scheduling +Create an `hourly-snapshots` lifecycle policy using interval scheduling: + +[source,console] +-------------------------------------------------- +PUT /_slm/policy/hourly-snapshots +{ + "schedule": "1h", + "name": "", + "repository": "my_repository", + "config": { + "indices": ["data-*", "important"] + } +} +-------------------------------------------------- +// TEST[setup:setup-repository] +Creates a snapshot once every hour. The first snapshot will be created one hour after the policy is modified, +with subsequent snapshots being created every hour afterward. diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 0cb29dbcf5b2f..0b24a3c9c9101 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -296,13 +296,13 @@ public void close() { @Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(LIFECYCLE_JOB_NAME)) { + if (event.jobName().equals(LIFECYCLE_JOB_NAME)) { if (this.isMaster) { logger.trace( "Data stream lifecycle job triggered: {}, {}, {}", - event.getJobName(), - event.getScheduledTime(), - event.getTriggeredTime() + event.jobName(), + event.scheduledTime(), + event.triggeredTime() ); run(clusterService.state()); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { diff --git a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java index be4d7c741bc92..ab63ab4062767 100644 --- a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java +++ b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.core.Nullable; import java.time.Clock; import java.util.Collection; @@ -39,47 +40,27 @@ */ public class SchedulerEngine { - public static class Job { - private final String id; - private final Schedule schedule; - + /** + * In most cases a Job only requires a `schedule` and an `id`, but an optional `fixedStartTime` + * can also be used. This is used as a fixed `startTime` argument for all calls to + * `schedule.nextScheduledTimeAfter(startTime, now)`. Interval-based schedules use `startTime` + * as a basis time from which all run times are calculated. 
If a Job does not contain a + * `fixedStartTime`, this basis time will be the time at which the Job is added to the SchedulerEngine. + * This could change if a master change or restart causes a new SchedulerEngine to be constructed. + * But using a `fixedStartTime` populated from a time stored in cluster state allows the basis time + * to remain unchanged across master changes and restarts. + * + * @param id the id of the job + * @param schedule the schedule which is used to calculate when the job runs + * @param fixedStartTime a fixed time in the past which the schedule uses to calculate run times, + */ + public record Job(String id, Schedule schedule, @Nullable Long fixedStartTime) { public Job(String id, Schedule schedule) { - this.id = id; - this.schedule = schedule; - } - - public String getId() { - return id; - } - - public Schedule getSchedule() { - return schedule; + this(id, schedule, null); } } - public static class Event { - private final String jobName; - private final long triggeredTime; - private final long scheduledTime; - - public Event(String jobName, long triggeredTime, long scheduledTime) { - this.jobName = jobName; - this.triggeredTime = triggeredTime; - this.scheduledTime = scheduledTime; - } - - public String getJobName() { - return jobName; - } - - public long getTriggeredTime() { - return triggeredTime; - } - - public long getScheduledTime() { - return scheduledTime; - } - + public record Event(String jobName, long triggeredTime, long scheduledTime) { @Override public String toString() { return "Event[jobName=" + jobName + "," + "triggeredTime=" + triggeredTime + "," + "scheduledTime=" + scheduledTime + "]"; @@ -159,12 +140,13 @@ public Set scheduledJobIds() { } public void add(Job job) { - ActiveSchedule schedule = new ActiveSchedule(job.getId(), job.getSchedule(), clock.millis()); + final long startTime = job.fixedStartTime() == null ? 
clock.millis() : job.fixedStartTime(); + ActiveSchedule schedule = new ActiveSchedule(job.id(), job.schedule(), startTime); schedules.compute(schedule.name, (name, previousSchedule) -> { if (previousSchedule != null) { previousSchedule.cancel(); } - logger.debug(() -> "added job [" + job.getId() + "]"); + logger.debug(() -> "added job [" + job.id() + "]"); return schedule; }); } diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index 8208e4bd70c34..97c0679bed34f 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -299,7 +299,7 @@ protected void doClose() throws IOException { @Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(HEALTH_PERIODIC_LOGGER_JOB_NAME) && this.enabled) { + if (event.jobName().equals(HEALTH_PERIODIC_LOGGER_JOB_NAME) && this.enabled) { this.tryToLogHealth(); } } diff --git a/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java b/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java index e10898da978be..8672189220a9f 100644 --- a/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java +++ b/server/src/test/java/org/elasticsearch/common/scheduler/SchedulerEngineTests.java @@ -166,7 +166,7 @@ public void testCancellingDuringRunPreventsRescheduling() throws Exception { final String jobId = randomAlphaOfLength(4); try { engine.register(event -> { - assertThat(event.getJobName(), is(jobId)); + assertThat(event.jobName(), is(jobId)); calledCount.incrementAndGet(); jobRunningLatch.countDown(); try { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java index b352a9abce886..f5123b9352fe3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java @@ -289,11 +289,11 @@ public void triggered(SchedulerEngine.Event event) { final LicensesMetadata licensesMetadata = getLicensesMetadata(); if (licensesMetadata != null) { final License license = licensesMetadata.getLicense(); - if (event.getJobName().equals(LICENSE_JOB)) { + if (event.jobName().equals(LICENSE_JOB)) { updateXPackLicenseState(license); - } else if (event.getJobName().startsWith(ExpirationCallback.EXPIRATION_JOB_PREFIX)) { + } else if (event.jobName().startsWith(ExpirationCallback.EXPIRATION_JOB_PREFIX)) { expirationCallbacks.stream() - .filter(expirationCallback -> expirationCallback.getId().equals(event.getJobName())) + .filter(expirationCallback -> expirationCallback.getId().equals(event.jobName())) .forEach(expirationCallback -> expirationCallback.on(license)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index fb892a318f07c..23bf21004040a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.slm; -import 
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.cluster.SimpleDiffable; @@ -15,6 +14,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.scheduler.SchedulerEngine; +import org.elasticsearch.common.scheduler.TimeValueSchedule; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.snapshots.SnapshotsService; @@ -24,9 +25,11 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.scheduler.Cron; +import org.elasticsearch.xpack.core.scheduler.CronSchedule; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.time.Clock; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -48,6 +51,7 @@ public class SnapshotLifecyclePolicy implements SimpleDiffable configuration; private final SnapshotRetentionConfiguration retentionPolicy; + private final boolean isCronSchedule; private static final ParseField NAME = new ParseField("name"); private static final ParseField SCHEDULE = new ParseField("schedule"); @@ -92,6 +96,7 @@ public SnapshotLifecyclePolicy( this.repository = Objects.requireNonNull(repository, "policy snapshot repository is required"); this.configuration = configuration; this.retentionPolicy = retentionPolicy; + this.isCronSchedule = isCronSchedule(schedule); } public SnapshotLifecyclePolicy(StreamInput in) throws IOException { @@ -101,6 +106,7 @@ public SnapshotLifecyclePolicy(StreamInput in) throws IOException { this.repository = in.readString(); this.configuration = in.readGenericMap(); this.retentionPolicy = in.readOptionalWriteable(SnapshotRetentionConfiguration::new); + this.isCronSchedule = isCronSchedule(schedule); } public String getId() { @@ -129,9 +135,43 @@ public SnapshotRetentionConfiguration getRetentionPolicy() { return this.retentionPolicy; } - public long calculateNextExecution() { - final Cron scheduleEvaluator = new Cron(this.schedule); - return scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); + boolean isCronSchedule() { + return this.isCronSchedule; + } + + /** + * @return whether `schedule` is a cron expression + */ + static boolean isCronSchedule(String schedule) { + try { + new Cron(schedule); + return true; + } catch (IllegalArgumentException e) { + return false; + } + } + + /** + * @return whether `schedule` is an interval time unit expression + */ + public static boolean isIntervalSchedule(String schedule) { + try { + TimeValue.parseTimeValue(schedule, "schedule"); + return true; + } catch (IllegalArgumentException e) { + return false; + } + } + + public long calculateNextExecution(long modifiedDate, Clock clock) { + if (isCronSchedule()) { + final Cron scheduleEvaluator = new Cron(this.schedule); + return scheduleEvaluator.getNextValidTimeAfter(clock.millis()); + } else { + final TimeValue interval = TimeValue.parseTimeValue(this.schedule, SCHEDULE.getPreferredName()); + final TimeValueSchedule timeValueSchedule = new TimeValueSchedule(interval); + return timeValueSchedule.nextScheduledTimeAfter(modifiedDate, clock.millis()); + } } /** @@ -139,13 +179,17 @@ public long calculateNextExecution() { *
* In ordinary cases, this can be treated as the interval between executions of the schedule (for schedules like 'twice an hour' or * 'every five minutes'). - * + * @param clock a clock to provide current time * @return a {@link TimeValue} representing the difference between the next two valid times after now, or {@link TimeValue#MINUS_ONE} * if either of the next two times after now is unsupported according to @{@link Cron#getNextValidTimeAfter(long)} */ - public TimeValue calculateNextInterval() { + public TimeValue calculateNextInterval(Clock clock) { + if (isCronSchedule() == false) { + return TimeValue.parseTimeValue(schedule, SCHEDULE.getPreferredName()); + } + final Cron scheduleEvaluator = new Cron(this.schedule); - long next1 = scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); + long next1 = scheduleEvaluator.getNextValidTimeAfter(clock.millis()); long next2 = scheduleEvaluator.getNextValidTimeAfter(next1); if (next1 > 0 && next2 > 0) { return TimeValue.timeValueMillis(next2 - next1); @@ -154,6 +198,15 @@ public TimeValue calculateNextInterval() { } } + public SchedulerEngine.Job buildSchedulerJob(String jobId, long modifiedDate) { + if (isCronSchedule()) { + return new SchedulerEngine.Job(jobId, new CronSchedule(schedule)); + } else { + TimeValue timeValue = TimeValue.parseTimeValue(schedule, "schedule"); + return new SchedulerEngine.Job(jobId, new TimeValueSchedule(timeValue), modifiedDate); + } + } + public ActionRequestValidationException validate() { ActionRequestValidationException err = new ActionRequestValidationException(); @@ -182,13 +235,19 @@ public ActionRequestValidationException validate() { } // Schedule validation + // n.b. there's more validation beyond this in SnapshotLifecycleService#validateMinimumInterval if (Strings.hasText(schedule) == false) { err.addValidationError("invalid schedule [" + schedule + "]: must not be empty"); } else { try { - new Cron(schedule); - } catch (IllegalArgumentException e) { - err.addValidationError("invalid schedule: " + ExceptionsHelper.unwrapCause(e).getMessage()); + var intervalTimeValue = TimeValue.parseTimeValue(schedule, SCHEDULE.getPreferredName()); + if (intervalTimeValue.millis() == 0) { + err.addValidationError("invalid schedule [" + schedule + "]: time unit must be at least 1 millisecond"); + } + } catch (IllegalArgumentException e1) { + if (isCronSchedule(schedule) == false) { + err.addValidationError("invalid schedule [" + schedule + "]: must be a valid cron expression or time unit"); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java index 6a352461c2e1e..c3c70e595eb75 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItem.java @@ -20,6 +20,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.time.Clock; import java.util.Objects; /** @@ -171,7 +172,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.timeField( SnapshotLifecyclePolicyMetadata.NEXT_EXECUTION_MILLIS.getPreferredName(), SnapshotLifecyclePolicyMetadata.NEXT_EXECUTION.getPreferredName(), - policy.calculateNextExecution() + policy.calculateNextExecution(modifiedDate, Clock.systemUTC()) ); if (snapshotInProgress != null) { 
builder.field(SNAPSHOT_IN_PROGRESS.getPreferredName(), snapshotInProgress); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java index 0a97810fadacf..672578787762e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.scheduler.SchedulerEngine; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -181,6 +182,10 @@ public long getInvocationsSinceLastSuccess() { return invocationsSinceLastSuccess; } + public SchedulerEngine.Job buildSchedulerJob(String jobId) { + return policy.buildSchedulerJob(jobId, modifiedDate); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java index 3eeaa18f0a81e..2dd1d8d4ec13a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java @@ -67,7 +67,7 @@ protected SnapshotLifecyclePolicyItem mutateInstance(SnapshotLifecyclePolicyItem return new SnapshotLifecyclePolicyItem( instance.getPolicy(), instance.getVersion(), - randomValueOtherThan(instance.getModifiedDate(), ESTestCase::randomNonNegativeLong), + randomValueOtherThan(instance.getModifiedDate(), SnapshotLifecyclePolicyMetadataTests::randomModifiedTime), instance.getLastSuccess(), instance.getLastFailure(), instance.getSnapshotInProgress(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java index 090b4fe78253d..66e25c3b91db2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java @@ -13,6 +13,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.time.Clock; +import java.time.Duration; import java.util.HashMap; import java.util.Map; @@ -79,7 +81,7 @@ public static SnapshotLifecyclePolicyMetadata createRandomPolicyMetadata(String SnapshotLifecyclePolicyMetadata.Builder builder = SnapshotLifecyclePolicyMetadata.builder() .setPolicy(randomSnapshotLifecyclePolicy(policyId)) .setVersion(randomNonNegativeLong()) - .setModifiedDate(randomNonNegativeLong()); + .setModifiedDate(randomModifiedTime()); if (randomBoolean()) { builder.setHeaders(randomHeaders()); } @@ -102,6 +104,7 @@ public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String polic for (int i = 0; i < randomIntBetween(2, 5); i++) { 
config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); } + return new SnapshotLifecyclePolicy( policyId, randomAlphaOfLength(4), @@ -122,7 +125,41 @@ public static SnapshotRetentionConfiguration randomRetention() { ); } - public static String randomSchedule() { + public static String randomCronSchedule() { return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; } + + public static String randomTimeValueString() { + // restrict to intervals greater than slm.minimum_interval value of 15 minutes + Duration minInterval = Duration.ofMinutes(15); + Map unitMinVal = Map.of( + "nanos", + minInterval.toNanos(), + "micros", + minInterval.toNanos() * 1000, + "ms", + minInterval.toMillis(), + "s", + minInterval.toSeconds(), + "m", + minInterval.toMinutes(), + "h", + minInterval.toHours(), + "d", + minInterval.toDays() + ); + var unit = randomFrom(unitMinVal.keySet()); + long minVal = Math.max(1, unitMinVal.get(unit)); + long value = randomLongBetween(minVal, 1000 * minVal); + return value + unit; + } + + public static String randomSchedule() { + return randomBoolean() ? randomCronSchedule() : randomTimeValueString(); + } + + public static long randomModifiedTime() { + // if modified time is after the current time, validation will fail + return randomLongBetween(0, Clock.systemUTC().millis()); + } } diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index 3530f33704beb..30d1d6f7c914b 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -496,7 +496,7 @@ public void testPollIntervalUpdate() throws Exception { assertThat(indexLifecycleService.getScheduler().jobCount(), equalTo(1)); }); { - TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().schedule(); assertThat(schedule.getInterval(), equalTo(pollInterval)); } @@ -504,7 +504,7 @@ public void testPollIntervalUpdate() throws Exception { TimeValue newPollInterval = TimeValue.timeValueHours(randomLongBetween(6, 1000)); updateClusterSettings(Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, newPollInterval.getStringRep())); { - TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule(); + TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().schedule(); assertThat(schedule.getInterval(), equalTo(newPollInterval)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java index c2e2c80998992..9c978ffc25cba 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java @@ -353,8 +353,8 @@ private void cancelJob() { @Override public void triggered(SchedulerEngine.Event event) { - if (event.getJobName().equals(XPackField.INDEX_LIFECYCLE)) { - logger.trace("job triggered: " + event.getJobName() + ", " + event.getScheduledTime() + ", 
" + event.getTriggeredTime()); + if (event.jobName().equals(XPackField.INDEX_LIFECYCLE)) { + logger.trace("job triggered: " + event.jobName() + ", " + event.scheduledTime() + ", " + event.triggeredTime()); triggerPolicies(clusterService.state(), false); } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index f4c420db47ac3..5704d7837268b 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -463,8 +463,8 @@ public synchronized void onCancelled() { public synchronized void triggered(SchedulerEngine.Event event) { // Verify this is actually the event that we care about, then trigger the indexer. // Note that the status of the indexer is checked in the indexer itself - if (event.getJobName().equals(SCHEDULE_NAME + "_" + job.getConfig().getId())) { - logger.debug("Rollup indexer [" + event.getJobName() + "] schedule has triggered, state: [" + indexer.getState() + "]"); + if (event.jobName().equals(SCHEDULE_NAME + "_" + job.getConfig().getId())) { + logger.debug("Rollup indexer [" + event.jobName() + "] schedule has triggered, state: [" + indexer.getState() + "]"); indexer.maybeTriggerAsyncJob(System.currentTimeMillis()); } } diff --git a/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java b/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index abaf9a14aeadb..d42c8ec9655ef 100644 --- a/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java +++ b/x-pack/plugin/slm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -38,6 +38,8 @@ import java.io.IOException; import java.io.InputStream; +import java.time.Duration; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -108,7 +110,8 @@ public void testFullPolicySnapshot() throws Exception { // allow arbitrarily frequent slm snapshots disableSLMMinimumIntervalValidation(); - createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoId, indexName, true); + var schedule = randomBoolean() ? "*/1 * * * * ?" : "1s"; + createSnapshotPolicy(policyName, "snap", schedule, repoId, indexName, true); // Check that the snapshot was actually taken assertBusy(() -> { @@ -176,7 +179,8 @@ public void testPolicyFailure() throws Exception { disableSLMMinimumIntervalValidation(); // Create a policy with ignore_unavailable: false and an index that doesn't exist - createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoName, indexPattern, false); + var schedule = randomBoolean() ? "*/1 * * * * ?" : "1s"; + createSnapshotPolicy(policyName, "snap", schedule, repoName, indexPattern, false); assertBusy(() -> { // Check that the failure is written to the cluster state @@ -300,10 +304,11 @@ public void testStartStopStatus() throws Exception { }); try { + var schedule = randomBoolean() ? "0 0/15 * * * ?" 
: "15m"; createSnapshotPolicy( policyName, "snap", - "0 0/15 * * * ?", + schedule, repoId, indexName, true, @@ -671,6 +676,36 @@ public void testSnapshotRetentionWithMissingRepo() throws Exception { }, 60, TimeUnit.SECONDS); } + @SuppressWarnings("unchecked") + public void testGetIntervalSchedule() throws Exception { + final String indexName = "index-1"; + final String policyName = "policy-1"; + final String repoId = "repo-1"; + + initializeRepo(repoId); + + var schedule = "30m"; + var now = Instant.now(); + createSnapshotPolicy(policyName, "snap", schedule, repoId, indexName, true); + + assertBusy(() -> { + Request getReq = new Request("GET", "/_slm/policy/" + policyName); + Response policyMetadata = client().performRequest(getReq); + Map policyResponseMap; + try (InputStream is = policyMetadata.getEntity().getContent()) { + policyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + + Map policyMetadataMap = (Map) policyResponseMap.get(policyName); + Long nextExecutionMillis = (Long) policyMetadataMap.get("next_execution_millis"); + assertNotNull(nextExecutionMillis); + + Instant nextExecution = Instant.ofEpochMilli(nextExecutionMillis); + assertTrue(nextExecution.isAfter(now.plus(Duration.ofMinutes(29)))); + assertTrue(nextExecution.isBefore(now.plus(Duration.ofMinutes(31)))); + }); + } + public Map getLocation(String path) { try { Response executeRepsonse = client().performRequest(new Request("GET", path)); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java index 0d79ecf31670c..192807d667abb 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.license.XPackLicenseState; @@ -92,6 +93,7 @@ public class SnapshotLifecycle extends Plugin implements ActionPlugin, HealthPlu private final SetOnce snapshotRetentionService = new SetOnce<>(); private final SetOnce snapshotHistoryStore = new SetOnce<>(); private final SetOnce slmHealthIndicatorService = new SetOnce<>(); + private final SetOnce featureService = new SetOnce<>(); private final Settings settings; public SnapshotLifecycle(Settings settings) { @@ -124,7 +126,7 @@ public Collection createComponents(PluginServices services) { ClusterService clusterService = services.clusterService(); ThreadPool threadPool = services.threadPool(); final List components = new ArrayList<>(); - + featureService.set(services.featureService()); SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry( settings, clusterService, @@ -236,7 +238,7 @@ public List getRestHandlers( } List> reservedClusterStateHandlers() { - return List.of(new ReservedSnapshotAction()); + return List.of(new ReservedSnapshotAction(featureService.get())); } @Override diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java index f3dfe4fb26f65..96b962f70a1b6 100644 --- 
a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java @@ -13,8 +13,14 @@ import org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry; import java.util.Map; +import java.util.Set; public class SnapshotLifecycleFeatures implements FeatureSpecification { + @Override + public Set getFeatures() { + return Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE); + } + @Override public Map getHistoricalFeatures() { return Map.of(SnapshotLifecycleTemplateRegistry.MANAGED_BY_DATA_STREAM_LIFECYCLE, Version.V_8_12_0); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java index 6d77926149334..b93f90de73f05 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java @@ -20,10 +20,11 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.OperationModeUpdateTask; -import org.elasticsearch.xpack.core.scheduler.CronSchedule; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; @@ -45,7 +46,7 @@ * task according to the policy's schedule. */ public class SnapshotLifecycleService implements Closeable, ClusterStateListener { - + public static final NodeFeature INTERVAL_SCHEDULE = new NodeFeature("slm.interval_schedule"); private static final Logger logger = LogManager.getLogger(SnapshotLifecycleService.class); private static final String JOB_PATTERN_SUFFIX = "-\\d+$"; @@ -193,15 +194,13 @@ public void maybeScheduleSnapshot(final SnapshotLifecyclePolicyMetadata snapshot // is identical to an existing job (meaning the version has not changed) then this does // not reschedule it. 
scheduledTasks.computeIfAbsent(jobId, id -> { - final SchedulerEngine.Job job = new SchedulerEngine.Job( - jobId, - new CronSchedule(snapshotLifecyclePolicy.getPolicy().getSchedule()) - ); if (existingJobsFoundAndCancelled) { logger.info("rescheduling updated snapshot lifecycle job [{}]", jobId); } else { logger.info("scheduling snapshot lifecycle job [{}]", jobId); } + + final SchedulerEngine.Job job = snapshotLifecyclePolicy.buildSchedulerJob(jobId); scheduler.add(job); return job; }); @@ -249,7 +248,7 @@ public static void validateRepositoryExists(final String repository, final Clust */ public static void validateMinimumInterval(final SnapshotLifecyclePolicy lifecycle, final ClusterState state) { TimeValue minimum = LifecycleSettings.SLM_MINIMUM_INTERVAL_SETTING.get(state.metadata().settings()); - TimeValue next = lifecycle.calculateNextInterval(); + TimeValue next = lifecycle.calculateNextInterval(Clock.systemUTC()); if (next.duration() > 0 && minimum.duration() > 0 && next.millis() < minimum.millis()) { throw new IllegalArgumentException( "invalid schedule [" @@ -262,6 +261,18 @@ public static void validateMinimumInterval(final SnapshotLifecyclePolicy lifecyc } } + /** + * Validate that interval schedule feature is not supported by all nodes + * @throws IllegalArgumentException if is interval expression but interval schedule not supported + */ + public static void validateIntervalScheduleSupport(String schedule, FeatureService featureService, ClusterState state) { + if (SnapshotLifecyclePolicy.isIntervalSchedule(schedule) && featureService.clusterHasFeature(state, INTERVAL_SCHEDULE) == false) { + throw new IllegalArgumentException( + "Unable to use slm interval schedules in mixed-clusters with nodes that do not support feature " + INTERVAL_SCHEDULE.id() + ); + } + } + @Override public void close() { if (this.running.compareAndSet(true, false)) { diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index adf011e0ade37..d49f32869f28a 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -68,21 +68,21 @@ public SnapshotLifecycleTask(final Client client, final ClusterService clusterSe @Override public void triggered(SchedulerEngine.Event event) { - logger.debug("snapshot lifecycle policy task triggered from job [{}]", event.getJobName()); + logger.debug("snapshot lifecycle policy task triggered from job [{}]", event.jobName()); - final Optional snapshotName = maybeTakeSnapshot(event.getJobName(), client, clusterService, historyStore); + final Optional snapshotName = maybeTakeSnapshot(event.jobName(), client, clusterService, historyStore); // Would be cleaner if we could use Optional#ifPresentOrElse snapshotName.ifPresent( name -> logger.info( "snapshot lifecycle policy job [{}] issued new snapshot creation for [{}] successfully", - event.getJobName(), + event.jobName(), name ) ); if (snapshotName.isPresent() == false) { - logger.warn("snapshot lifecycle policy for job [{}] no longer exists, snapshot not created", event.getJobName()); + logger.warn("snapshot lifecycle policy for job [{}] no longer exists, snapshot not created", event.jobName()); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java 
b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index 0cf1373e92beb..678e6941599c9 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -88,21 +88,21 @@ public SnapshotRetentionTask( @Override public void triggered(SchedulerEngine.Event event) { - assert event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_JOB_ID) - || event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) + assert event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_JOB_ID) + || event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) : "expected id to be " + SnapshotRetentionService.SLM_RETENTION_JOB_ID + " or " + SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID + " but it was " - + event.getJobName(); + + event.jobName(); final ClusterState state = clusterService.state(); // Skip running retention if SLM is disabled, however, even if it's // disabled we allow manual running. if (SnapshotLifecycleService.slmStoppedOrStopping(state) - && event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) == false) { + && event.jobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) == false) { logger.debug("skipping SLM retention as SLM is currently stopped or stopping"); return; } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java index f14edd89b826d..192b03aa385d5 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; @@ -41,7 +42,11 @@ public class ReservedSnapshotAction implements ReservedClusterStateHandler prepare(List { private static final Logger logger = LogManager.getLogger(TransportPutSnapshotLifecycleAction.class); + private final FeatureService featureService; @Inject public TransportPutSnapshotLifecycleAction( @@ -56,7 +58,8 @@ public TransportPutSnapshotLifecycleAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + FeatureService featureService ) { super( PutSnapshotLifecycleAction.NAME, @@ -69,6 +72,7 @@ public TransportPutSnapshotLifecycleAction( AcknowledgedResponse::readFrom, EsExecutors.DIRECT_EXECUTOR_SERVICE ); + this.featureService = featureService; } @Override @@ -78,8 +82,8 @@ protected void masterOperation( final ClusterState state, final ActionListener listener ) { + SnapshotLifecycleService.validateIntervalScheduleSupport(request.getLifecycle().getSchedule(), featureService, state); SnapshotLifecycleService.validateRepositoryExists(request.getLifecycle().getRepository(), state); - SnapshotLifecycleService.validateMinimumInterval(request.getLifecycle(), state); // headers from the thread 
context stored by the AuthenticationService to be shared between the diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index fc4ee7867ed04..b7674a2d60bff 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -19,11 +19,17 @@ import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; import java.io.IOException; +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; import java.util.Collections; import java.util.HashMap; import java.util.Map; import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.randomSnapshotLifecyclePolicy; +import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.randomTimeValueString; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -34,10 +40,11 @@ public class SnapshotLifecyclePolicyTests extends AbstractXContentSerializingTes private String id; public void testToRequest() { + var schedule = randomBoolean() ? "0 1 2 3 4 ? 2099" : "30m"; SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", "name", - "0 1 2 3 4 ? 2099", + schedule, "repo", Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY @@ -47,13 +54,13 @@ public void testToRequest() { Collections.singletonMap("policy", "id") ); - p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 2099", "repo", null, null); + p = new SnapshotLifecyclePolicy("id", "name", schedule, "repo", null, null); request = p.toRequest(TEST_REQUEST_TIMEOUT); expected.waitForCompletion(true).snapshot(request.snapshot()).repository("repo").uuid(request.uuid()); assertEquals(expected, request); } - public void testNextExecutionTime() { + public void testNextExecutionTimeSchedule() { SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", "name", @@ -62,10 +69,100 @@ public void testNextExecutionTime() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextExecution(), equalTo(4078864860000L)); + assertThat(p.calculateNextExecution(-1, Clock.systemUTC()), equalTo(4078864860000L)); } - public void testCalculateNextInterval() { + public void testNextExecutionTimeInterval() { + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + { + // current time is exactly modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime; + Instant expected = Instant.parse("2024-07-17T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time is half an interval past modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.plus(Duration.ofMinutes(15)); + Instant expected = Instant.parse("2024-07-17T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + 
assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time is a full day (24 intervals) ahead of modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.plus(Duration.ofDays(1)); + Instant expected = Instant.parse("2024-07-18T00:30:00.000Z").truncatedTo(ChronoUnit.SECONDS); + assertThat(p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), equalTo(expected.toEpochMilli())); + } + + { + // current time before modified time + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime.minus(Duration.ofHours(1)); + expectThrows(AssertionError.class, () -> p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime))); + } + + { + // current time is every minute of a day + Instant modifiedTime = Instant.parse("2024-07-17T00:00:00.000Z").truncatedTo(ChronoUnit.SECONDS); + Instant currentTime = modifiedTime; + Instant expectedTime = modifiedTime.plus(Duration.ofMinutes(30)); + + for (; currentTime.isBefore(modifiedTime.plus(Duration.ofDays(1))); currentTime = currentTime.plus(Duration.ofMinutes(1))) { + if (currentTime.equals(expectedTime)) { + expectedTime = expectedTime.plus(Duration.ofMinutes(30)); + } + assertThat( + p.calculateNextExecution(modifiedTime.toEpochMilli(), fixedClock(currentTime)), + equalTo(expectedTime.toEpochMilli()) + ); + } + } + } + + private static Clock fixedClock(Instant instant) { + return Clock.fixed(instant, ZoneOffset.UTC); + } + + public void testCalculateNextIntervalInterval() { + + { + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.timeValueMinutes(30))); + } + { + String schedule = randomTimeValueString(); + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( + "id", + "name", + schedule, + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.parseTimeValue(schedule, "schedule"))); + } + } + + public void testCalculateNextIntervalSchedule() { { SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", @@ -75,7 +172,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.timeValueMinutes(5))); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.timeValueMinutes(5))); } { @@ -87,7 +184,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.MINUS_ONE)); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.MINUS_ONE)); } { @@ -99,7 +196,7 @@ public void testCalculateNextInterval() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextInterval(), equalTo(TimeValue.MINUS_ONE)); + assertThat(p.calculateNextInterval(Clock.systemUTC()), equalTo(TimeValue.MINUS_ONE)); } } @@ -123,7 +220,7 @@ public void testValidation() { + " the following characters " + Strings.INVALID_FILENAME_CHARS, "invalid repository name [ ]: cannot be empty", - "invalid schedule: invalid cron 
expression [* * * * * L]" + "invalid schedule [* * * * * L]: must be a valid cron expression or time unit" ) ); } @@ -149,6 +246,34 @@ public void testValidation() { ); } + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "0d", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), containsInAnyOrder("invalid schedule [0d]: time unit must be at least 1 millisecond")); + } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "999micros", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e.validationErrors(), containsInAnyOrder("invalid schedule [999micros]: time unit must be at least 1 millisecond")); + } + { SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( "my_policy", @@ -161,6 +286,33 @@ public void testValidation() { ValidationException e = policy.validate(); assertThat(e, nullValue()); } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "30m", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + ValidationException e = policy.validate(); + assertThat(e, nullValue()); + } + + { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( + "my_policy", + "my_snap", + "1ms", + "repo", + Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY + ); + + ValidationException e = policy.validate(); + assertThat(e, nullValue()); + } } public void testMetadataValidation() { diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java index 5b59ac9efc0ab..36887681f5575 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -37,6 +38,7 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleStats; import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; @@ -48,6 +50,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -108,7 +111,7 @@ public void testRepositoryExistenceForMissingRepo() { public void testNothingScheduledWhenNotRunning() throws InterruptedException { ClockMock clock = new ClockMock(); SnapshotLifecyclePolicyMetadata initialPolicy = SnapshotLifecyclePolicyMetadata.builder() - 
.setPolicy(createPolicy("initial", "*/1 * * * * ?")) + .setPolicy(createPolicy("initial", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(1) .setModifiedDate(1) @@ -133,7 +136,7 @@ public void testNothingScheduledWhenNotRunning() throws InterruptedException { sls.init(); SnapshotLifecyclePolicyMetadata newPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(2) @@ -211,7 +214,7 @@ public void testPolicyCRUD() throws Exception { Map policies = new HashMap<>(); SnapshotLifecyclePolicyMetadata policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setModifiedDate(1) .build(); @@ -240,7 +243,7 @@ public void testPolicyCRUD() throws Exception { int currentCount = triggerCount.get(); previousState = state; SnapshotLifecyclePolicyMetadata newPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(2) @@ -253,7 +256,7 @@ public void testPolicyCRUD() throws Exception { CopyOnWriteArrayList triggeredJobs = new CopyOnWriteArrayList<>(); trigger.set(e -> { - triggeredJobs.add(e.getJobName()); + triggeredJobs.add(e.jobName()); triggerCount.incrementAndGet(); }); clock.fastForwardSeconds(1); @@ -283,7 +286,7 @@ public void testPolicyCRUD() throws Exception { // When the service is no longer master, all jobs should be automatically cancelled policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo", "*/1 * * * * ?")) + .setPolicy(createPolicy("foo", randomBoolean() ? "*/1 * * * * ?" : "1s")) .setHeaders(Collections.emptyMap()) .setVersion(3) .setModifiedDate(1) @@ -343,7 +346,7 @@ public void testPolicyNamesEndingInNumbers() throws Exception { Map policies = new HashMap<>(); SnapshotLifecyclePolicyMetadata policy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo-2", "30 * * * * ?")) + .setPolicy(createPolicy("foo-2", randomBoolean() ? "30 * * * * ?" : "30s")) .setHeaders(Collections.emptyMap()) .setVersion(1) .setModifiedDate(1) @@ -358,7 +361,7 @@ public void testPolicyNamesEndingInNumbers() throws Exception { assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("foo-2-1"))); SnapshotLifecyclePolicyMetadata secondPolicy = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createPolicy("foo-1", "45 * * * * ?")) + .setPolicy(createPolicy("foo-1", randomBoolean() ? "45 * * * * ?" : "45s")) .setHeaders(Collections.emptyMap()) .setVersion(2) .setModifiedDate(1) @@ -410,33 +413,70 @@ public void testValidateMinimumInterval() { ) .build(); - for (String schedule : List.of("0 0/15 * * * ?", "0 0 1 * * ?", "0 0 0 1 1 ? 2099" /* once */, "* * * 31 FEB ? 
*" /* never */)) { - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), defaultState); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationOneMinuteState); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationDisabledState); + { // using chron schedule + for (String schedule : List.of( + "0 0/15 * * * ?", + "0 0 1 * * ?", + "0 0 0 1 1 ? 2099" /* once */, + "* * * 31 FEB ? *" /* never */ + )) { + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), defaultState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationOneMinuteState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", schedule), validationDisabledState); + } + + IllegalArgumentException e; + + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), defaultState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [0 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [15m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), validationOneMinuteState); + + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationOneMinuteState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [0/30 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [1m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationDisabledState); } - IllegalArgumentException e; + { // using time value + for (String interval : List.of("15m", "1h", "1d")) { + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), defaultState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), validationOneMinuteState); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", interval), validationDisabledState); + } - e = expectThrows( - IllegalArgumentException.class, - () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), defaultState) - ); - assertThat( - e.getMessage(), - equalTo("invalid schedule [0 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [15m]") - ); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0 0/1 * * * ?"), validationOneMinuteState); + IllegalArgumentException e; - e = expectThrows( - IllegalArgumentException.class, - () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationOneMinuteState) - ); - assertThat( - e.getMessage(), - equalTo("invalid schedule [0/30 0/1 * * * ?]: " + "schedule would be too frequent, executing more than every [1m]") - ); - SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "0/30 0/1 * * * ?"), validationDisabledState); + e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "1m"), defaultState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [1m]: " + "schedule would be too frequent, executing more than every [15m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "1m"), validationOneMinuteState); + + e = 
expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "30s"), validationOneMinuteState) + ); + assertThat( + e.getMessage(), + equalTo("invalid schedule [30s]: " + "schedule would be too frequent, executing more than every [1m]") + ); + SnapshotLifecycleService.validateMinimumInterval(createPolicy("foo-1", "30s"), validationDisabledState); + } } public void testStoppedPriority() { @@ -485,6 +525,41 @@ public void submitUnbatchedStateUpdateTask(String source, ClusterStateUpdateTask } } + public void testValidateIntervalScheduleSupport() { + var featureService = new FeatureService(List.of(new SnapshotLifecycleFeatures())); + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> SnapshotLifecycleService.validateIntervalScheduleSupport("30d", featureService, state) + ); + assertThat(e.getMessage(), containsString("Unable to use slm interval schedules")); + } + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + try { + SnapshotLifecycleService.validateIntervalScheduleSupport("30d", featureService, state); + } catch (Exception e) { + fail("interval schedule is supported by version and should not fail"); + } + } + { + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .nodeFeatures(Map.of("a", Set.of(), "b", Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE.id()))) + .build(); + try { + SnapshotLifecycleService.validateIntervalScheduleSupport("*/1 * * * * ?", featureService, state); + } catch (Exception e) { + fail("cron schedule does not need feature check and should not fail"); + } + } + } + class FakeSnapshotTask extends SnapshotLifecycleTask { private final Consumer onTriggered; @@ -515,7 +590,7 @@ public ClusterState createState(SnapshotLifecycleMetadata snapMeta, boolean loca } public static SnapshotLifecyclePolicy createPolicy(String id) { - return createPolicy(id, randomSchedule()); + return createPolicy(id, SnapshotLifecyclePolicyMetadataTests.randomSchedule()); } public static SnapshotLifecyclePolicy createPolicy(String id, String schedule) { @@ -534,8 +609,4 @@ public static SnapshotLifecyclePolicy createPolicy(String id, String schedule) { SnapshotRetentionConfiguration.EMPTY ); } - - public static String randomSchedule() { - return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; - } } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java index dbb22f8dd49d8..877aa0ddb7342 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.slm.history.SnapshotHistoryStore; @@ 
-52,7 +53,7 @@ public void testJobsAreScheduled() throws InterruptedException { assertThat(service.getScheduler().jobCount(), equalTo(0)); service.onMaster(); - service.setUpdateSchedule(SnapshotLifecycleServiceTests.randomSchedule()); + service.setUpdateSchedule(SnapshotLifecyclePolicyMetadataTests.randomCronSchedule()); assertThat(service.getScheduler().scheduledJobIds(), containsInAnyOrder(SnapshotRetentionService.SLM_RETENTION_JOB_ID)); service.offMaster(); @@ -81,7 +82,7 @@ public void testManualTriggering() throws InterruptedException { try ( ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); SnapshotRetentionService service = new SnapshotRetentionService(Settings.EMPTY, () -> new FakeRetentionTask(event -> { - assertThat(event.getJobName(), equalTo(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID)); + assertThat(event.jobName(), equalTo(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID)); invoked.incrementAndGet(); }), clock) ) { diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java index 71346ebc495d4..0fcc4b8007c6d 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -79,17 +80,17 @@ private TransformState processJSON(ReservedSnapshotAction action, TransformState } public void testDependencies() { - var action = new ReservedSnapshotAction(); + var action = new ReservedSnapshotAction(mock(FeatureService.class)); assertThat(action.optionalDependencies(), contains(ReservedRepositoryAction.NAME)); } - public void testValidationFails() { + public void testValidationFailsNeitherScheduleOrInterval() { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); final ClusterName clusterName = new ClusterName("elasticsearch"); ClusterState state = ClusterState.builder(clusterName).build(); - ReservedSnapshotAction action = new ReservedSnapshotAction(); + ReservedSnapshotAction action = new ReservedSnapshotAction(mock(FeatureService.class)); TransformState prevState = new TransformState(state, Set.of()); String badPolicyJSON = """ @@ -117,6 +118,56 @@ public void testValidationFails() { ); } + public void testIntervalScheduleSupportValidation() { + Client client = mock(Client.class); + when(client.settings()).thenReturn(Settings.EMPTY); + final ClusterName clusterName = new ClusterName("elasticsearch"); + List repositoriesMetadata = List.of(new RepositoryMetadata("repo", "fs", Settings.EMPTY)); + Metadata.Builder mdBuilder = Metadata.builder(); + mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); + ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); + TransformState prevState = new TransformState(state, Set.of()); + String goodPolicyJSON = """ 
+ { + "daily-snapshots": { + "schedule": "30d", + "name": "", + "repository": "repo", + "config": { + "indices": ["foo-*", "important"], + "ignore_unavailable": true, + "include_global_state": false + }, + "retention": { + "expire_after": "30d", + "min_count": 1, + "max_count": 50 + } + } + } + """; + + { + FeatureService featureService = mock(FeatureService.class); + when(featureService.clusterHasFeature(any(), any())).thenReturn(false); + ReservedSnapshotAction action = new ReservedSnapshotAction(featureService); + assertThat( + expectThrows(IllegalArgumentException.class, () -> processJSON(action, prevState, goodPolicyJSON)).getMessage(), + is("Error on validating SLM requests") + ); + } + { + FeatureService featureService = mock(FeatureService.class); + when(featureService.clusterHasFeature(any(), any())).thenReturn(true); + ReservedSnapshotAction action = new ReservedSnapshotAction(featureService); + try { + processJSON(action, prevState, goodPolicyJSON); + } catch (Exception e) { + fail("interval schedule with interval feature should pass validation"); + } + } + } + public void testActionAddRemove() throws Exception { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); @@ -128,7 +179,7 @@ public void testActionAddRemove() throws Exception { mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); - ReservedSnapshotAction action = new ReservedSnapshotAction(); + ReservedSnapshotAction action = new ReservedSnapshotAction(mock(FeatureService.class)); String emptyJSON = ""; @@ -362,7 +413,7 @@ public void testOperatorControllerFromJSONContent() throws IOException { null, List.of( new ReservedClusterSettingsAction(clusterSettings), - new ReservedSnapshotAction(), + new ReservedSnapshotAction(mock(FeatureService.class)), new ReservedRepositoryAction(repositoriesService) ) ); @@ -396,7 +447,8 @@ public void testPutSLMReservedStateHandler() throws Exception { mock(ClusterService.class), threadPool, mock(ActionFilters.class), - mock(IndexNameExpressionResolver.class) + mock(IndexNameExpressionResolver.class), + mock(FeatureService.class) ); assertThat(putAction.reservedStateHandlerName().get(), equalTo(ReservedSnapshotAction.NAME)); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java index 750fdd40c12d6..211afe8e55a15 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; import org.junit.After; import org.junit.Before; @@ -194,10 +195,14 @@ public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String id) { config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); } } - return new SnapshotLifecyclePolicy(id, randomAlphaOfLength(4), randomSchedule(), randomAlphaOfLength(4), config, null); - } - private static String randomSchedule() { - return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; + 
return new SnapshotLifecyclePolicy(
+            id,
+            randomAlphaOfLength(4),
+            SnapshotLifecyclePolicyMetadataTests.randomSchedule(),
+            randomAlphaOfLength(4),
+            config,
+            null
+        );
     }
 }

From e1dc59625f9081ba74c7502e4a0a0026dd3776fc Mon Sep 17 00:00:00 2001
From: Parker Timmins
Date: Fri, 23 Aug 2024 02:21:11 -0500
Subject: [PATCH 031/352] SLM interval schedule followup - add back
 getFieldName style getters (#112123)

Recent SLM interval change #110847 included changing two classes to
records. This changed the getter methods from the form getFieldName()
to fieldName(). Unfortunately, serverless expected the getFieldName()
form. Until serverless can be updated, we'll add back the getFieldName()
style getters, in addition to the fieldName() getters, so as not to
break the build.
---
 docs/changelog/112123.yaml                         |  5 +++
 .../common/scheduler/SchedulerEngine.java          | 34 +++++++++++++++++++
 2 files changed, 39 insertions(+)
 create mode 100644 docs/changelog/112123.yaml

diff --git a/docs/changelog/112123.yaml b/docs/changelog/112123.yaml
new file mode 100644
index 0000000000000..0c0d7ac44cd17
--- /dev/null
+++ b/docs/changelog/112123.yaml
@@ -0,0 +1,5 @@
+pr: 112123
+summary: SLM interval schedule followup - add back `getFieldName` style getters
+area: ILM+SLM
+type: enhancement
+issues: []

diff --git a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java
index ab63ab4062767..66b4f3c82e3cf 100644
--- a/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java
+++ b/server/src/main/java/org/elasticsearch/common/scheduler/SchedulerEngine.java
@@ -58,6 +58,23 @@ public record Job(String id, Schedule schedule, @Nullable Long fixedStartTime) {
         public Job(String id, Schedule schedule) {
             this(id, schedule, null);
         }
+
+        /**
+         * The following getters are redundant with the getters built in by the record.
+         * Unfortunately, getFieldName form getters are expected by serverless.
+         * These getters are being added back until serverless can be updated for the new getters.
+         */
+        public String getId() {
+            return id;
+        }
+
+        public Schedule getSchedule() {
+            return schedule;
+        }
+
+        public Long getFixedStartTime() {
+            return fixedStartTime;
+        }
     }
 
     public record Event(String jobName, long triggeredTime, long scheduledTime) {
@@ -65,6 +82,23 @@ public record Event(String jobName, long triggeredTime, long scheduledTime) {
         public String toString() {
             return "Event[jobName=" + jobName + "," + "triggeredTime=" + triggeredTime + "," + "scheduledTime=" + scheduledTime + "]";
         }
+
+        /**
+         * The following getters are redundant with the getters built in by the record.
+         * Unfortunately, getFieldName form getters are expected by serverless.
+         * These getters are being added back until serverless can be updated for the new getters.
+         */
+        public String getJobName() {
+            return jobName;
+        }
+
+        public long getTriggeredTime() {
+            return triggeredTime;
+        }
+
+        public long getScheduledTime() {
+            return scheduledTime;
+        }
     }
 
     public interface Listener {

From e0c1ccbc1e2d843f86324c4b888c77b37ce7f800 Mon Sep 17 00:00:00 2001
From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com>
Date: Fri, 23 Aug 2024 09:26:55 +0200
Subject: [PATCH 032/352] Make enrich cache based on memory usage (#111412)

The max enrich cache size setting now also supports an absolute max size
in bytes (of used heap space) and a percentage of the max heap space,
next to the existing flat document count. The default is 1% of the max
heap space.
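For illustration only (these example values are chosen here and are not
part of the change itself), the setting accepts any one of the three
forms in `elasticsearch.yml`; note that the code in this patch registers
the setting under the name `enrich.cache.size`:

    enrich.cache.size: 1000    # flat number of cached searches
    enrich.cache.size: 100mb   # absolute byte size of used heap
    enrich.cache.size: 1%      # percentage of the max heap space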
This should prevent issues
where the enrich cache takes up a lot of memory when there are large
documents in the cache.
---
 docs/changelog/111412.yaml                    |  6 ++
 docs/reference/ingest/enrich.asciidoc         | 20 ++++--
 .../xpack/enrich/EnrichCache.java             | 23 ++++++-
 .../xpack/enrich/EnrichPlugin.java            | 65 ++++++++++++++++++-
 .../FlatNumberOrByteSizeValueTests.java       | 59 +++++++++++++++++
 5 files changed, 162 insertions(+), 11 deletions(-)
 create mode 100644 docs/changelog/111412.yaml
 create mode 100644 x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java

diff --git a/docs/changelog/111412.yaml b/docs/changelog/111412.yaml
new file mode 100644
index 0000000000000..297fa77cd2664
--- /dev/null
+++ b/docs/changelog/111412.yaml
@@ -0,0 +1,6 @@
+pr: 111412
+summary: Make enrich cache based on memory usage
+area: Ingest Node
+type: enhancement
+issues:
+ - 106081
diff --git a/docs/reference/ingest/enrich.asciidoc b/docs/reference/ingest/enrich.asciidoc
index 6642cdc2a74ce..4bd50641149c0 100644
--- a/docs/reference/ingest/enrich.asciidoc
+++ b/docs/reference/ingest/enrich.asciidoc
@@ -230,12 +230,12 @@ Instead, you can:
 [[ingest-enrich-components]]
 ==== Enrich components
 
-The enrich coordinator is a component that manages and performs the searches
+The enrich coordinator is a component that manages and performs the searches
 required to enrich documents on each ingest node. It combines searches from
 all enrich processors in all pipelines into bulk <>.
 
-The enrich policy executor is a component that manages the executions of all
-enrich policies. When an enrich policy is executed, this component creates
+The enrich policy executor is a component that manages the executions of all
+enrich policies. When an enrich policy is executed, this component creates
 a new enrich index and removes the previous enrich index. The enrich policy
 executions are managed from the elected master node. The execution of these
 policies occurs on a different node.
@@ -249,9 +249,15 @@ enrich policy executor.
 The enrich coordinator supports the following node settings:
 
 `enrich.cache_size`::
-Maximum number of searches to cache for enriching documents. Defaults to `1000`.
-There is a single cache for all enrich processors in the cluster. This setting
-determines the size of that cache.
+Maximum size of the cache that caches searches for enriching documents.
+The size can be specified in three units: the raw number of
+cached searches (e.g. `1000`), an absolute size in bytes (e.g. `100Mb`),
+or a percentage of the max heap space of the node (e.g. `1%`).
+Both for the absolute byte size and the percentage of heap space,
+{es} does not guarantee that the enrich cache size will adhere exactly to that maximum,
+as {es} uses the byte size of the serialized search response
+which is a good representation of the used space on the heap, but not an exact match.
+Defaults to `1%`. There is a single cache for all enrich processors in the cluster.
 
 `enrich.coordinator_proxy.max_concurrent_requests`::
 Maximum number of concurrent <> to
@@ -280,4 +286,4 @@ Maximum number of enrich policies to execute concurrently. Defaults to `50`.
include::geo-match-enrich-policy-type-ex.asciidoc[] include::match-enrich-policy-type-ex.asciidoc[] -include::range-enrich-policy-type-ex.asciidoc[] \ No newline at end of file +include::range-enrich-policy-type-ex.asciidoc[] diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java index 35c2071188864..0130bd5537a11 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -29,6 +30,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.LongSupplier; +import java.util.function.ToLongBiFunction; /** * A simple cache for enrich that uses {@link Cache}. There is one instance of this cache and @@ -61,12 +63,29 @@ public final class EnrichCache { this(maxSize, System::nanoTime); } + EnrichCache(ByteSizeValue maxByteSize) { + this(maxByteSize, System::nanoTime); + } + // non-private for unit testing only EnrichCache(long maxSize, LongSupplier relativeNanoTimeProvider) { this.relativeNanoTimeProvider = relativeNanoTimeProvider; - this.cache = CacheBuilder.builder().setMaximumWeight(maxSize).removalListener(notification -> { + this.cache = createCache(maxSize, null); + } + + EnrichCache(ByteSizeValue maxByteSize, LongSupplier relativeNanoTimeProvider) { + this.relativeNanoTimeProvider = relativeNanoTimeProvider; + this.cache = createCache(maxByteSize.getBytes(), (key, value) -> value.sizeInBytes); + } + + private Cache createCache(long maxWeight, ToLongBiFunction weigher) { + var builder = CacheBuilder.builder().setMaximumWeight(maxWeight).removalListener(notification -> { sizeInBytes.getAndAdd(-1 * notification.getValue().sizeInBytes); - }).build(); + }); + if (weigher != null) { + builder.weigher(weigher); + } + return builder.build(); } /** diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java index 868ec49ff1d97..1a68ada60b6f1 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java @@ -12,17 +12,22 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.features.NodeFeature; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.ingest.Processor; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SystemIndexPlugin; @@ -121,14 +126,29 @@ public class EnrichPlugin extends Plugin implements SystemIndexPlugin, IngestPlu return String.valueOf(maxConcurrentRequests * maxLookupsPerRequest); }, val -> Setting.parseInt(val, 1, Integer.MAX_VALUE, QUEUE_CAPACITY_SETTING_NAME), Setting.Property.NodeScope); - public static final Setting CACHE_SIZE = Setting.longSetting("enrich.cache_size", 1000, 0, Setting.Property.NodeScope); + public static final String CACHE_SIZE_SETTING_NAME = "enrich.cache.size"; + public static final Setting CACHE_SIZE = new Setting<>( + "enrich.cache.size", + (String) null, + (String s) -> FlatNumberOrByteSizeValue.parse( + s, + CACHE_SIZE_SETTING_NAME, + new FlatNumberOrByteSizeValue(ByteSizeValue.ofBytes((long) (0.01 * JvmInfo.jvmInfo().getConfiguredMaxHeapSize()))) + ), + Setting.Property.NodeScope + ); private final Settings settings; private final EnrichCache enrichCache; public EnrichPlugin(final Settings settings) { this.settings = settings; - this.enrichCache = new EnrichCache(CACHE_SIZE.get(settings)); + FlatNumberOrByteSizeValue maxSize = CACHE_SIZE.get(settings); + if (maxSize.byteSizeValue() != null) { + this.enrichCache = new EnrichCache(maxSize.byteSizeValue()); + } else { + this.enrichCache = new EnrichCache(maxSize.flatNumber()); + } } @Override @@ -265,4 +285,45 @@ public String getFeatureName() { public String getFeatureDescription() { return "Manages data related to Enrich policies"; } + + /** + * A class that specifies either a flat (unit-less) number or a byte size value. + */ + public static class FlatNumberOrByteSizeValue { + + @Nullable + private final Long flatNumber; + @Nullable + private final ByteSizeValue byteSizeValue; + + public FlatNumberOrByteSizeValue(ByteSizeValue byteSizeValue) { + this.byteSizeValue = byteSizeValue; + this.flatNumber = null; + } + + public FlatNumberOrByteSizeValue(Long flatNumber) { + this.flatNumber = flatNumber; + this.byteSizeValue = null; + } + + public static FlatNumberOrByteSizeValue parse(String value, String settingName, FlatNumberOrByteSizeValue defaultValue) { + if (Strings.hasText(value) == false) { + return defaultValue; + } + if (Character.isDigit(value.charAt(value.length() - 1)) == false) { + return new FlatNumberOrByteSizeValue(MemorySizeValue.parseBytesSizeValueOrHeapRatio(value, settingName)); + } + return new FlatNumberOrByteSizeValue(Long.parseLong(value)); + } + + @Nullable + public ByteSizeValue byteSizeValue() { + return byteSizeValue; + } + + @Nullable + public Long flatNumber() { + return flatNumber; + } + } } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java new file mode 100644 index 0000000000000..809b78c50b35a --- /dev/null +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/FlatNumberOrByteSizeValueTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.enrich;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.elasticsearch.xpack.enrich.EnrichPlugin.FlatNumberOrByteSizeValue;
+
+public class FlatNumberOrByteSizeValueTests extends ESTestCase {
+
+    private static final String SETTING_NAME = "test.setting";
+
+    public void testParse() {
+        int number = randomIntBetween(1, Integer.MAX_VALUE);
+        assertEquals(
+            new FlatNumberOrByteSizeValue((long) number),
+            FlatNumberOrByteSizeValue.parse(Integer.toString(number), SETTING_NAME, null)
+        );
+        assertEquals(
+            new FlatNumberOrByteSizeValue(ByteSizeValue.ofGb(number)),
+            FlatNumberOrByteSizeValue.parse(number + "GB", SETTING_NAME, null)
+        );
+        assertEquals(
+            new FlatNumberOrByteSizeValue(ByteSizeValue.ofGb(number)),
+            FlatNumberOrByteSizeValue.parse(number + "g", SETTING_NAME, null)
+        );
+        int percentage = randomIntBetween(0, 100);
+        assertEquals(
+            new FlatNumberOrByteSizeValue(
+                ByteSizeValue.ofBytes((long) ((double) percentage / 100 * JvmInfo.jvmInfo().getConfiguredMaxHeapSize()))
+            ),
+            FlatNumberOrByteSizeValue.parse(percentage + "%", SETTING_NAME, null)
+        );
+        assertEquals(new FlatNumberOrByteSizeValue(0L), FlatNumberOrByteSizeValue.parse("0", SETTING_NAME, null));
+        assertEquals(new FlatNumberOrByteSizeValue(ByteSizeValue.ZERO), FlatNumberOrByteSizeValue.parse("0GB", SETTING_NAME, null));
+        assertEquals(new FlatNumberOrByteSizeValue(ByteSizeValue.ZERO), FlatNumberOrByteSizeValue.parse("0%", SETTING_NAME, null));
+        // Assert default value.
+        assertEquals(
+            new FlatNumberOrByteSizeValue((long) number),
+            FlatNumberOrByteSizeValue.parse(null, SETTING_NAME, new FlatNumberOrByteSizeValue((long) number))
+        );
+        assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5GB%", SETTING_NAME, null));
+        assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5%GB", SETTING_NAME, null));
+        assertThrows(ElasticsearchParseException.class, () -> FlatNumberOrByteSizeValue.parse("5GBX", SETTING_NAME, null));
+    }
+
+    private void assertEquals(FlatNumberOrByteSizeValue expected, FlatNumberOrByteSizeValue actual) {
+        assertEquals(expected.byteSizeValue(), actual.byteSizeValue());
+        assertEquals(expected.flatNumber(), actual.flatNumber());
+    }
+}

From e46b5173a951ce6b33b93288e35e3ad50da9929b Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Fri, 23 Aug 2024 14:59:38 +0700
Subject: [PATCH 033/352] Minor cleanup of code in the
 org.elasticsearch.index.codec package. (#112125)

* Removing unnecessary field

* making inner class static

* use enhanced switch statement

* removed commented out code.
* made immutable fields final --- .../index/codec/PerFieldFormatSupplier.java | 4 --- .../codec/postings/ES812PostingsReader.java | 34 +++++++------------ .../index/codec/postings/ES812SkipReader.java | 4 +-- .../index/codec/postings/ES812SkipWriter.java | 8 ++--- .../codec/tsdb/ES87TSDBDocValuesProducer.java | 26 +++++--------- 5 files changed, 28 insertions(+), 48 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java index 1228c908f7c18..685e9774b04a7 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java @@ -24,8 +24,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; -import java.util.Objects; - /** * Class that encapsulates the logic of figuring out the most appropriate file format for a given field, across postings, doc values and * vectors. @@ -33,7 +31,6 @@ public class PerFieldFormatSupplier { private final MapperService mapperService; - private final BigArrays bigArrays; private final DocValuesFormat docValuesFormat = new Lucene90DocValuesFormat(); private final KnnVectorsFormat knnVectorsFormat = new Lucene99HnswVectorsFormat(); private final ES87BloomFilterPostingsFormat bloomFilterPostingsFormat; @@ -43,7 +40,6 @@ public class PerFieldFormatSupplier { public PerFieldFormatSupplier(MapperService mapperService, BigArrays bigArrays) { this.mapperService = mapperService; - this.bigArrays = Objects.requireNonNull(bigArrays); this.bloomFilterPostingsFormat = new ES87BloomFilterPostingsFormat(bigArrays, this::internalGetPostingsFormatForField); this.tsdbDocValuesFormat = new ES87TSDBDocValuesFormat(); this.es812PostingsFormat = new ES812PostingsFormat(); diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java index 1aada2a153c3c..3aaf2ee5a8c4b 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java @@ -874,10 +874,6 @@ public int advance(int target) throws IOException { private void skipPositions() throws IOException { // Skip positions now: int toSkip = posPendingCount - freq; - // if (DEBUG) { - // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); - // } - final int leftInBlock = BLOCK_SIZE - posBufferUpto; if (toSkip < leftInBlock) { int end = posBufferUpto + toSkip; @@ -1010,7 +1006,7 @@ final class BlockImpactsDocsEnum extends ImpactsEnum { final boolean indexHasFreqs; - private int docFreq; // number of docs in this posting list + private final int docFreq; // number of docs in this posting list private int blockUpto; // number of documents in or before the current block private int doc; // doc we last read private long accum; // accumulator for doc deltas @@ -1211,8 +1207,8 @@ final class BlockImpactsPostingsEnum extends ImpactsEnum { final boolean indexHasOffsets; final boolean indexHasPayloads; - private int docFreq; // number of docs in this posting list - private long totalTermFreq; // number of positions in this posting list + private final int docFreq; // number of docs in this posting list + private final long totalTermFreq; // number of positions in this posting list private int 
docUpto; // how many docs we've read private int doc; // doc we last read private long accum; // accumulator for doc deltas @@ -1228,19 +1224,19 @@ final class BlockImpactsPostingsEnum extends ImpactsEnum { private long posPendingFP; // Where this term's postings start in the .doc file: - private long docTermStartFP; + private final long docTermStartFP; // Where this term's postings start in the .pos file: - private long posTermStartFP; + private final long posTermStartFP; // Where this term's payloads/offsets start in the .pay // file: - private long payTermStartFP; + private final long payTermStartFP; // File pointer where the last (vInt encoded) pos delta // block is. We need this to know whether to bulk // decode vs vInt decode the block: - private long lastPosBlockFP; + private final long lastPosBlockFP; private int nextSkipDoc = -1; @@ -1507,8 +1503,8 @@ final class BlockImpactsEverythingEnum extends ImpactsEnum { final boolean indexHasOffsets; final boolean indexHasPayloads; - private int docFreq; // number of docs in this posting list - private long totalTermFreq; // number of positions in this posting list + private final int docFreq; // number of docs in this posting list + private final long totalTermFreq; // number of positions in this posting list private int docUpto; // how many docs we've read private int posDocUpTo; // for how many docs we've read positions, offsets, and payloads private int doc; // doc we last read @@ -1528,19 +1524,19 @@ final class BlockImpactsEverythingEnum extends ImpactsEnum { private long payPendingFP; // Where this term's postings start in the .doc file: - private long docTermStartFP; + private final long docTermStartFP; // Where this term's postings start in the .pos file: - private long posTermStartFP; + private final long posTermStartFP; // Where this term's payloads/offsets start in the .pay // file: - private long payTermStartFP; + private final long payTermStartFP; // File pointer where the last (vInt encoded) pos delta // block is. We need this to know whether to bulk // decode vs vInt decode the block: - private long lastPosBlockFP; + private final long lastPosBlockFP; private int nextSkipDoc = -1; @@ -1835,10 +1831,6 @@ public int advance(int target) throws IOException { private void skipPositions() throws IOException { // Skip positions now: int toSkip = posPendingCount - (int) freqBuffer[docBufferUpto - 1]; - // if (DEBUG) { - // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); - // } - final int leftInBlock = BLOCK_SIZE - posBufferUpto; if (toSkip < leftInBlock) { int end = posBufferUpto + toSkip; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java index f9b36114361ca..8dd99392625fd 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java @@ -54,8 +54,8 @@ *

Therefore, we'll trim df before passing it to the interface. see trim(int) */ class ES812SkipReader extends MultiLevelSkipListReader { - private long[] docPointer; - private long[] posPointer; + private final long[] docPointer; + private final long[] posPointer; private long[] payPointer; private int[] posBufferUpto; private int[] payloadByteUpto; diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java index dbfb7c86a1475..98c516fc890e8 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java @@ -51,8 +51,8 @@ * uptos(position, payload). 4. start offset. */ final class ES812SkipWriter extends MultiLevelSkipListWriter { - private int[] lastSkipDoc; - private long[] lastSkipDocPointer; + private final int[] lastSkipDoc; + private final long[] lastSkipDocPointer; private long[] lastSkipPosPointer; private long[] lastSkipPayPointer; @@ -66,7 +66,7 @@ final class ES812SkipWriter extends MultiLevelSkipListWriter { private long curPayPointer; private int curPosBufferUpto; private int curPayloadByteUpto; - private CompetitiveImpactAccumulator[] curCompetitiveFreqNorms; + private final CompetitiveImpactAccumulator[] curCompetitiveFreqNorms; private boolean fieldHasPositions; private boolean fieldHasOffsets; private boolean fieldHasPayloads; @@ -197,7 +197,7 @@ protected void writeSkipData(int level, DataOutput skipBuffer) throws IOExceptio } CompetitiveImpactAccumulator competitiveFreqNorms = curCompetitiveFreqNorms[level]; - assert competitiveFreqNorms.getCompetitiveFreqNormPairs().size() > 0; + assert competitiveFreqNorms.getCompetitiveFreqNormPairs().isEmpty() == false; if (level + 1 < numberOfSkipLevels) { curCompetitiveFreqNorms[level + 1].addAll(competitiveFreqNorms); } diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java index fb90327770674..b6e1bb503045c 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java @@ -342,14 +342,10 @@ public BytesRef lookupOrd(int ord) throws IOException { @Override public int lookupTerm(BytesRef key) throws IOException { TermsEnum.SeekStatus status = termsEnum.seekCeil(key); - switch (status) { - case FOUND: - return Math.toIntExact(termsEnum.ord()); - case NOT_FOUND: - case END: - default: - return Math.toIntExact(-1L - termsEnum.ord()); - } + return switch (status) { + case FOUND -> Math.toIntExact(termsEnum.ord()); + default -> Math.toIntExact(-1L - termsEnum.ord()); + }; } @Override @@ -384,14 +380,10 @@ public BytesRef lookupOrd(long ord) throws IOException { @Override public long lookupTerm(BytesRef key) throws IOException { TermsEnum.SeekStatus status = termsEnum.seekCeil(key); - switch (status) { - case FOUND: - return termsEnum.ord(); - case NOT_FOUND: - case END: - default: - return -1L - termsEnum.ord(); - } + return switch (status) { + case FOUND -> termsEnum.ord(); + default -> -1L - termsEnum.ord(); + }; } @Override @@ -400,7 +392,7 @@ public TermsEnum termsEnum() throws IOException { } } - private class TermsDict extends BaseTermsEnum { + private static class TermsDict extends BaseTermsEnum { static final int LZ4_DECOMPRESSOR_PADDING = 
7; final TermsDictEntry entry; From 8325a7196bd4375a6a46ff57c7d4cd7af09074e4 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 23 Aug 2024 11:11:47 +0300 Subject: [PATCH 034/352] Use StandardAnalyzer in MapperServiceTestCase (#112127) We currently use Lucene's `MockAnalyzer` that rarely injects some random payload to text fields. This leads to assert errors for synthetic source, where the roundtrip source (after printing and parsing the synthetic source) appears the same but there's a difference now in the FieldInfo for text mappers due to the injected payload. Fixes #112083 --- .../org/elasticsearch/index/mapper/MapperServiceTestCase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 272901eb19351..7c11e7446e5c5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -788,7 +788,8 @@ protected TriFunction, MappedFieldType.F } protected RandomIndexWriter indexWriterForSyntheticSource(Directory directory) throws IOException { - return new RandomIndexWriter(random(), directory); + // MockAnalyzer (rarely) produces random payloads that lead to failures during assertReaderEquals. + return new RandomIndexWriter(random(), directory, new StandardAnalyzer()); } protected final String syntheticSource(DocumentMapper mapper, CheckedConsumer build) throws IOException { From 92d25c157a6e74a5b3b5a40928957bc311884040 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Fri, 23 Aug 2024 12:46:52 +0400 Subject: [PATCH 035/352] Fix id and routing types in indices.split YAML tests (#112059) --- .../40_routing_partition_size.yml | 72 +++++++++---------- .../indices.split/50_routing_required.yml | 48 ++++++------- 2 files changed, 60 insertions(+), 60 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml index 80a8ccf0d1063..11ffbe1d8464d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/40_routing_partition_size.yml @@ -16,22 +16,22 @@ more than 1: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -66,8 +66,8 @@ more than 1: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -76,8 +76,8 @@ more than 1: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -86,8 +86,8 @@ more than 1: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -117,22 +117,22 @@ exactly 1: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - 
routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -167,8 +167,8 @@ exactly 1: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -177,8 +177,8 @@ exactly 1: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -187,8 +187,8 @@ exactly 1: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -221,22 +221,22 @@ nested: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -271,8 +271,8 @@ nested: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -281,8 +281,8 @@ nested: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -291,8 +291,8 @@ nested: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml index 38bf9d72ef8ff..4c8d7736631c9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.split/50_routing_required.yml @@ -15,22 +15,22 @@ routing required: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world" } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -65,8 +65,8 @@ routing required: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -75,8 +75,8 @@ routing required: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ -85,8 +85,8 @@ routing required: - do: get: index: target - routing: 3 - id: 3 + routing: "3" + id: "3" - match: { _index: target } - match: { _id: "3" } @@ -122,22 +122,22 @@ nested: - do: index: index: source - id: 1 - routing: 1 + id: "1" + routing: "1" body: { "foo": "hello world", "n": [{"foo": "goodbye world"}, {"foo": "more words"}] } - do: index: index: source - id: 2 - routing: 2 + id: "2" + routing: "2" body: { "foo": "hello world 2" } - do: index: index: source - id: 3 - routing: 3 + id: "3" + routing: "3" body: { "foo": "hello world 3" } # make it read-only @@ -172,8 +172,8 @@ nested: - do: get: index: target - routing: 1 - id: 1 + routing: "1" + id: "1" - match: { _index: target } - match: { _id: "1" } @@ -182,8 +182,8 @@ nested: - do: get: index: target - routing: 2 - id: 2 + routing: "2" + id: "2" - match: { _index: target } - match: { _id: "2" } @@ 
-192,8 +192,8 @@ nested:

   - do:
       get:
        index: target
-        routing: 3
-        id: 3
+        routing: "3"
+        id: "3"

   - match: { _index: target }
   - match: { _id: "3" }

From 34a78f3cf3e91cd13f51f1f4f8e378f8ed244a2b Mon Sep 17 00:00:00 2001
From: Mary Gouseti
Date: Fri, 23 Aug 2024 11:49:15 +0300
Subject: [PATCH 036/352] Add documentation to deprecate the global retention
 privileges. (#112020)

---
 docs/reference/security/authorization/privileges.asciidoc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc
index f15654bef2d1f..747b1eef40441 100644
--- a/docs/reference/security/authorization/privileges.asciidoc
+++ b/docs/reference/security/authorization/privileges.asciidoc
@@ -101,6 +101,9 @@ deprecated[7.5] Use `manage_transform` instead.
 +
 This privilege is not available in {serverless-full}.
 
+`manage_data_stream_global_retention`::
+This privilege has no effect.deprecated[8.16]
+
 `manage_enrich`::
 All operations related to managing and executing enrich policies.
 
@@ -223,6 +226,9 @@ security roles of the user who created or updated them.
 All cluster read-only operations, like cluster health and state, hot threads,
 node info, node and cluster stats, and pending cluster tasks.
 
+`monitor_data_stream_global_retention`::
+This privilege has no effect.deprecated[8.16]
+
 `monitor_enrich`::
 All read-only operations related to managing and executing enrich policies.
 
From d6d305805608e203d5b1d1a1308454cecb4ac2ac Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Fri, 23 Aug 2024 17:17:05 +0700
Subject: [PATCH 037/352] Fix synthetic source NestedObjectMapper assertion.
 (#112131)

The parentDoc parameter can be -1 and the assertion needs to take this
into account (just like the next line is doing).

Closes #111998
---
 .../java/org/elasticsearch/index/mapper/NestedObjectMapper.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
index d866b3c78173b..f61f91250516a 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java
@@ -441,7 +441,7 @@ public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf
     }
 
     private List collectChildren(int parentDoc, BitSet parentDocs, DocIdSetIterator childIt) throws IOException {
-        assert parentDocs.get(parentDoc) : "wrong context, doc " + parentDoc + " is not a parent of " + nestedTypePath;
+        assert parentDoc < 0 || parentDocs.get(parentDoc) : "wrong context, doc " + parentDoc + " is not a parent of " + nestedTypePath;
         final int prevParentDoc = parentDoc > 0 ? parentDocs.prevSetBit(parentDoc - 1) : -1;
         int childDocId = childIt.docID();
         if (childDocId <= prevParentDoc) {

From 2b1170509b39b5a23c00bbfa9da87144868a5df2 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Fri, 23 Aug 2024 17:17:54 +0700
Subject: [PATCH 038/352] Change subobjects yaml tests to use composable index
 templates. (#112129)

Currently the legacy templates, which are deprecated, are being used.
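For illustration (the exact request bodies are in the diff below), a
composable index template nests the `mappings` under a `template` key,
e.g.:

    indices.put_index_template:
      name: test
      body:
        index_patterns: test-*
        template:
          mappings:
            subobjects: false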
--- .../test/index/91_metrics_no_subobjects.yml | 80 ++++++++++--------- .../test/index/92_metrics_auto_subobjects.yml | 80 ++++++++++--------- 2 files changed, 84 insertions(+), 76 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml index 94c19a4d69e17..ca6d65349c923 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml @@ -6,20 +6,21 @@ reason: added in 8.3.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -70,15 +71,16 @@ reason: added in 8.3.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -129,22 +131,23 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -196,17 +199,18 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - subobjects: false - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + subobjects: false + properties: + host.name: + type: keyword - do: allowed_warnings_regex: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml index 984c1c22b2177..e4fee3569fef2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml @@ -6,20 +6,21 @@ reason: requires supporting subobjects auto setting - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: auto - properties: - host.name: - type: keyword + template: + mappings: + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -70,15 +71,16 @@ reason: requires supporting subobjects auto setting - do: - indices.put_template: + indices.put_index_template: name: 
test body: index_patterns: test-* - mappings: - subobjects: auto - properties: - host.name: - type: keyword + template: + mappings: + subobjects: auto + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -129,22 +131,23 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - dynamic_templates: - - no_subobjects: - match: metrics - mapping: - type: object - subobjects: auto - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + dynamic_templates: + - no_subobjects: + match: metrics + mapping: + type: object + subobjects: auto + properties: + host.name: + type: keyword - do: allowed_warnings_regex: @@ -196,17 +199,18 @@ reason: added in 8.4.0 - do: - indices.put_template: + indices.put_index_template: name: test body: index_patterns: test-* - mappings: - _source: - mode: synthetic - subobjects: auto - properties: - host.name: - type: keyword + template: + mappings: + _source: + mode: synthetic + subobjects: auto + properties: + host.name: + type: keyword - do: allowed_warnings_regex: From db0cc8122922fe7930199da889ea53fd72b30220 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Fri, 23 Aug 2024 14:02:14 +0200 Subject: [PATCH 039/352] Add support for spatial relationships in point field mapper (#112126) Lucene only supports intersects queries over XYPoint fields but it is still possible to represent all the spatial relationships using just that query. --- docs/changelog/112126.yaml | 5 + .../search/ShapeQueryOverPointTests.java | 128 +------ .../spatial/search/ShapeQueryTestCase.java | 311 ++++++++++-------- .../index/mapper/PointFieldMapper.java | 3 +- .../index/query/ShapeQueryPointProcessor.java | 278 ++++++++++++++-- .../ShapeQueryBuilderOverPointTests.java | 15 +- 6 files changed, 432 insertions(+), 308 deletions(-) create mode 100644 docs/changelog/112126.yaml diff --git a/docs/changelog/112126.yaml b/docs/changelog/112126.yaml new file mode 100644 index 0000000000000..f6a7aeb893a5e --- /dev/null +++ b/docs/changelog/112126.yaml @@ -0,0 +1,5 @@ +pr: 112126 +summary: Add support for spatial relationships in point field mapper +area: Geo +type: enhancement +issues: [] diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java index 0563c8f281cb8..f4ee7f264d4f7 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverPointTests.java @@ -6,141 +6,23 @@ */ package org.elasticsearch.xpack.spatial.search; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.geometry.GeometryCollection; -import org.elasticsearch.geometry.Line; -import org.elasticsearch.geometry.LinearRing; -import org.elasticsearch.geometry.MultiLine; -import org.elasticsearch.geometry.MultiPoint; -import org.elasticsearch.geometry.Point; -import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.ShapeType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import 
org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; -import org.hamcrest.CoreMatchers; - -import java.util.List; public class ShapeQueryOverPointTests extends ShapeQueryTestCase { @Override protected XContentBuilder createDefaultMapping() throws Exception { - XContentBuilder xcb = XContentFactory.jsonBuilder() + final boolean isIndexed = randomBoolean(); + final boolean hasDocValues = isIndexed == false || randomBoolean(); + return XContentFactory.jsonBuilder() .startObject() .startObject("properties") .startObject(defaultFieldName) .field("type", "point") + .field("index", isIndexed) + .field("doc_values", hasDocValues) .endObject() .endObject() .endObject(); - - return xcb; - } - - public void testProcessRelationSupport() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Rectangle rectangle = new Rectangle(-35, -25, -25, -35); - - for (ShapeRelation shapeRelation : ShapeRelation.values()) { - if (shapeRelation.equals(ShapeRelation.INTERSECTS) == false) { - SearchPhaseExecutionException e = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch("test") - .setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle).relation(shapeRelation)) - .get() - ); - assertThat( - e.getCause().getMessage(), - CoreMatchers.containsString(shapeRelation + " query relation not supported for Field [" + defaultFieldName + "]") - ); - } - } - } - - public void testQueryLine() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Line line = new Line(new double[] { -25, -25 }, new double[] { -35, -35 }); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, line)).get(); - } catch (SearchPhaseExecutionException e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.LINESTRING + " queries")); - } - } - - public void testQueryLinearRing() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - LinearRing linearRing = new LinearRing(new double[] { -25, -35, -25 }, new double[] { -25, -35, -25 }); - - IllegalArgumentException ex = expectThrows( - IllegalArgumentException.class, - () -> new ShapeQueryBuilder(defaultFieldName, linearRing) - ); - assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); - - ex = expectThrows( - IllegalArgumentException.class, - () -> new ShapeQueryBuilder(defaultFieldName, new GeometryCollection<>(List.of(linearRing))) - ); - assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); - } - - public void testQueryMultiLine() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Line lsb1 = new Line(new double[] { -35, -25 }, new double[] { -35, -25 }); - Line lsb2 = new Line(new double[] { -15, -5 }, new double[] { -15, -5 }); - - MultiLine multiline = new MultiLine(List.of(lsb1, lsb2)); - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, multiline)).get(); - } catch (Exception e) { - assertThat( - e.getCause().getMessage(), - CoreMatchers.containsString("does not support " + 
ShapeType.MULTILINESTRING + " queries") - ); - } - } - - public void testQueryMultiPoint() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - MultiPoint multiPoint = new MultiPoint(List.of(new Point(-35, -25), new Point(-15, -5))); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint)).get(); - } catch (Exception e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.MULTIPOINT + " queries")); - } } - - public void testQueryPoint() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate("test").setMapping(mapping).get(); - ensureGreen(); - - Point point = new Point(-35, -2); - - try { - client().prepareSearch("test").setQuery(new ShapeQueryBuilder(defaultFieldName, point)).get(); - } catch (Exception e) { - assertThat(e.getCause().getMessage(), CoreMatchers.containsString("does not support " + ShapeType.POINT + " queries")); - } - } - } diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java index 38d0a30b593b6..1ac6bf3b6fd31 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryTestCase.java @@ -7,16 +7,18 @@ package org.elasticsearch.xpack.spatial.search; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.GeometryCollection; +import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.MultiLine; +import org.elasticsearch.geometry.MultiPoint; import org.elasticsearch.geometry.MultiPolygon; import org.elasticsearch.geometry.Point; import org.elasticsearch.geometry.Polygon; import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -26,6 +28,7 @@ import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; import org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder; +import org.hamcrest.CoreMatchers; import java.util.Collection; import java.util.List; @@ -35,6 +38,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -46,29 +50,18 @@ protected Collection> getPlugins() { return pluginList(LocalStateSpatialPlugin.class, LocalStateCompositeXPackPlugin.class); } - protected abstract XContentBuilder createDefaultMapping() throws Exception; - - 
static String defaultFieldName = "xy"; - static String defaultIndexName = "test-points"; + @Override + public void setUp() throws Exception { + super.setUp(); - public void testNullShape() throws Exception { String mapping = Strings.toString(createDefaultMapping()); indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); ensureGreen(); prepareIndex(defaultIndexName).setId("aNullshape") - .setSource("{\"geo\": null}", XContentType.JSON) + .setSource("{\"" + defaultFieldName + "\": null}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE) .get(); - GetResponse result = client().prepareGet(defaultIndexName, "aNullshape").get(); - assertThat(result.getField("location"), nullValue()); - }; - - public void testIndexPointsFilterRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - prepareIndex(defaultIndexName).setId("1") .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) .setRefreshPolicy(IMMEDIATE) @@ -78,74 +71,82 @@ public void testIndexPointsFilterRectangle() throws Exception { .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + prepareIndex(defaultIndexName).setId("3") + .setSource(jsonBuilder().startObject().field("name", "Document 3").field(defaultFieldName, "POINT(50 50)").endObject()) + .setRefreshPolicy(IMMEDIATE) + .get(); + prepareIndex(defaultIndexName).setId("4") + .setSource( + jsonBuilder().startObject() + .field("name", "Document 4") + .field(defaultFieldName, new String[] { "POINT(-30 -30)", "POINT(50 50)" }) + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); + prepareIndex(defaultIndexName).setId("5") + .setSource( + jsonBuilder().startObject() + .field("name", "Document 5") + .field(defaultFieldName, new String[] { "POINT(60 60)", "POINT(50 50)" }) + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); + } + + protected abstract XContentBuilder createDefaultMapping() throws Exception; + + static String defaultFieldName = "xy"; + static String defaultIndexName = "test-points"; + public void testNullShape() { + GetResponse result = client().prepareGet(defaultIndexName, "aNullshape").get(); + assertThat(result.getField(defaultFieldName), nullValue()); + }; + + public void testIndexPointsFilterRectangle() { Rectangle rectangle = new Rectangle(-45, 45, 45, -45); assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); // default query, without specifying relation (expect intersects) - assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName).setQuery(new ShapeQueryBuilder(defaultFieldName, rectangle)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - 
assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsCircle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsCircle() { Circle circle = new Circle(-30, -30, 1); assertNoFailuresAndResponse( client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + assertThat(response.getHits().getHits().length, equalTo(2)); + assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsPolygon() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsPolygon() { Polygon polygon = new Polygon(new LinearRing(new double[] { -35, -35, -25, -25, -35 }, new double[] { -35, -25, -25, -35, -35 })); assertNoFailuresAndResponse( @@ -153,32 +154,14 @@ public void testIndexPointsPolygon() throws Exception { .setQuery(new ShapeQueryBuilder(defaultFieldName, polygon).relation(ShapeRelation.INTERSECTS)), response -> { SearchHits searchHits = response.getHits(); - assertThat(searchHits.getTotalHits().value, equalTo(1L)); - assertThat(searchHits.getAt(0).getId(), equalTo("1")); + assertThat(searchHits.getTotalHits().value, equalTo(2L)); + assertThat(searchHits.getAt(0).getId(), anyOf(equalTo("1"), equalTo("4"))); + assertThat(searchHits.getAt(1).getId(), anyOf(equalTo("1"), equalTo("4"))); } ); } - public void testIndexPointsMultiPolygon() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, 
"POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-40 -40)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("3") - .setSource(jsonBuilder().startObject().field("name", "Document 3").field(defaultFieldName, "POINT(-50 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsMultiPolygon() { Polygon encloseDocument1Shape = new Polygon( new LinearRing(new double[] { -35, -35, -25, -25, -35 }, new double[] { -35, -25, -25, -35, -35 }) ); @@ -192,29 +175,16 @@ public void testIndexPointsMultiPolygon() throws Exception { client().prepareSearch(defaultIndexName) .setQuery(new ShapeQueryBuilder(defaultFieldName, mp).relation(ShapeRelation.INTERSECTS)), response -> { - assertThat(response.getHits().getTotalHits().value, equalTo(2L)); - assertThat(response.getHits().getHits().length, equalTo(2)); - assertThat(response.getHits().getAt(0).getId(), not(equalTo("2"))); - assertThat(response.getHits().getAt(1).getId(), not(equalTo("2"))); + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getAt(0).getId(), not(equalTo("3"))); + assertThat(response.getHits().getAt(1).getId(), not(equalTo("3"))); + assertThat(response.getHits().getAt(2).getId(), not(equalTo("3"))); } ); } - public void testIndexPointsRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("1") - .setSource(jsonBuilder().startObject().field("name", "Document 1").field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("2") - .setSource(jsonBuilder().startObject().field("name", "Document 2").field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - + public void testIndexPointsRectangle() { Rectangle rectangle = new Rectangle(-50, -40, -45, -55); assertNoFailuresAndResponse( @@ -229,20 +199,6 @@ public void testIndexPointsRectangle() throws Exception { } public void testIndexPointsIndexedRectangle() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); - indicesAdmin().prepareCreate(defaultIndexName).setMapping(mapping).get(); - ensureGreen(); - - prepareIndex(defaultIndexName).setId("point1") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-30 -30)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - - prepareIndex(defaultIndexName).setId("point2") - .setSource(jsonBuilder().startObject().field(defaultFieldName, "POINT(-45 -50)").endObject()) - .setRefreshPolicy(IMMEDIATE) - .get(); - String indexedShapeIndex = "indexed_query_shapes"; String indexedShapePath = "shape"; String queryShapesMapping = Strings.toString( @@ -278,7 +234,7 @@ public void testIndexPointsIndexedRectangle() throws Exception { response -> { assertThat(response.getHits().getTotalHits().value, equalTo(1L)); assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getId(), equalTo("point2")); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); } ); @@ -291,53 +247,122 @@ public void testIndexPointsIndexedRectangle() 
throws Exception { ), 0L ); + } - public void testDistanceQuery() throws Exception { - indicesAdmin().prepareCreate("test_distance").setMapping("location", "type=shape").get(); - ensureGreen(); + public void testDistanceQuery() { + Circle circle = new Circle(-25, -25, 10); - Circle circle = new Circle(1, 0, 10); - - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(2, 2))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(3, 1))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(-20, -30))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); - client().index( - new IndexRequest("test_distance").source( - jsonBuilder().startObject().field("location", WellKnownText.toWKT(new Point(20, 30))).endObject() - ).setRefreshPolicy(IMMEDIATE) - ).actionGet(); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.INTERSECTS)), + 2L + ); + + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.WITHIN)), + 1L + ); + + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.DISJOINT)), + 3L + ); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.WITHIN)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, circle).relation(ShapeRelation.CONTAINS)), + 0L + ); + } + + public void testIndexPointsQueryLinearRing() { + LinearRing linearRing = new LinearRing(new double[] { -50, -50 }, new double[] { 50, 50 }); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> new ShapeQueryBuilder(defaultFieldName, linearRing) + ); + assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); + + ex = expectThrows( + IllegalArgumentException.class, + () -> new ShapeQueryBuilder(defaultFieldName, new GeometryCollection<>(List.of(linearRing))) + ); + assertThat(ex.getMessage(), CoreMatchers.containsString("[LINEARRING] geometries are not supported")); + } + + public void testIndexPointsQueryLine() { + Line line = new Line(new double[] { 100, -30 }, new double[] { -100, -30 }); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, line).relation(ShapeRelation.INTERSECTS)), 2L ); + } + + public void testIndexPointsQueryMultiLine() { + MultiLine multiLine = new MultiLine( + List.of( + new Line(new double[] { 100, -30 }, new double[] { -100, -30 }), + new Line(new double[] { 100, -20 }, new double[] { -100, -20 }) + ) + ); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.INTERSECTS)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiLine).relation(ShapeRelation.INTERSECTS)), 2L ); + } + public void testIndexPointsQueryPoint() { + Point point = new Point(-30, -30); assertHitCount( - 
client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.DISJOINT)), + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.INTERSECTS)), 2L ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.WITHIN)), + 1L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.CONTAINS)), + 2L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, point).relation(ShapeRelation.DISJOINT)), + 3L + ); + } + public void testIndexPointsQueryMultiPoint() { + MultiPoint multiPoint = new MultiPoint(List.of(new Point(-30, -30), new Point(50, 50))); assertHitCount( - client().prepareSearch("test_distance").setQuery(new ShapeQueryBuilder("location", circle).relation(ShapeRelation.CONTAINS)), - 0L + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.INTERSECTS)), + 4L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.WITHIN)), + 3L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.CONTAINS)), + 1L + ); + assertHitCount( + client().prepareSearch(defaultIndexName) + .setQuery(new ShapeQueryBuilder(defaultFieldName, multiPoint).relation(ShapeRelation.DISJOINT)), + 1L ); } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index d98fe7fdfc6ec..9412dc3c5eb53 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -215,7 +215,8 @@ public String typeName() { @Override public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation, SearchExecutionContext context) { - return queryProcessor.shapeQuery(shape, fieldName, relation, context); + failIfNotIndexedNorDocValuesFallback(context); + return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); } @Override diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java index a8c084e7e0f01..22616eabf8211 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java @@ -8,50 +8,272 @@ import org.apache.lucene.document.XYDocValuesField; import org.apache.lucene.document.XYPointField; +import org.apache.lucene.geo.Component2D; import org.apache.lucene.geo.XYGeometry; +import org.apache.lucene.geo.XYPoint; +import org.apache.lucene.geo.XYRectangle; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import 
org.apache.lucene.search.FieldExistsQuery;
 import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.common.geo.LuceneGeometriesUtils;
 import org.elasticsearch.common.geo.ShapeRelation;
 import org.elasticsearch.geometry.Geometry;
-import org.elasticsearch.geometry.ShapeType;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.query.QueryShardException;
-import org.elasticsearch.index.query.SearchExecutionContext;
-import org.elasticsearch.xpack.spatial.index.mapper.PointFieldMapper;

-import java.util.function.Consumer;
+import java.util.Arrays;

+/** Generates a Lucene query for a spatial query over a point field.
+ *
+ * Note that Lucene only supports the intersects spatial relation, so we build the other relations
+ * using just that one.
+ */
 public class ShapeQueryPointProcessor {

-    public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, SearchExecutionContext context) {
-        final boolean hasDocValues = validateIsPointFieldType(fieldName, context);
-        // only the intersects relation is supported for indexed cartesian point types
-        if (relation != ShapeRelation.INTERSECTS) {
-            throw new QueryShardException(context, relation + " query relation not supported for Field [" + fieldName + "].");
-        }
-        final Consumer checker = t -> {
-            if (t == ShapeType.POINT || t == ShapeType.MULTIPOINT || t == ShapeType.LINESTRING || t == ShapeType.MULTILINESTRING) {
-                throw new QueryShardException(context, "Field [" + fieldName + "] does not support " + t + " queries");
-            }
+    public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean isIndexed, boolean hasDocValues) {
+        assert isIndexed || hasDocValues;
+        final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {});
+        // XYPointField only supports the intersects query, so we build all the relations using that logic.
+        // It is not very efficient, but it works.
+        return switch (relation) {
+            case INTERSECTS -> buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
+            case DISJOINT -> buildDisjointQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
+            case CONTAINS -> buildContainsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
+            case WITHIN -> buildWithinQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
         };
-        final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, checker);
-        Query query = XYPointField.newGeometryQuery(fieldName, luceneGeometries);
-        if (hasDocValues) {
-            final Query queryDocValues = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries);
-            query = new IndexOrDocValuesQuery(query, queryDocValues);
+    }
+
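As a quick aside, the decomposition above is easy to sanity-check with a tiny self-contained model in which a document is just the set of its points and the query shape is a predicate over points. The sketch below is illustrative only (plain Java standing in for Lucene queries and geometries; the class and names are invented for the example), but it shows why a multi-valued document such as Document 4 in the tests above matches INTERSECTS yet not WITHIN:

    import java.util.Set;
    import java.util.function.Predicate;

    public class RelationModel {
        // at least one of the document's points falls inside the shape
        static boolean intersects(Set<String> doc, Predicate<String> shape) {
            return doc.stream().anyMatch(shape);
        }

        // intersects, and additionally no point falls outside the shape
        static boolean within(Set<String> doc, Predicate<String> shape) {
            return intersects(doc, shape) && doc.stream().anyMatch(shape.negate()) == false;
        }

        // the document has points, and none of them falls inside the shape
        static boolean disjoint(Set<String> doc, Predicate<String> shape) {
            return doc.isEmpty() == false && intersects(doc, shape) == false;
        }

        public static void main(String[] args) {
            Set<String> doc4 = Set.of("POINT(-30 -30)", "POINT(50 50)");
            Predicate<String> shape = "POINT(-30 -30)"::equals; // a shape covering only (-30, -30)
            System.out.println(intersects(doc4, shape)); // true
            System.out.println(within(doc4, shape));     // false: POINT(50 50) lies outside
            System.out.println(disjoint(doc4, shape));   // false
        }
    }

Under this model, DISJOINT is just "has points and none of them intersect", which is exactly the FieldExistsQuery plus a MUST_NOT intersects clause built below.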
+    private static Query buildIntersectsQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) {
+        // This is supported natively in Lucene
+        Query query;
+        if (isIndexed) {
+            query = XYPointField.newGeometryQuery(fieldName, luceneGeometries);
+            if (hasDocValues) {
+                final Query queryDocValues = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries);
+                query = new IndexOrDocValuesQuery(query, queryDocValues);
+            }
+        } else {
+            query = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries);
         }
         return query;
     }

-    private boolean validateIsPointFieldType(String fieldName, SearchExecutionContext context) {
-        MappedFieldType fieldType = context.getFieldType(fieldName);
-        if (fieldType instanceof PointFieldMapper.PointFieldType == false) {
-            throw new QueryShardException(
-                context,
-                "Expected " + PointFieldMapper.CONTENT_TYPE + " field type for Field [" + fieldName + "] but found " + fieldType.typeName()
+    private static Query buildDisjointQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) {
+        // first collect all the documents that contain a shape
+        final BooleanQuery.Builder builder = new BooleanQuery.Builder();
+        if (hasDocValues) {
+            builder.add(new FieldExistsQuery(fieldName), BooleanClause.Occur.FILTER);
+        } else {
+            builder.add(
+                buildIntersectsQuery(
+                    fieldName,
+                    isIndexed,
+                    hasDocValues,
+                    new XYRectangle(-Float.MAX_VALUE, Float.MAX_VALUE, -Float.MAX_VALUE, Float.MAX_VALUE)
+                ),
+                BooleanClause.Occur.FILTER
             );
         }
-        return fieldType.hasDocValues();
+        // then remove all intersecting documents
+        builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries), BooleanClause.Occur.MUST_NOT);
+        return builder.build();
+    }
+
+    private static Query buildContainsQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) {
+        // for non-point data the result is always false
+        if (allPoints(luceneGeometries) == false) {
+            return new MatchNoDocsQuery();
+        }
+        // for a single point, it behaves like intersects
+        if (luceneGeometries.length == 1) {
+            return buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
+        }
+        // for a multi-point, all points need to be in the document
+        final BooleanQuery.Builder builder = new BooleanQuery.Builder();
+        for (XYGeometry geometry : luceneGeometries) {
+            builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, geometry), BooleanClause.Occur.FILTER);
+        }
+        return builder.build();
+    }
+
+    private static Query buildWithinQuery(String fieldName, boolean isIndexed, boolean hasDocValues, XYGeometry... luceneGeometries) {
+        final BooleanQuery.Builder builder = new BooleanQuery.Builder();
+        // collect all the intersecting documents
+        builder.add(buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries), BooleanClause.Occur.FILTER);
+        // This is the tricky part, as we need to remove all documents that have at least one disjoint point.
+        // In order to do that, we introduce an InverseXYGeometry, which returns all documents that have at least one point
+        // disjoint with the original geometry.
+        builder.add(
+            buildIntersectsQuery(fieldName, isIndexed, hasDocValues, new InverseXYGeometry(luceneGeometries)),
+            BooleanClause.Occur.MUST_NOT
+        );
+        return builder.build();
+    }
+
+    private static boolean allPoints(XYGeometry[] geometries) {
+        return Arrays.stream(geometries).allMatch(g -> g instanceof XYPoint);
+    }
+
+    private static class InverseXYGeometry extends XYGeometry {
+        private final XYGeometry[] geometries;
+
+        InverseXYGeometry(XYGeometry... 
geometries) { + this.geometries = geometries; + } + + @Override + protected Component2D toComponent2D() { + final Component2D component2D = XYGeometry.create(geometries); + return new Component2D() { + @Override + public double getMinX() { + return -Float.MAX_VALUE; + } + + @Override + public double getMaxX() { + return Float.MAX_VALUE; + } + + @Override + public double getMinY() { + return -Float.MAX_VALUE; + } + + @Override + public double getMaxY() { + return Float.MAX_VALUE; + } + + @Override + public boolean contains(double x, double y) { + return component2D.contains(x, y) == false; + } + + @Override + public PointValues.Relation relate(double minX, double maxX, double minY, double maxY) { + PointValues.Relation relation = component2D.relate(minX, maxX, minY, maxY); + return switch (relation) { + case CELL_INSIDE_QUERY -> PointValues.Relation.CELL_OUTSIDE_QUERY; + case CELL_OUTSIDE_QUERY -> PointValues.Relation.CELL_INSIDE_QUERY; + case CELL_CROSSES_QUERY -> PointValues.Relation.CELL_CROSSES_QUERY; + }; + } + + @Override + public boolean intersectsLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean intersectsTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY, + double cX, + double cY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + double bX, + double bY, + double cX, + double cY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinPoint(double x, double y) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinLine( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + boolean ab, + double bX, + double bY + ) { + throw new UnsupportedOperationException(); + } + + @Override + public WithinRelation withinTriangle( + double minX, + double maxX, + double minY, + double maxY, + double aX, + double aY, + boolean ab, + double bX, + double bY, + boolean bc, + double cX, + double cY, + boolean ca + ) { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + InverseXYGeometry that = (InverseXYGeometry) o; + return Arrays.equals(geometries, that.geometries); + } + + @Override + public int hashCode() { + return Arrays.hashCode(geometries); + } } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java index db67b1f1e998b..05756168991c9 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilderOverPointTests.java @@ -30,22 +30,11 @@ protected void initializeAdditionalMappings(MapperService mapperService) 
throws @Override protected ShapeRelation getShapeRelation(ShapeType type) { - return ShapeRelation.INTERSECTS; + return randomFrom(ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS, ShapeRelation.DISJOINT, ShapeRelation.WITHIN); } @Override protected Geometry getGeometry() { - if (randomBoolean()) { - if (randomBoolean()) { - return ShapeTestUtils.randomMultiPolygon(false); - } else { - return ShapeTestUtils.randomPolygon(false); - } - } else if (randomBoolean()) { - // it should be a circle - return ShapeTestUtils.randomPolygon(false); - } else { - return ShapeTestUtils.randomRectangle(); - } + return ShapeTestUtils.randomGeometry(false); } } From 3ab163450916be7f39158691517a788fdf7cc391 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 22:32:20 +1000 Subject: [PATCH 040/352] Mute org.elasticsearch.test.rest.ClientYamlTestSuiteIT org.elasticsearch.test.rest.ClientYamlTestSuiteIT #112143 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index ec097616c2af6..20cf821f68c5e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -182,6 +182,8 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC} issue: https://github.com/elastic/elasticsearch/issues/112118 +- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/112143 # Examples: # From 915528c00e9f5de9f74c86c2e775b96c60eced7f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 23:08:09 +1000 Subject: [PATCH 041/352] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=transform/preview_transforms/Test preview transform latest} #112144 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 20cf821f68c5e..f57c3dbcc2a6d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -184,6 +184,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112118 - class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/112143 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/preview_transforms/Test preview transform latest} + issue: https://github.com/elastic/elasticsearch/issues/112144 # Examples: # From bcad4f0d24772a34b25b77b3abc58687c5a1df69 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 23 Aug 2024 23:59:44 +1000 Subject: [PATCH 042/352] Mute org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT #112147 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f57c3dbcc2a6d..463075f1f93ae 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -187,6 +187,8 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/preview_transforms/Test preview transform latest} issue: https://github.com/elastic/elasticsearch/issues/112144 +- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/112147 # Examples: # From 9847a315fce870ff9288c7bfe86a00ba0f40013b Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Fri, 23 Aug 
2024 10:41:03 -0400 Subject: [PATCH 043/352] Semantic reranking should fail whenever inference ID does not exist (#112038) * Semantic reranking should fail whenever inference ID does not exist * Short circuit text similarity reranking on empty result set * Update tests * Remove test - it doesn't do anything useful * Update docs/changelog/112038.yaml --- docs/changelog/112038.yaml | 6 ++ ...ankFeaturePhaseRankCoordinatorContext.java | 16 ++--- ...ankFeaturePhaseRankCoordinatorContext.java | 19 ++++-- ...aturePhaseRankCoordinatorContextTests.java | 19 ++++++ .../70_text_similarity_rank_retriever.yml | 67 +++++++++++++++++-- 5 files changed, 104 insertions(+), 23 deletions(-) create mode 100644 docs/changelog/112038.yaml diff --git a/docs/changelog/112038.yaml b/docs/changelog/112038.yaml new file mode 100644 index 0000000000000..6cbfb373b7420 --- /dev/null +++ b/docs/changelog/112038.yaml @@ -0,0 +1,6 @@ +pr: 112038 +summary: Semantic reranking should fail whenever inference ID does not exist +area: Relevance +type: bug +issues: + - 111934 diff --git a/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java b/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java index 02834f03f54ab..9faa5e4e4450c 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/context/RankFeaturePhaseRankCoordinatorContext.java @@ -74,16 +74,12 @@ public void computeRankScoresForGlobalResults( RankFeatureDoc[] featureDocs = extractFeatureDocs(rankSearchResults); // generate the final `topResults` results, and pass them to fetch phase through the `rankListener` - if (featureDocs.length == 0) { - rankListener.onResponse(new RankFeatureDoc[0]); - } else { - computeScores(featureDocs, rankListener.delegateFailureAndWrap((listener, scores) -> { - for (int i = 0; i < featureDocs.length; i++) { - featureDocs[i].score = scores[i]; - } - listener.onResponse(featureDocs); - })); - } + computeScores(featureDocs, rankListener.delegateFailureAndWrap((listener, scores) -> { + for (int i = 0; i < featureDocs.length; i++) { + featureDocs[i].score = scores[i]; + } + listener.onResponse(featureDocs); + })); } /** diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java index 42413c35fcbff..cad11cbdc9d5b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContext.java @@ -62,6 +62,7 @@ protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener rankedDocs = ((RankedDocsResults) results).getRankedDocs(); + if (rankedDocs.size() != featureDocs.length) { l.onFailure( new IllegalStateException( @@ -104,12 +105,18 @@ protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); - InferenceAction.Request inferenceRequest = generateRequest(featureData); - try { - client.execute(InferenceAction.INSTANCE, inferenceRequest, 
inferenceListener); - } finally { - inferenceRequest.decRef(); + + // Short circuit on empty results after request validation + if (featureDocs.length == 0) { + inferenceListener.onResponse(new InferenceAction.Response(new RankedDocsResults(List.of()))); + } else { + List featureData = Arrays.stream(featureDocs).map(x -> x.featureData).toList(); + InferenceAction.Request inferenceRequest = generateRequest(featureData); + try { + client.execute(InferenceAction.INSTANCE, inferenceRequest, inferenceListener); + } finally { + inferenceRequest.decRef(); + } } }); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java index 2e9be42b5c5d4..d6c476cdc15d6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankFeaturePhaseRankCoordinatorContextTests.java @@ -61,4 +61,23 @@ public void onFailure(Exception e) { ); } + public void testComputeScoresForEmpty() { + subject.computeScores(new RankFeatureDoc[0], new ActionListener<>() { + @Override + public void onResponse(float[] floats) { + assertArrayEquals(new float[0], floats, 0.0f); + } + + @Override + public void onFailure(Exception e) { + fail(); + } + }); + verify(mockClient).execute( + eq(GetInferenceModelAction.INSTANCE), + argThat(actionRequest -> ((GetInferenceModelAction.Request) actionRequest).getTaskType().equals(TaskType.RERANK)), + any() + ); + } + } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml index 6d3c1231440fb..530be2341c9c8 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml @@ -38,8 +38,8 @@ setup: id: doc_1 body: text: "As seen from Earth, a solar eclipse happens when the Moon is directly between the Earth and the Sun." - topic: ["science"] - subtopic: ["technology"] + topic: [ "science" ] + subtopic: [ "technology" ] refresh: true - do: @@ -48,8 +48,8 @@ setup: id: doc_2 body: text: "The phases of the Moon come from the position of the Moon relative to the Earth and Sun." - topic: ["science"] - subtopic: ["astronomy"] + topic: [ "science" ] + subtopic: [ "astronomy" ] refresh: true - do: @@ -58,7 +58,7 @@ setup: id: doc_3 body: text: "Sun Moon Lake is a lake in Nantou County, Taiwan. It is the largest lake in Taiwan." 
- topic: ["geography"] + topic: [ "geography" ] refresh: true --- "Simple text similarity rank retriever": @@ -82,7 +82,7 @@ setup: field: text size: 10 - - match: { hits.total.value : 2 } + - match: { hits.total.value: 2 } - length: { hits.hits: 2 } - match: { hits.hits.0._id: "doc_2" } @@ -118,9 +118,62 @@ setup: field: text size: 10 - - match: { hits.total.value : 1 } + - match: { hits.total.value: 1 } - length: { hits.hits: 1 } - match: { hits.hits.0._id: "doc_1" } - match: { hits.hits.0._rank: 1 } - close_to: { hits.hits.0._score: { value: 0.2, error: 0.001 } } + + +--- +"Text similarity reranking fails if the inference ID does not exist": + - do: + catch: /Inference endpoint not found/ + search: + index: test-index + body: + track_total_hits: true + fields: [ "text", "topic" ] + retriever: + text_similarity_reranker: + retriever: + standard: + query: + term: + topic: "science" + filter: + term: + subtopic: "technology" + rank_window_size: 10 + inference_id: i-dont-exist + inference_text: "How often does the moon hide the sun?" + field: text + size: 10 + +--- +"Text similarity reranking fails if the inference ID does not exist and result set is empty": + - requires: + cluster_features: "gte_v8.15.1" + reason: bug fixed in 8.15.1 + + - do: + catch: /Inference endpoint not found/ + search: + index: test-index + body: + track_total_hits: true + fields: [ "text", "topic" ] + retriever: + text_similarity_reranker: + retriever: + standard: + query: + term: + topic: "asdfasdf" + rank_window_size: 10 + inference_id: i-dont-exist + inference_text: "asdfasdf" + field: text + size: 10 + From 3c92797d0d323de25116c4c0a6d3758f5b1b37ac Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 23 Aug 2024 17:53:07 +0300 Subject: [PATCH 044/352] Allow warnings for template conflicts (#112145) Fixes #112143 --- .../test/index/91_metrics_no_subobjects.yml | 16 ++++++++++++---- .../test/index/92_metrics_auto_subobjects.yml | 16 ++++++++++++---- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml index ca6d65349c923..5881ec83ebe85 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/91_metrics_no_subobjects.yml @@ -1,11 +1,13 @@ --- "Metrics object indexing": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.3.0"] reason: added in 8.3.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -66,11 +68,13 @@ --- "Root without subobjects": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.3.0"] reason: added in 8.3.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ 
-126,11 +130,13 @@ --- "Metrics object indexing with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.4.0"] reason: added in 8.4.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -194,11 +200,13 @@ --- "Root without subobjects with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["gte_v8.4.0"] reason: added in 8.4.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml index e4fee3569fef2..414c24cfffd7d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml @@ -1,11 +1,13 @@ --- "Metrics object indexing": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["mapper.subobjects_auto"] reason: requires supporting subobjects auto setting - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -66,11 +68,13 @@ --- "Root with metrics": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["mapper.subobjects_auto"] reason: requires supporting subobjects auto setting - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -126,11 +130,13 @@ --- "Metrics object indexing with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["mapper.subobjects_auto"] reason: added in 8.4.0 - do: + allowed_warnings: + - "index template [test] has index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation" indices.put_index_template: name: test body: @@ -194,11 +200,13 @@ --- "Root without subobjects with synthetic source": - requires: - test_runner_features: allowed_warnings_regex + test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] cluster_features: ["mapper.subobjects_auto"] reason: added in 8.4.0 - do: + allowed_warnings: + - "index template [test] has 
index patterns [test-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [test] will take precedence during new index creation"
       indices.put_index_template:
         name: test
         body:

From d71654195c38fb8cc0806c2a27689d59f8ffd1c6 Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Fri, 23 Aug 2024 17:23:57 +0100
Subject: [PATCH 045/352] [DOCS] Wrap document/field restriction tip in
 IMPORTANT block (#112146)

---
 .../authorization/field-and-document-access-control.asciidoc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/reference/security/authorization/field-and-document-access-control.asciidoc b/docs/reference/security/authorization/field-and-document-access-control.asciidoc
index f4d4fcd49a35f..7c7ea75ece161 100644
--- a/docs/reference/security/authorization/field-and-document-access-control.asciidoc
+++ b/docs/reference/security/authorization/field-and-document-access-control.asciidoc
@@ -54,8 +54,11 @@ specify any field restrictions. If you assign a user both roles, `role_a` gives
 the user access to all documents and `role_b` gives the user access to all
 fields.

+[IMPORTANT]
+===========
 If you need to restrict access to both documents and fields, consider splitting
 documents by index instead.
+===========

 include::role-templates.asciidoc[]
 include::set-security-user.asciidoc[]

From 1fb2afa3df8501044a0383d53fb2e712f579109f Mon Sep 17 00:00:00 2001
From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com>
Date: Fri, 23 Aug 2024 19:26:57 +0300
Subject: [PATCH 046/352] Re-enable yaml tests (#112157)

Related to #112143

---
 muted-tests.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index 463075f1f93ae..a46456a6c9ad9 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -182,8 +182,6 @@ tests:
 - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT
   method: test {stats.ByTwoCalculatedSecondOverwritesReferencingFirst SYNC}
   issue: https://github.com/elastic/elasticsearch/issues/112118
-- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT
-  issue: https://github.com/elastic/elasticsearch/issues/112143
 - class: org.elasticsearch.xpack.test.rest.XPackRestIT
   method: test {p0=transform/preview_transforms/Test preview transform latest}
   issue: https://github.com/elastic/elasticsearch/issues/112144

From 0aa4758f02f736176a0f1c211ca701465ad05f63 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Fri, 23 Aug 2024 11:16:18 -0700
Subject: [PATCH 047/352] Stop setting java.library.path (#112119)

Native libraries in Java are loaded by calling System.loadLibrary. This
method inspects paths in java.library.path to find the requested library.
Elasticsearch previously used this to find libsystemd, but now the only
remaining use is to set the additional platform directory in which
Elasticsearch keeps its own native libraries.

One issue with setting java.library.path is that it's not set for the CLI
process, which makes loading the native library infrastructure from CLIs
difficult. This commit reworks how Elasticsearch native libraries are
found in order to avoid needing to set java.library.path. There are two
cases. The simplest is production, where the working directory is the
Elasticsearch installation directory, so the platform-specific directory
can be constructed. The second case is for tests, where we don't have an
installation. We already pass in java.library.path there, so this change
renames the system property to be a test-specific property that the new
loading infrastructure looks for.
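At the JDK level the change boils down to swapping one loading style for another. The sketch below is purely illustrative (the library name and directory are placeholders for the example, not paths taken from this change) and contrasts the search-path-based System.loadLibrary with the direct System.load that the new loading code relies on:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class LoadStyles {
        public static void main(String[] args) {
            // Before: rely on the JVM-wide search path, which had to be configured at
            // startup with -Djava.library.path=... for every process loading the library.
            try {
                System.loadLibrary("vec"); // searches java.library.path for libvec.so / vec.dll
            } catch (UnsatisfiedLinkError e) {
                System.out.println("not on java.library.path: " + e.getMessage());
            }

            // After: resolve an absolute path ourselves and load it directly, so no
            // JVM-level search path needs to be configured at all. This only succeeds
            // if a library actually exists at the resolved path.
            Path lib = Paths.get("lib", "platform", "linux-x64").resolve(System.mapLibraryName("vec"));
            try {
                System.load(lib.toAbsolutePath().toString());
            } catch (UnsatisfiedLinkError e) {
                System.out.println("no library at " + lib.toAbsolutePath() + ": " + e.getMessage());
            }
        }
    }

The direct-load style is what makes the two resolution cases described above possible: production resolves the platform directory relative to the working directory, while tests supply it through the new es.nativelibs.path property.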
---
 benchmarks/build.gradle                       |  2 +-
 .../src/main/groovy/elasticsearch.ide.gradle  |  3 +-
 .../internal/ElasticsearchJavaBasePlugin.java |  4 +-
 .../gradle/internal/test/TestUtil.java        |  4 +-
 .../server/cli/SystemJvmOptions.java          | 41 ------------
 .../server/cli/JvmOptionsParserTests.java     | 48 +-------------
 .../nativeaccess/lib/LoaderHelper.java        | 62 +++++++++++++++++++
 .../nativeaccess/jdk/JdkVectorLibrary.java    |  3 +-
 .../nativeaccess/jdk/JdkZstdLibrary.java      |  3 +-
 .../VectorSystemPropertyTests.java            |  2 +-
 10 files changed, 72 insertions(+), 100 deletions(-)
 create mode 100644 libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java

diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 3f7ee8b60b53c..e2511438e7f95 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -77,7 +77,7 @@ tasks.named("run").configure {
   executable = "${BuildParams.runtimeJavaHome}/bin/java"
   args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index"
   dependsOn "copyExpression", "copyPainless"
-  systemProperty 'java.library.path', file("../libs/native/libraries/build/platform/${platformName()}-${os.arch}")
+  systemProperty 'es.nativelibs.path', file("../libs/native/libraries/build/platform/${platformName()}-${os.arch}")
 }

 String platformName() {
diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
index 6cb22dad9bc79..285c3a61b08c2 100644
--- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
+++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle
@@ -168,8 +168,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') {
         '-ea',
         '-Djava.security.manager=allow',
         '-Djava.locale.providers=SPI,COMPAT',
-        '-Djava.library.path=' + testLibraryPath,
-        '-Djna.library.path=' + testLibraryPath,
+        '-Des.nativelibs.path=' + testLibraryPath,
         // TODO: only open these for mockito when it is modularized
         '--add-opens=java.base/java.security.cert=ALL-UNNAMED',
         '--add-opens=java.base/java.nio.channels=ALL-UNNAMED',
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
index f95d9d72a473f..a3b1dd9731591 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
@@ -189,9 +189,7 @@ private static void configureNativeLibraryPath(Project project) {
             var libraryPath = (Supplier) () -> TestUtil.getTestLibraryPath(nativeConfigFiles.getAsPath());
             test.dependsOn(nativeConfigFiles);
-            // we may use JNA or the JDK's foreign function api to load libraries, so we set both sysprops
-            systemProperties.systemProperty("java.library.path", libraryPath);
-            systemProperties.systemProperty("jna.library.path", libraryPath);
+            systemProperties.systemProperty("es.nativelibs.path", libraryPath);
         });
     }

diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java
index 96fde95d0dd17..965f3964c9a38 100644
--- 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestUtil.java @@ -11,7 +11,6 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.ElasticsearchDistribution; -import java.io.File; import java.util.Locale; public class TestUtil { @@ -19,8 +18,7 @@ public class TestUtil { public static String getTestLibraryPath(String nativeLibsDir) { String arch = Architecture.current().toString().toLowerCase(Locale.ROOT); String platform = String.format(Locale.ROOT, "%s-%s", ElasticsearchDistribution.CURRENT_PLATFORM, arch); - String existingLibraryPath = System.getProperty("java.library.path"); - return String.format(Locale.ROOT, "%s/%s%c%s", nativeLibsDir, platform, File.pathSeparatorChar, existingLibraryPath); + return String.format(Locale.ROOT, "%s/%s", nativeLibsDir, platform); } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 2a89f18209d11..94e2d538c0ad0 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -10,11 +10,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.core.SuppressForbidden; -import java.io.File; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -25,7 +21,6 @@ final class SystemJvmOptions { static List systemJvmOptions(Settings nodeSettings, final Map sysprops) { String distroType = sysprops.get("es.distribution.type"); boolean isHotspot = sysprops.getOrDefault("sun.management.compiler", "").contains("HotSpot"); - String libraryPath = findLibraryPath(sysprops); return Stream.concat( Stream.of( @@ -73,8 +68,6 @@ static List systemJvmOptions(Settings nodeSettings, final Map TEST_SYSPROPS = Map.of( - "os.name", - "Linux", - "os.arch", - "aarch64", - "java.library.path", - "/usr/lib" - ); + private static final Map TEST_SYSPROPS = Map.of("os.name", "Linux", "os.arch", "aarch64"); public void testSubstitution() { final List jvmOptions = JvmOptionsParser.substitutePlaceholders( @@ -390,40 +380,4 @@ public void testCommandLineDistributionType() { final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops); assertThat(jvmOptions, hasItem("-Des.distribution.type=testdistro")); } - - public void testLibraryPath() { - assertLibraryPath("Mac OS", "aarch64", "darwin-aarch64"); - assertLibraryPath("Mac OS", "amd64", "darwin-x64"); - assertLibraryPath("Mac OS", "x86_64", "darwin-x64"); - assertLibraryPath("Linux", "aarch64", "linux-aarch64"); - assertLibraryPath("Linux", "amd64", "linux-x64"); - assertLibraryPath("Linux", "x86_64", "linux-x64"); - assertLibraryPath("Windows", "amd64", "windows-x64"); - assertLibraryPath("Windows", "x86_64", "windows-x64"); - assertLibraryPath("Unknown", "aarch64", "unsupported_os[Unknown]-aarch64"); - assertLibraryPath("Mac OS", "Unknown", "darwin-unsupported_arch[Unknown]"); - } - - private void assertLibraryPath(String os, String arch, String expected) { - String existingPath = "/usr/lib"; - var sysprops = Map.of("os.name", os, "os.arch", arch, "java.library.path", existingPath); - final List 
jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY, sysprops); - Map options = new HashMap<>(); - for (var jvmOption : jvmOptions) { - if (jvmOption.startsWith("-D")) { - String[] parts = jvmOption.substring(2).split("="); - assert parts.length == 2; - options.put(parts[0], parts[1]); - } - } - String separator = FileSystems.getDefault().getSeparator(); - assertThat( - options, - hasEntry(equalTo("java.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath))) - ); - assertThat( - options, - hasEntry(equalTo("jna.library.path"), allOf(containsString("platform" + separator + expected), containsString(existingPath))) - ); - } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java new file mode 100644 index 0000000000000..4da52c415c040 --- /dev/null +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.nativeaccess.lib; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +/** + * A utility for loading libraries from Elasticsearch's platform specific lib dir. + */ +public class LoaderHelper { + private static final Path platformLibDir = findPlatformLibDir(); + + private static Path findPlatformLibDir() { + // tests don't have an ES install, so the platform dir must be passed in explicitly + String path = System.getProperty("es.nativelibs.path"); + if (path != null) { + return Paths.get(path); + } + + Path platformDir = Paths.get("lib", "platform"); + + String osname = System.getProperty("os.name"); + String os; + if (osname.startsWith("Windows")) { + os = "windows"; + } else if (osname.startsWith("Linux")) { + os = "linux"; + } else if (osname.startsWith("Mac OS")) { + os = "darwin"; + } else { + os = "unsupported_os[" + osname + "]"; + } + String archname = System.getProperty("os.arch"); + String arch; + if (archname.equals("amd64") || archname.equals("x86_64")) { + arch = "x64"; + } else if (archname.equals("aarch64")) { + arch = archname; + } else { + arch = "unsupported_arch[" + archname + "]"; + } + return platformDir.resolve(os + "-" + arch); + } + + public static void loadLibrary(String libname) { + Path libpath = platformLibDir.resolve(System.mapLibraryName(libname)); + if (Files.exists(libpath) == false) { + throw new UnsatisfiedLinkError("Native library [" + libpath + "] does not exist"); + } + System.load(libpath.toAbsolutePath().toString()); + } + + private LoaderHelper() {} // no construction +} diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java index c92ad654c9b9a..a1032f1381d94 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java @@ -9,6 +9,7 @@ package org.elasticsearch.nativeaccess.jdk; import org.elasticsearch.nativeaccess.VectorSimilarityFunctions; +import 
org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import java.lang.foreign.FunctionDescriptor; @@ -29,7 +30,7 @@ public final class JdkVectorLibrary implements VectorLibrary { static final VectorSimilarityFunctions INSTANCE; static { - System.loadLibrary("vec"); + LoaderHelper.loadLibrary("vec"); final MethodHandle vecCaps$mh = downcallHandle("vec_caps", FunctionDescriptor.of(JAVA_INT)); try { diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java index e3e972bc19d72..284ac134d2036 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkZstdLibrary.java @@ -9,6 +9,7 @@ package org.elasticsearch.nativeaccess.jdk; import org.elasticsearch.nativeaccess.CloseableByteBuffer; +import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.ZstdLibrary; import java.lang.foreign.FunctionDescriptor; @@ -24,7 +25,7 @@ class JdkZstdLibrary implements ZstdLibrary { static { - System.loadLibrary("zstd"); + LoaderHelper.loadLibrary("zstd"); } private static final MethodHandle compressBound$mh = downcallHandle("ZSTD_compressBound", FunctionDescriptor.of(JAVA_LONG, JAVA_INT)); diff --git a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java index 9875878d8658a..cda4fc8c55444 100644 --- a/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java +++ b/libs/native/src/test/java/org/elasticsearch/nativeaccess/VectorSystemPropertyTests.java @@ -49,7 +49,7 @@ public void testSystemPropertyDisabled() throws Exception { "-Xms4m", "-cp", jarPath + File.pathSeparator + System.getProperty("java.class.path"), - "-Djava.library.path=" + System.getProperty("java.library.path"), + "-Des.nativelibs.path=" + System.getProperty("es.nativelibs.path"), "p.Test" ).start(); String output = new String(process.getInputStream().readAllBytes(), UTF_8); From 75b0c5d13b93cc2092896e75c52af20e5bed3928 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Fri, 23 Aug 2024 15:00:05 -0400 Subject: [PATCH 048/352] Ignore under construction data types for params checking (#112163) This should prevent tests failing when we've added a type to the params annotation but the data type is disabled via feature flag. --- .../function/AbstractFunctionTestCase.java | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index efb078cbe80e0..c79f5ab8d086b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -680,6 +680,10 @@ public void testSerializationOfSimple() { assertSerialization(buildFieldExpression(testCase)); } + /** + * This test is meant to validate that the params annotations for the function being tested align with the supported types the + * test framework has detected. 
+     */
     @AfterClass
     public static void testFunctionInfo() {
         Logger log = LogManager.getLogger(getTestClass());
@@ -717,14 +721,23 @@ public static void testFunctionInfo() {

         for (int i = 0; i < args.size(); i++) {
             EsqlFunctionRegistry.ArgSignature arg = args.get(i);
-            Set annotationTypes = Arrays.stream(arg.type()).collect(Collectors.toCollection(TreeSet::new));
-            Set signatureTypes = typesFromSignature.get(i);
+            Set annotationTypes = Arrays.stream(arg.type())
+                .filter(DataType.UNDER_CONSTRUCTION::containsKey)
+                .collect(Collectors.toCollection(TreeSet::new));
+            Set signatureTypes = typesFromSignature.get(i)
+                .stream()
+                .filter(DataType.UNDER_CONSTRUCTION::containsKey)
+                .collect(Collectors.toCollection(TreeSet::new));
             if (signatureTypes.isEmpty()) {
                 log.info("{}: skipping", arg.name());
                 continue;
             }
             log.info("{}: tested {} vs annotated {}", arg.name(), signatureTypes, annotationTypes);
-            assertEquals(signatureTypes, annotationTypes);
+            assertEquals(
+                "Mismatch between actual and declared parameter types. You probably need to update your @params annotations.",
+                signatureTypes,
+                annotationTypes
+            );
         }

         Set returnTypes = Arrays.stream(description.returnType()).collect(Collectors.toCollection(TreeSet::new));

From 0f6529dec5ff3d3f4ca7d0aaef68e2675dd506b2 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Fri, 23 Aug 2024 20:06:27 +0100
Subject: [PATCH 049/352] Improve threading when restoring snapshot (#112162)

We use the `SNAPSHOT_META` pool for some of the work needed to start a
snapshot restore, but it's a little tangled with other work happening on a
`transport_worker`, or some other random threadpool on which
`getRepositoryData` completes. This commit ensures that we use
`SNAPSHOT_META` throughout.

Relates #101445

---
 .../TransportRestoreSnapshotAction.java       |  3 +-
 .../snapshots/RestoreService.java             | 54 +++++++++++--------
 .../snapshots/RestoreServiceTests.java        | 15 +++++-
 .../ccr/action/TransportPutFollowAction.java  | 10 ++--
 4 files changed, 53 insertions(+), 29 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java
index d7a14362026ef..ba34b8cab1021 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java
@@ -17,7 +17,6 @@
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.snapshots.RestoreService;
 import org.elasticsearch.tasks.Task;
@@ -49,7 +48,7 @@ public TransportRestoreSnapshotAction(
             RestoreSnapshotRequest::new,
             indexNameExpressionResolver,
             RestoreSnapshotResponse::new,
-            EsExecutors.DIRECT_EXECUTOR_SERVICE
+            threadPool.executor(ThreadPool.Names.SNAPSHOT_META)
         );
         this.restoreService = restoreService;
     }
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index 25796606f2b1b..0f03cfab4ad2e 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ 
b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -56,7 +56,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; @@ -95,6 +94,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.Executor; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -188,6 +188,8 @@ public final class RestoreService implements ClusterStateApplier { private final ThreadPool threadPool; + private final Executor snapshotMetaExecutor; + private volatile boolean refreshRepositoryUuidOnRestore; public RestoreService( @@ -216,6 +218,7 @@ public RestoreService( this.indicesService = indicesService; this.fileSettingsService = fileSettingsService; this.threadPool = threadPool; + this.snapshotMetaExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT_META); this.refreshRepositoryUuidOnRestore = REFRESH_REPO_UUID_ON_RESTORE_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(REFRESH_REPO_UUID_ON_RESTORE_SETTING, this::setRefreshRepositoryUuidOnRestore); @@ -244,24 +247,28 @@ public void restoreSnapshot( final ActionListener listener, final BiConsumer updater ) { + assert Repository.assertSnapshotMetaThread(); try { // Try and fill in any missing repository UUIDs in case they're needed during the restore final var repositoryUuidRefreshStep = new ListenableFuture(); - refreshRepositoryUuids(refreshRepositoryUuidOnRestore, repositoriesService, () -> repositoryUuidRefreshStep.onResponse(null)); + refreshRepositoryUuids( + refreshRepositoryUuidOnRestore, + repositoriesService, + () -> repositoryUuidRefreshStep.onResponse(null), + snapshotMetaExecutor + ); // Read snapshot info and metadata from the repository final String repositoryName = request.repository(); Repository repository = repositoriesService.repository(repositoryName); final ListenableFuture repositoryDataListener = new ListenableFuture<>(); - repository.getRepositoryData( - EsExecutors.DIRECT_EXECUTOR_SERVICE, // TODO contemplate threading here, do we need to fork, see #101445? - repositoryDataListener - ); + repository.getRepositoryData(snapshotMetaExecutor, repositoryDataListener); repositoryDataListener.addListener( listener.delegateFailureAndWrap( (delegate, repositoryData) -> repositoryUuidRefreshStep.addListener( delegate.delegateFailureAndWrap((subDelegate, ignored) -> { + assert Repository.assertSnapshotMetaThread(); final String snapshotName = request.snapshot(); final Optional matchingSnapshotId = repositoryData.getSnapshotIds() .stream() @@ -511,12 +518,18 @@ private void setRefreshRepositoryUuidOnRestore(boolean refreshRepositoryUuidOnRe * Best-effort attempt to make sure that we know all the repository UUIDs. Calls {@link Repository#getRepositoryData} on every * {@link BlobStoreRepository} with a missing UUID. * - * @param enabled If {@code false} this method completes the listener immediately + * @param enabled If {@code false} this method completes the listener immediately * @param repositoriesService Supplies the repositories to check - * @param onCompletion Action that is executed when all repositories have been refreshed. 
+ * @param onCompletion Action that is executed when all repositories have been refreshed. + * @param responseExecutor Executor on which to execute {@code onCompletion} if not using the calling thread. */ // Exposed for tests - static void refreshRepositoryUuids(boolean enabled, RepositoriesService repositoriesService, Runnable onCompletion) { + static void refreshRepositoryUuids( + boolean enabled, + RepositoriesService repositoriesService, + Runnable onCompletion, + Executor responseExecutor + ) { try (var refs = new RefCountingRunnable(onCompletion)) { if (enabled == false) { logger.debug("repository UUID refresh is disabled"); @@ -530,20 +543,17 @@ static void refreshRepositoryUuids(boolean enabled, RepositoriesService reposito if (repository instanceof BlobStoreRepository && repository.getMetadata().uuid().equals(RepositoryData.MISSING_UUID)) { final var repositoryName = repository.getMetadata().name(); logger.info("refreshing repository UUID for repository [{}]", repositoryName); - repository.getRepositoryData( - EsExecutors.DIRECT_EXECUTOR_SERVICE, // TODO contemplate threading here, do we need to fork, see #101445? - ActionListener.releaseAfter(new ActionListener<>() { - @Override - public void onResponse(RepositoryData repositoryData) { - logger.debug(() -> format("repository UUID [%s] refresh completed", repositoryName)); - } + repository.getRepositoryData(responseExecutor, ActionListener.releaseAfter(new ActionListener<>() { + @Override + public void onResponse(RepositoryData repositoryData) { + logger.debug(() -> format("repository UUID [%s] refresh completed", repositoryName)); + } - @Override - public void onFailure(Exception e) { - logger.debug(() -> format("repository UUID [%s] refresh failed", repositoryName), e); - } - }, refs.acquire()) - ); + @Override + public void onFailure(Exception e) { + logger.debug(() -> format("repository UUID [%s] refresh failed", repositoryName), e); + } + }, refs.acquire())); } } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java index 0d0293b962609..726d8fce4fc44 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.RepositoriesService; @@ -159,7 +160,12 @@ public void testPrefixNotChanged() { public void testRefreshRepositoryUuidsDoesNothingIfDisabled() { final RepositoriesService repositoriesService = mock(RepositoriesService.class); final AtomicBoolean called = new AtomicBoolean(); - RestoreService.refreshRepositoryUuids(false, repositoriesService, () -> assertTrue(called.compareAndSet(false, true))); + RestoreService.refreshRepositoryUuids( + false, + repositoriesService, + () -> assertTrue(called.compareAndSet(false, true)), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); assertTrue(called.get()); verifyNoMoreInteractions(repositoriesService); } @@ -209,7 +215,12 @@ public void testRefreshRepositoryUuidsRefreshesAsNeeded() { final RepositoriesService repositoriesService = mock(RepositoriesService.class); when(repositoriesService.getRepositories()).thenReturn(repositories); final 
AtomicBoolean completed = new AtomicBoolean(); - RestoreService.refreshRepositoryUuids(true, repositoriesService, () -> assertTrue(completed.compareAndSet(false, true))); + RestoreService.refreshRepositoryUuids( + true, + repositoriesService, + () -> assertTrue(completed.compareAndSet(false, true)), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); assertTrue(completed.get()); assertThat(pendingRefreshes, empty()); finalAssertions.forEach(Runnable::run); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index f31916cc7cf82..d8e634a297bfa 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreClusterStateListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.support.ActionFilters; @@ -206,15 +207,17 @@ private void createFollowerIndex( ActionListener delegatelistener = listener.delegateFailure( (delegatedListener, response) -> afterRestoreStarted(clientWithHeaders, request, delegatedListener, response) ); + + final BiConsumer updater; if (remoteDataStream == null) { // If the index we're following is not part of a data stream, start the // restoration of the index normally. - restoreService.restoreSnapshot(restoreRequest, delegatelistener); + updater = (clusterState, mdBuilder) -> {}; } else { String followerIndexName = request.getFollowerIndex(); // This method is used to update the metadata in the same cluster state // update as the snapshot is restored. - BiConsumer updater = (currentState, mdBuilder) -> { + updater = (currentState, mdBuilder) -> { final String localDataStreamName; // If we have been given a data stream name, use that name for the local @@ -239,8 +242,9 @@ private void createFollowerIndex( ); mdBuilder.put(updatedDataStream); }; - restoreService.restoreSnapshot(restoreRequest, delegatelistener, updater); } + threadPool.executor(ThreadPool.Names.SNAPSHOT_META) + .execute(ActionRunnable.wrap(delegatelistener, l -> restoreService.restoreSnapshot(restoreRequest, l, updater))); } private void afterRestoreStarted( From 8c85d442b15170aa645e4aa38a5c14130d0df1ce Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 23 Aug 2024 15:23:25 -0400 Subject: [PATCH 050/352] ESQL: Lock some data types in profile test (#112165) The test for the output from `profile` can sometimes return `long` and sometimes return `int`. That's fine, really. It just makes testing annoying. This promotes the types to always be a `long` in the test. 
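For illustration only (this snippet is not part of the change): JSON parsers
commonly deserialize a number as an `int` when it fits in 32 bits and as a
`long` otherwise, which is why the same profile field can come back as either
type. Normalizing through `Number#longValue()` avoids handling both cases,
roughly like this, where the names mirror the `fixTypesOnProfile` helper
added below:

    Object iterations = profile.get("iterations"); // Integer or Long, depending on magnitude
    long normalized = ((Number) iterations).longValue(); // always compare as long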
Closes #112049 Closes #112039 --- .../xpack/esql/qa/single_node/RestEsqlIT.java | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index b0fa233965da6..44550c62bd7c5 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -294,6 +294,7 @@ public void testProfile() throws IOException { @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { + fixTypesOnProfile(p); assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") @@ -353,6 +354,7 @@ public void testInlineStatsProfile() throws IOException { @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { + fixTypesOnProfile(p); assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") @@ -457,6 +459,7 @@ public void testForceSleepsProfile() throws IOException { List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { + fixTypesOnProfile(p); assertMap(p, commonProfile()); @SuppressWarnings("unchecked") Map sleeps = (Map) p.get("sleeps"); @@ -497,13 +500,24 @@ public void testForceSleepsProfile() throws IOException { private MapMatcher commonProfile() { return matchesMap().entry("start_millis", greaterThan(0L)) .entry("stop_millis", greaterThan(0L)) - .entry("iterations", greaterThan(0)) - .entry("cpu_nanos", greaterThan(0)) - .entry("took_nanos", greaterThan(0)) + .entry("iterations", greaterThan(0L)) + .entry("cpu_nanos", greaterThan(0L)) + .entry("took_nanos", greaterThan(0L)) .entry("operators", instanceOf(List.class)) .entry("sleeps", matchesMap().extraOk()); } + /** + * Fix some of the types on the profile results. Sometimes they + * come back as integers and sometimes longs. This just promotes + * them to long every time. + */ + private void fixTypesOnProfile(Map profile) { + profile.put("iterations", ((Number) profile.get("iterations")).longValue()); + profile.put("cpu_nanos", ((Number) profile.get("cpu_nanos")).longValue()); + profile.put("took_nanos", ((Number) profile.get("took_nanos")).longValue()); + } + private String checkOperatorProfile(Map o) { String name = (String) o.get("operator"); name = name.replaceAll("\\[.+", ""); From 9d6bef1651c0db93b2eacd36ac6bc3adfb389102 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 23 Aug 2024 15:26:46 -0400 Subject: [PATCH 051/352] Docs: Scripted metric not available in serverless (#112161) This updates the docs to say that scripted metric is not available in serverless. 
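For context, the page being edited documents the aggregation itself; a minimal
request, condensed from the example already on that page, looks like:

    POST ledger/_search?size=0
    {
      "aggs": {
        "profit": {
          "scripted_metric": {
            "init_script": "state.transactions = []",
            "map_script": "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)",
            "combine_script": "double profit = 0; for (t in state.transactions) { profit += t } return profit",
            "reduce_script": "double profit = 0; for (a in states) { profit += a } return profit"
          }
        }
      }
    }

It is this family of scripted aggregations that the new warning covers.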
--- .../metrics/scripted-metric-aggregation.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index d7d837b2f8364..16879450c65d8 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -6,6 +6,8 @@ A metric aggregation that executes using scripts to provide a metric output. +WARNING: `scripted_metric` is not available in {serverless-full}. + WARNING: Using scripts can result in slower search speeds. See <>. @@ -127,7 +129,7 @@ init_script:: Executed prior to any collection of documents. Allows the ag + In the above example, the `init_script` creates an array `transactions` in the `state` object. -map_script:: Executed once per document collected. This is a required script. +map_script:: Executed once per document collected. This is a required script. + In the above example, the `map_script` checks the value of the type field. If the value is 'sale' the value of the amount field is added to the transactions array. If the value of the type field is not 'sale' the negated value of the amount field is added @@ -282,4 +284,4 @@ params:: Optional. An object whose contents will be passed as variable If a parent bucket of the scripted metric aggregation does not collect any documents an empty aggregation response will be returned from the shard with a `null` value. In this case the `reduce_script`'s `states` variable will contain `null` as a response from that shard. -`reduce_script`'s should therefore expect and deal with `null` responses from shards. +`reduce_script`'s should therefore expect and deal with `null` responses from shards. From 2c9406861c4b2b0c7274b4aa8c10fa446fb17302 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 23 Aug 2024 13:30:28 -0700 Subject: [PATCH 052/352] Allow query pragmas in release builds (#111953) I have investigated an issue with QA clusters that run release builds. I wish I could enable query pragmas to confirm the problem instead of setting up new clusters and replicating data before testing the theory. This change allows users to enable query pragmas in release builds. However, due to the risks associated with using pragmas, the accept_pragma_risks parameter must be explicitly set to true to proceed. --- .../elasticsearch/xpack/esql/action/EsqlQueryRequest.java | 8 +++++++- .../elasticsearch/xpack/esql/action/RequestXContent.java | 2 ++ .../xpack/esql/action/EsqlQueryRequestTests.java | 3 +++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index 5c9b4244ec0ca..4ab310863c61d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -50,6 +50,7 @@ public class EsqlQueryRequest extends org.elasticsearch.xpack.core.esql.action.E private TimeValue keepAlive = DEFAULT_KEEP_ALIVE; private boolean keepOnCompletion; private boolean onSnapshotBuild = Build.current().isSnapshot(); + private boolean acceptedPragmaRisks = false; /** * "Tables" provided in the request for use with things like {@code LOOKUP}. 
@@ -78,8 +79,9 @@ public ActionRequestValidationException validate() { if (Strings.hasText(query) == false) { validationException = addValidationError("[" + RequestXContent.QUERY_FIELD + "] is required", validationException); } + if (onSnapshotBuild == false) { - if (pragmas.isEmpty() == false) { + if (pragmas.isEmpty() == false && acceptedPragmaRisks == false) { validationException = addValidationError( "[" + RequestXContent.PRAGMA_FIELD + "] only allowed in snapshot builds", validationException @@ -230,4 +232,8 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, void onSnapshotBuild(boolean onSnapshotBuild) { this.onSnapshotBuild = onSnapshotBuild; } + + void acceptedPragmaRisks(boolean accepted) { + this.acceptedPragmaRisks = accepted; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java index 4c511a4450bc8..810e313002189 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -69,6 +69,7 @@ String fields() { private static final ParseField PARAMS_FIELD = new ParseField("params"); private static final ParseField LOCALE_FIELD = new ParseField("locale"); private static final ParseField PROFILE_FIELD = new ParseField("profile"); + private static final ParseField ACCEPT_PRAGMA_RISKS = new ParseField("accept_pragma_risks"); static final ParseField TABLES_FIELD = new ParseField("tables"); static final ParseField WAIT_FOR_COMPLETION_TIMEOUT = new ParseField("wait_for_completion_timeout"); @@ -92,6 +93,7 @@ private static void objectParserCommon(ObjectParser parser) parser.declareString(EsqlQueryRequest::query, QUERY_FIELD); parser.declareBoolean(EsqlQueryRequest::columnar, COLUMNAR_FIELD); parser.declareObject(EsqlQueryRequest::filter, (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p), FILTER_FIELD); + parser.declareBoolean(EsqlQueryRequest::acceptedPragmaRisks, ACCEPT_PRAGMA_RISKS); parser.declareObject( EsqlQueryRequest::pragmas, (p, c) -> new QueryPragmas(Settings.builder().loadFromMap(p.map()).build()), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index 890a611fdea10..b1dff5ce8c342 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -302,6 +302,9 @@ public void testPragmasOnlyValidOnSnapshot() throws IOException { request.onSnapshotBuild(false); assertNotNull(request.validate()); assertThat(request.validate().getMessage(), containsString("[pragma] only allowed in snapshot builds")); + + request.acceptedPragmaRisks(true); + assertNull(request.validate()); } public void testTablesKeyword() throws IOException { From 150235223e0f9e546d2c9773ad951b9262679754 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 24 Aug 2024 15:59:34 +1000 Subject: [PATCH 053/352] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT test {mv_percentile.FromIndex SYNC} #112180 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 
a46456a6c9ad9..f68e0f395a9b4 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -187,6 +187,9 @@ tests:
   issue: https://github.com/elastic/elasticsearch/issues/112144
 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT
   issue: https://github.com/elastic/elasticsearch/issues/112147
+- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
+  method: test {mv_percentile.FromIndex SYNC}
+  issue: https://github.com/elastic/elasticsearch/issues/112180
 
 # Examples:
 #
From 4c200f60a8e78366ad05aef5590ef9e007fd59ec Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Sat, 24 Aug 2024 15:09:50 +0200
Subject: [PATCH 054/352] Make ClusterSettings.BUILT_IN_CLUSTER_SETTINGS immutable (#112185)

This should be a final and immutable field. Also the way this thing was
constructed is weird, we don't expect any null values for constants.
---
 .../common/settings/ClusterSettings.java               | 7 ++-----
 .../application/rules/QueryRulesIndexServiceTests.java | 3 ++-
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index c023b00ec820f..8d9d8452b12bb 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -130,11 +130,8 @@
 import org.elasticsearch.transport.TransportSettings;
 import org.elasticsearch.watcher.ResourceWatcherService;
 
-import java.util.Objects;
 import java.util.Set;
 import java.util.function.Predicate;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 /**
  * Encapsulates all valid cluster level settings.
@@ -205,7 +202,7 @@ public void apply(Settings value, Settings current, Settings previous) {
         }
     }
 
-    public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Stream.of(
+    public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Set.of(
         AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
         AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
         BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
@@ -602,5 +599,5 @@ public void apply(Settings value, Settings current, Settings previous) {
         TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE,
         DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING,
         DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING
-    ).filter(Objects::nonNull).collect(Collectors.toSet());
+    );
 }
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexServiceTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexServiceTests.java
index 36d5bb91e619d..4de5445871739 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexServiceTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexServiceTests.java
@@ -27,6 +27,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -50,7 +51,7 @@ public class QueryRulesIndexServiceTests extends ESSingleNodeTestCase {
 
     @Before
     public void setup() {
-        Set<Setting<?>> settingsSet = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS;
+        Set<Setting<?>> settingsSet = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
settingsSet.addAll(QueryRulesConfig.getSettings()); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, settingsSet); this.queryRulesIndexService = new QueryRulesIndexService(client(), clusterSettings); From 22ca3810f911035ad5cbecf36500423e3c1e54b1 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sat, 24 Aug 2024 15:17:11 +0200 Subject: [PATCH 055/352] Make slow logger's Logger instances static (#112183) These can be made static now that they aren't index specific any longer, saving measurable time in test execution + it's just the right thing to do here. --- .../elasticsearch/index/IndexingSlowLog.java | 8 +++++--- .../elasticsearch/index/SearchSlowLog.java | 19 +++++++++---------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 14c2c5440bd24..b4a7571e96802 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -90,7 +90,11 @@ public final class IndexingSlowLog implements IndexingOperationListener { Property.IndexSettingDeprecatedInV7AndRemovedInV8 ); - private final Logger indexLogger; + private static final Logger indexLogger = LogManager.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"); + static { + Loggers.setLevel(indexLogger, Level.TRACE); + } + private final Index index; private boolean reformat; @@ -127,8 +131,6 @@ public final class IndexingSlowLog implements IndexingOperationListener { IndexingSlowLog(IndexSettings indexSettings, SlowLogFieldProvider slowLogFieldProvider) { this.slowLogFieldProvider = slowLogFieldProvider; - this.indexLogger = LogManager.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"); - Loggers.setLevel(this.indexLogger, Level.TRACE); this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index eb227e6e1136d..6aff86d32c5a3 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -41,12 +41,17 @@ public final class SearchSlowLog implements SearchOperationListener { private long fetchDebugThreshold; private long fetchTraceThreshold; - private final Logger queryLogger; - private final Logger fetchLogger; + static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; - private final SlowLogFieldProvider slowLogFieldProvider; + private static final Logger queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query"); + private static final Logger fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch"); - static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; + static { + Loggers.setLevel(queryLogger, Level.TRACE); + Loggers.setLevel(fetchLogger, Level.TRACE); + } + + private final SlowLogFieldProvider slowLogFieldProvider; public static final Setting INDEX_SEARCH_SLOWLOG_INCLUDE_USER_SETTING = Setting.boolSetting( INDEX_SEARCH_SLOWLOG_PREFIX + ".include.user", @@ -130,12 +135,6 @@ public final class SearchSlowLog implements SearchOperationListener { public SearchSlowLog(IndexSettings indexSettings, SlowLogFieldProvider slowLogFieldProvider) { slowLogFieldProvider.init(indexSettings); 
this.slowLogFieldProvider = slowLogFieldProvider; - - this.queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query"); - this.fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch"); - Loggers.setLevel(this.fetchLogger, Level.TRACE); - Loggers.setLevel(this.queryLogger, Level.TRACE); - indexSettings.getScopedSettings() .addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold); this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos(); From ecd793067bcc646bb51ab70531672722e8ba8692 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 25 Aug 2024 06:47:05 +1000 Subject: [PATCH 056/352] Mute org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT test {mv_percentile.FromIndex SYNC} #112187 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f68e0f395a9b4..c8f7abebfa391 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -190,6 +190,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {mv_percentile.FromIndex SYNC} issue: https://github.com/elastic/elasticsearch/issues/112180 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {mv_percentile.FromIndex SYNC} + issue: https://github.com/elastic/elasticsearch/issues/112187 # Examples: # From d053d39472abbe2cdbbff5077f31a861d2ed03ad Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 25 Aug 2024 06:47:15 +1000 Subject: [PATCH 057/352] Mute org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT test {mv_percentile.FromIndex ASYNC} #112188 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index c8f7abebfa391..9708b43fbfdfa 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -193,6 +193,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {mv_percentile.FromIndex SYNC} issue: https://github.com/elastic/elasticsearch/issues/112187 +- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT + method: test {mv_percentile.FromIndex ASYNC} + issue: https://github.com/elastic/elasticsearch/issues/112188 # Examples: # From 48dabe851c7944fa7b5d92b50485e9dd22906c76 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 25 Aug 2024 14:17:01 +1000 Subject: [PATCH 058/352] Mute org.elasticsearch.smoketest.WatcherYamlRestIT test {p0=watcher/usage/10_basic/Test watcher usage stats output} #112189 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 9708b43fbfdfa..0557cc1b7408a 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -196,6 +196,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {mv_percentile.FromIndex ASYNC} issue: https://github.com/elastic/elasticsearch/issues/112188 +- class: org.elasticsearch.smoketest.WatcherYamlRestIT + method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} + issue: https://github.com/elastic/elasticsearch/issues/112189 # Examples: # From 18662335ffe00db0e79141b93c3c0bd3021fd585 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sun, 25 Aug 2024 22:44:12 +1000 
Subject: [PATCH 059/352] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=ml/inference_processor/Test create processor with missing mandatory fields} #112191 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 0557cc1b7408a..753e7006863b9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -199,6 +199,9 @@ tests: - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} + issue: https://github.com/elastic/elasticsearch/issues/112191 # Examples: # From 6f539585d56dd1d12f270745ac878309208e4e88 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Mon, 26 Aug 2024 06:44:18 +1000 Subject: [PATCH 060/352] Mute org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT test {mv_percentile.FromIndex ASYNC} #112193 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 753e7006863b9..68a53e5cb04dd 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -202,6 +202,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} issue: https://github.com/elastic/elasticsearch/issues/112191 +- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT + method: test {mv_percentile.FromIndex ASYNC} + issue: https://github.com/elastic/elasticsearch/issues/112193 # Examples: # From 29453cb2cea93010654fcc0f6d5936f67d0b0acd Mon Sep 17 00:00:00 2001 From: Panos Koutsovasilis Date: Mon, 26 Aug 2024 08:37:40 +0300 Subject: [PATCH 061/352] fix: support all allowed protocol numbers (#111528) * fix(CommunityIdProcessor): support all allowed protocol numbers * fix(CommunityIdProcessor): update documentation --- .../ingest/processors/community-id.asciidoc | 7 +- .../ingest/common/CommunityIdProcessor.java | 138 +++++++++++------- .../common/CommunityIdProcessorTests.java | 33 ++++- 3 files changed, 116 insertions(+), 62 deletions(-) diff --git a/docs/reference/ingest/processors/community-id.asciidoc b/docs/reference/ingest/processors/community-id.asciidoc index 03e65ac04a209..2d86bd21fa1e9 100644 --- a/docs/reference/ingest/processors/community-id.asciidoc +++ b/docs/reference/ingest/processors/community-id.asciidoc @@ -23,11 +23,12 @@ configuration is required. | `source_port` | no | `source.port` | Field containing the source port. | `destination_ip` | no | `destination.ip` | Field containing the destination IP address. | `destination_port` | no | `destination.port` | Field containing the destination port. -| `iana_number` | no | `network.iana_number` | Field containing the IANA number. The following protocol numbers are currently supported: `1` ICMP, `2` IGMP, `6` TCP, `17` UDP, `47` GRE, `58` ICMP IPv6, `88` EIGRP, `89` OSPF, `103` PIM, and `132` SCTP. +| `iana_number` | no | `network.iana_number` | Field containing the IANA number. | `icmp_type` | no | `icmp.type` | Field containing the ICMP type. | `icmp_code` | no | `icmp.code` | Field containing the ICMP code. -| `transport` | no | `network.transport` | Field containing the transport protocol. -Used only when the `iana_number` field is not present. 
+| `transport` | no | `network.transport` | Field containing the transport protocol name or number. +Used only when the `iana_number` field is not present. The following protocol names are currently supported: +`ICMP`, `IGMP`, `TCP`, `UDP`, `GRE`, `ICMP IPv6`, `EIGRP`, `OSPF`, `PIM`, and `SCTP`. | `target_field` | no | `network.community_id` | Output field for the community ID. | `seed` | no | `0` | Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The seed can prevent hash collisions between network domains, such as diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java index 27ef5a10dd5c2..0377da53846d5 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java @@ -225,7 +225,7 @@ private static Flow buildFlow( } flow.protocol = Transport.fromObject(protocol); - switch (flow.protocol) { + switch (flow.protocol.getType()) { case Tcp, Udp, Sctp -> { flow.sourcePort = parseIntFromObjectOrString(sourcePort.get(), "source port"); if (flow.sourcePort < 1 || flow.sourcePort > 65535) { @@ -336,12 +336,12 @@ public CommunityIdProcessor create( */ public static final class Flow { - private static final List TRANSPORTS_WITH_PORTS = List.of( - Transport.Tcp, - Transport.Udp, - Transport.Sctp, - Transport.Icmp, - Transport.IcmpIpV6 + private static final List TRANSPORTS_WITH_PORTS = List.of( + Transport.Type.Tcp, + Transport.Type.Udp, + Transport.Type.Sctp, + Transport.Type.Icmp, + Transport.Type.IcmpIpV6 ); InetAddress source; @@ -362,20 +362,21 @@ boolean isOrdered() { } byte[] toBytes() { - boolean hasPort = TRANSPORTS_WITH_PORTS.contains(protocol); + Transport.Type protoType = protocol.getType(); + boolean hasPort = TRANSPORTS_WITH_PORTS.contains(protoType); int len = source.getAddress().length + destination.getAddress().length + 2 + (hasPort ? 4 : 0); ByteBuffer bb = ByteBuffer.allocate(len); boolean isOneWay = false; - if (protocol == Transport.Icmp || protocol == Transport.IcmpIpV6) { + if (protoType == Transport.Type.Icmp || protoType == Transport.Type.IcmpIpV6) { // ICMP protocols populate port fields with ICMP data - Integer equivalent = IcmpType.codeEquivalent(icmpType, protocol == Transport.IcmpIpV6); + Integer equivalent = IcmpType.codeEquivalent(icmpType, protoType == Transport.Type.IcmpIpV6); isOneWay = equivalent == null; sourcePort = icmpType; destinationPort = equivalent == null ? icmpCode : equivalent; } - boolean keepOrder = isOrdered() || ((protocol == Transport.Icmp || protocol == Transport.IcmpIpV6) && isOneWay); + boolean keepOrder = isOrdered() || ((protoType == Transport.Type.Icmp || protoType == Transport.Type.IcmpIpV6) && isOneWay); bb.put(keepOrder ? source.getAddress() : destination.getAddress()); bb.put(keepOrder ? 
destination.getAddress() : source.getAddress());
             bb.put(toUint16(protocol.getTransportNumber() << 8));
@@ -397,39 +398,63 @@ String toCommunityId(byte[] seed) {
         }
     }
 
-    public enum Transport {
-        Icmp(1),
-        Igmp(2),
-        Tcp(6),
-        Udp(17),
-        Gre(47),
-        IcmpIpV6(58),
-        Eigrp(88),
-        Ospf(89),
-        Pim(103),
-        Sctp(132);
-
-        private final int transportNumber;
+    static class Transport {
+        public enum Type {
+            Unknown(-1),
+            Icmp(1),
+            Igmp(2),
+            Tcp(6),
+            Udp(17),
+            Gre(47),
+            IcmpIpV6(58),
+            Eigrp(88),
+            Ospf(89),
+            Pim(103),
+            Sctp(132);
+
+            private final int transportNumber;
+
+            private static final Map<String, Type> TRANSPORT_NAMES;
+
+            static {
+                TRANSPORT_NAMES = new HashMap<>();
+                TRANSPORT_NAMES.put("icmp", Icmp);
+                TRANSPORT_NAMES.put("igmp", Igmp);
+                TRANSPORT_NAMES.put("tcp", Tcp);
+                TRANSPORT_NAMES.put("udp", Udp);
+                TRANSPORT_NAMES.put("gre", Gre);
+                TRANSPORT_NAMES.put("ipv6-icmp", IcmpIpV6);
+                TRANSPORT_NAMES.put("icmpv6", IcmpIpV6);
+                TRANSPORT_NAMES.put("eigrp", Eigrp);
+                TRANSPORT_NAMES.put("ospf", Ospf);
+                TRANSPORT_NAMES.put("pim", Pim);
+                TRANSPORT_NAMES.put("sctp", Sctp);
+            }
 
-        private static final Map<String, Transport> TRANSPORT_NAMES;
+            Type(int transportNumber) {
+                this.transportNumber = transportNumber;
+            }
 
-        static {
-            TRANSPORT_NAMES = new HashMap<>();
-            TRANSPORT_NAMES.put("icmp", Icmp);
-            TRANSPORT_NAMES.put("igmp", Igmp);
-            TRANSPORT_NAMES.put("tcp", Tcp);
-            TRANSPORT_NAMES.put("udp", Udp);
-            TRANSPORT_NAMES.put("gre", Gre);
-            TRANSPORT_NAMES.put("ipv6-icmp", IcmpIpV6);
-            TRANSPORT_NAMES.put("icmpv6", IcmpIpV6);
-            TRANSPORT_NAMES.put("eigrp", Eigrp);
-            TRANSPORT_NAMES.put("ospf", Ospf);
-            TRANSPORT_NAMES.put("pim", Pim);
-            TRANSPORT_NAMES.put("sctp", Sctp);
+            public int getTransportNumber() {
+                return transportNumber;
+            }
         }
 
-        Transport(int transportNumber) {
+        private Type type;
+        private int transportNumber;
+
+        Transport(int transportNumber, Type type) {
             this.transportNumber = transportNumber;
+            this.type = type;
+        }
+
+        Transport(Type type) {
+            this.transportNumber = type.getTransportNumber();
+            this.type = type;
+        }
+
+        public Type getType() {
+            return this.type;
         }
 
         public int getTransportNumber() {
@@ -437,19 +462,26 @@ public int getTransportNumber() {
 
         public static Transport fromNumber(int transportNumber) {
-            return switch (transportNumber) {
-                case 1 -> Icmp;
-                case 2 -> Igmp;
-                case 6 -> Tcp;
-                case 17 -> Udp;
-                case 47 -> Gre;
-                case 58 -> IcmpIpV6;
-                case 88 -> Eigrp;
-                case 89 -> Ospf;
-                case 103 -> Pim;
-                case 132 -> Sctp;
-                default -> throw new IllegalArgumentException("unknown transport protocol number [" + transportNumber + "]");
+            if (transportNumber < 0 || transportNumber >= 255) {
+                // transport numbers range https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+                throw new IllegalArgumentException("invalid transport protocol number [" + transportNumber + "]");
+            }
+
+            Type type = switch (transportNumber) {
+                case 1 -> Type.Icmp;
+                case 2 -> Type.Igmp;
+                case 6 -> Type.Tcp;
+                case 17 -> Type.Udp;
+                case 47 -> Type.Gre;
+                case 58 -> Type.IcmpIpV6;
+                case 88 -> Type.Eigrp;
+                case 89 -> Type.Ospf;
+                case 103 -> Type.Pim;
+                case 132 -> Type.Sctp;
+                default -> Type.Unknown;
             };
+
+            return new Transport(transportNumber, type);
         }
 
         public static Transport fromObject(Object o) {
@@ -457,8 +489,8 @@ public static Transport fromObject(Object o) {
                 return fromNumber(number.intValue());
             } else if (o instanceof String protocolStr) {
                 // check if matches protocol name
-                if
(TRANSPORT_NAMES.containsKey(protocolStr.toLowerCase(Locale.ROOT))) { - return TRANSPORT_NAMES.get(protocolStr.toLowerCase(Locale.ROOT)); + if (Type.TRANSPORT_NAMES.containsKey(protocolStr.toLowerCase(Locale.ROOT))) { + return new Transport(Type.TRANSPORT_NAMES.get(protocolStr.toLowerCase(Locale.ROOT))); } // check if convertible to protocol number diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorTests.java index ca9b3f3d81bd9..3848f4531adcb 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorTests.java @@ -166,14 +166,30 @@ public void testBeatsProtocolNumber() throws Exception { testCommunityIdProcessor(event, "1:D3t8Q1aFA6Ev0A/AO4i9PnU3AeI="); } - public void testBeatsIanaNumber() throws Exception { + public void testBeatsIanaNumberProtocolTCP() throws Exception { @SuppressWarnings("unchecked") var network = (Map) event.get("network"); network.remove("transport"); - network.put("iana_number", CommunityIdProcessor.Transport.Tcp.getTransportNumber()); + network.put("iana_number", CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber()); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); } + public void testBeatsIanaNumberProtocolIPv4() throws Exception { + @SuppressWarnings("unchecked") + var network = (Map) event.get("network"); + network.put("iana_number", "4"); + network.remove("transport"); + @SuppressWarnings("unchecked") + var source = (Map) event.get("source"); + source.put("ip", "192.168.1.2"); + source.remove("port"); + @SuppressWarnings("unchecked") + var destination = (Map) event.get("destination"); + destination.put("ip", "10.1.2.3"); + destination.remove("port"); + testCommunityIdProcessor(event, "1:KXQzmk3bdsvD6UXj7dvQ4bM6Zvw="); + } + public void testIpv6() throws Exception { @SuppressWarnings("unchecked") var source = (Map) event.get("source"); @@ -201,10 +217,10 @@ public void testStringAndNumber() throws Exception { @SuppressWarnings("unchecked") var network = (Map) event.get("network"); network.remove("transport"); - network.put("iana_number", CommunityIdProcessor.Transport.Tcp.getTransportNumber()); + network.put("iana_number", CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber()); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); - network.put("iana_number", Integer.toString(CommunityIdProcessor.Transport.Tcp.getTransportNumber())); + network.put("iana_number", Integer.toString(CommunityIdProcessor.Transport.Type.Tcp.getTransportNumber())); testCommunityIdProcessor(event, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="); // protocol number @@ -359,8 +375,13 @@ private void testCommunityIdProcessor(Map source, int seed, Stri } public void testTransportEnum() { - for (CommunityIdProcessor.Transport t : CommunityIdProcessor.Transport.values()) { - assertThat(CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber()), equalTo(t)); + for (CommunityIdProcessor.Transport.Type t : CommunityIdProcessor.Transport.Type.values()) { + if (t == CommunityIdProcessor.Transport.Type.Unknown) { + expectThrows(IllegalArgumentException.class, () -> CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber())); + continue; + } + + assertThat(CommunityIdProcessor.Transport.fromNumber(t.getTransportNumber()).getType(), equalTo(t)); } } From 
f318f2234676a57f3e03333a4c2ee516d832fb37 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Mon, 26 Aug 2024 08:39:22 +0200 Subject: [PATCH 062/352] Give executor to cache instead of string (#111711) (#112091) Relates ES-8155 Co-authored-by: Iraklis Psaroudakis --- .../shared/SharedBlobCacheService.java | 6 ++-- .../shared/SharedBlobCacheServiceTests.java | 34 +++++++++---------- .../SearchableSnapshots.java | 2 +- .../AbstractSearchableSnapshotsTestCase.java | 6 ++-- .../store/input/FrozenIndexInputTests.java | 2 +- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 584e551f1cf6b..6a55738b864d1 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -333,7 +333,7 @@ public SharedBlobCacheService( NodeEnvironment environment, Settings settings, ThreadPool threadPool, - String ioExecutor, + Executor ioExecutor, BlobCacheMetrics blobCacheMetrics ) { this(environment, settings, threadPool, ioExecutor, blobCacheMetrics, System::nanoTime); @@ -343,12 +343,12 @@ public SharedBlobCacheService( NodeEnvironment environment, Settings settings, ThreadPool threadPool, - String ioExecutor, + Executor ioExecutor, BlobCacheMetrics blobCacheMetrics, LongSupplier relativeTimeInNanosSupplier ) { this.threadPool = threadPool; - this.ioExecutor = threadPool.executor(ioExecutor); + this.ioExecutor = ioExecutor; long totalFsSize; try { totalFsSize = FsProbe.getTotal(Environment.getFileStore(environment.nodeDataPaths()[0])); diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index 6c49b50c06e82..346950d385a40 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -94,7 +94,7 @@ public void testBasicEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -175,7 +175,7 @@ public void testAutoEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -219,7 +219,7 @@ public void testForceEviction() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -253,7 +253,7 @@ public void testForceEvictResponse() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -287,7 +287,7 @@ public void testDecay() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -395,7 +395,7 @@ public 
void testMassiveDecay() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -470,7 +470,7 @@ public void testGetMultiThreaded() throws IOException { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -550,7 +550,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -618,7 +618,7 @@ public void testFetchFullCacheEntryConcurrently() throws Exception { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -826,7 +826,7 @@ public void testCacheSizeChanges() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -844,7 +844,7 @@ public void testCacheSizeChanges() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -869,7 +869,7 @@ public void testMaybeEvictLeastUsed() throws Exception { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -967,7 +967,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1117,7 +1117,7 @@ public void execute(Runnable command) { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1278,7 +1278,7 @@ public void testPopulate() throws Exception { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { @@ -1394,7 +1394,7 @@ public void testUseFullRegionSize() throws IOException { environment, settings, taskQueue.getThreadPool(), - ThreadPool.Names.GENERIC, + taskQueue.getThreadPool().executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) { @Override @@ -1435,7 +1435,7 @@ public void testSharedSourceInputStreamFactory() throws Exception { environment, settings, threadPool, - ThreadPool.Names.GENERIC, + threadPool.executor(ThreadPool.Names.GENERIC), BlobCacheMetrics.NOOP ) ) { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index 18ebe65d87986..4eea006b4c2f2 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -331,7 +331,7 @@ public Collection createComponents(PluginServices services) { nodeEnvironment, settings, threadPool, - SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME, + threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME), new 
BlobCacheMetrics(services.telemetryProvider().getMeterRegistry())
         );
         this.frozenCacheService.set(sharedBlobCacheService);
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
index 5f083d568fed8..41121453e41a4 100644
--- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
@@ -144,7 +144,7 @@ protected SharedBlobCacheService defaultFrozenCacheService() {
             nodeEnvironment,
             Settings.EMPTY,
             threadPool,
-            SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME,
+            threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME),
             BlobCacheMetrics.NOOP
         );
     }
@@ -167,7 +167,7 @@ protected SharedBlobCacheService randomFrozenCacheService() {
             singlePathNodeEnvironment,
             cacheSettings.build(),
             threadPool,
-            SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME,
+            threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME),
             BlobCacheMetrics.NOOP
         );
     }
@@ -192,7 +192,7 @@ protected SharedBlobCacheService createFrozenCacheService(final ByteSi
                 .put(SharedBlobCacheService.SHARED_CACHE_RANGE_SIZE_SETTING.getKey(), cacheRangeSize)
                 .build(),
             threadPool,
-            SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME,
+            threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME),
             BlobCacheMetrics.NOOP
         );
     }
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java
index 81e9c06a149b9..53ea908ad8801 100644
--- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java
@@ -111,7 +111,7 @@ public void testRandomReads() throws IOException {
             nodeEnvironment,
             settings,
             threadPool,
-            SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME,
+            threadPool.executor(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME),
             BlobCacheMetrics.NOOP
         );
         CacheService cacheService = randomCacheService();

From 32b4aa3c448ed5c98854d29d983f903c94649a07 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Mon, 26 Aug 2024 13:52:20 +0700
Subject: [PATCH 063/352] Fix TSDBIndexingIT#testTrimId() test failure. (#112194)

Sometimes initial indexing results in exactly one segment. However,
multiple segments are needed to perform the force merge that purges
stored fields for the _id field in a later stage of the test.

This change tweaks the test such that an extra update is performed
after initial indexing. This should always create an extra segment, so
that this test can actually purge stored fields for the _id field.
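As a sketch of that trick (taken from the test change below; `doc` here
stands in for the test's `DOC.replace(...)` payload): updating the last
indexed document under optimistic-concurrency checks writes into a fresh
segment:

    // Update the last doc in place; the write lands in a new segment.
    var indexRequest = new IndexRequest(indexName)
        .setIfPrimaryTerm(1L)                              // primary term of the original write
        .setIfSeqNo((numBulkRequests * numDocsPerBulk) - 1) // seq no of the last indexed doc
        .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
    indexRequest.source(doc, XContentType.JSON);           // same document, new @timestamp
    client().index(indexRequest).actionGet();              // version becomes 2, extra segment appears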
Closes #112124 --- .../elasticsearch/datastreams/TSDBIndexingIT.java | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 24c373df72144..a0a0681dbd245 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; @@ -35,6 +36,7 @@ import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -457,6 +459,16 @@ public void testTrimId() throws Exception { indexName = bulkResponse.getItems()[0].getIndex(); } client().admin().indices().refresh(new RefreshRequest(dataStreamName)).actionGet(); + + // In rare cases we can end up with a single segment shard, which means we can't trim away the _id later. + // So update an existing doc to create a new segment without adding a new document after force merging: + var indexRequest = new IndexRequest(indexName).setIfPrimaryTerm(1L) + .setIfSeqNo((numBulkRequests * numDocsPerBulk) - 1) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source(DOC.replace("$time", formatInstant(time.minusMillis(1))), XContentType.JSON); + var res = client().index(indexRequest).actionGet(); + assertThat(res.status(), equalTo(RestStatus.OK)); + assertThat(res.getVersion(), equalTo(2L)); } // Check whether there are multiple segments: @@ -494,7 +506,7 @@ public void testTrimId() throws Exception { assertThat(retentionLeasesStats.retentionLeases().leases(), hasSize(1)); assertThat( retentionLeasesStats.retentionLeases().leases().iterator().next().retainingSequenceNumber(), - equalTo((long) numBulkRequests * numDocsPerBulk) + equalTo((long) numBulkRequests * numDocsPerBulk + 1) ); }); From 44758b3823a4982e9c62598f184cf90b67ab5d2a Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Mon, 26 Aug 2024 10:00:39 +0300 Subject: [PATCH 064/352] Restore useAlternatingSort in `MergingDigest` (#112148) --- .../src/main/java/org/elasticsearch/tdigest/MergingDigest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java index 172b0f24dfd99..fc22bda52e104 100644 --- a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java +++ b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java @@ -92,7 +92,7 @@ public class MergingDigest extends AbstractTDigest { private final int[] order; // if true, alternate upward and downward merge passes - public 
boolean useAlternatingSort = false; + public boolean useAlternatingSort = true; // if true, use higher working value of compression during construction, then reduce on presentation public boolean useTwoLevelCompression = true; From 54e910636f009034fdb4e6cb6bf279aeee272bdc Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 26 Aug 2024 11:33:37 +0200 Subject: [PATCH 065/352] Unmute SQL CSV spec tests (#112196) Unmute SQL CSV Spec tests fixed by https://github.com/elastic/elasticsearch/pull/111938 --- muted-tests.yml | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 68a53e5cb04dd..77ec7800f8a4d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -128,42 +128,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT method: "test {p0=esql/26_aggs_bucket/friendlier BUCKET interval: monthly #110916}" issue: https://github.com/elastic/elasticsearch/issues/111902 -- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} - issue: https://github.com/elastic/elasticsearch/issues/111918 -- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} - issue: https://github.com/elastic/elasticsearch/issues/111919 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} - issue: https://github.com/elastic/elasticsearch/issues/111919 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - method: test {date.testDateParseHaving} - issue: https://github.com/elastic/elasticsearch/issues/111921 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - method: test {datetime.testDateTimeParseHaving} - issue: https://github.com/elastic/elasticsearch/issues/111922 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} - issue: https://github.com/elastic/elasticsearch/issues/111918 -- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcCsvSpecIT - issue: https://github.com/elastic/elasticsearch/issues/111923 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - method: test {datetime.testDateTimeParseHaving} - issue: https://github.com/elastic/elasticsearch/issues/111922 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_1} - issue: https://github.com/elastic/elasticsearch/issues/111918 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - method: test {date.testDateParseHaving} - issue: https://github.com/elastic/elasticsearch/issues/111921 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - method: test {agg-ordering.testHistogramDateTimeWithCountAndOrder_2} - issue: https://github.com/elastic/elasticsearch/issues/111919 -- class: org.elasticsearch.xpack.sql.qa.multi_cluster_with_security.JdbcCsvSpecIT - issue: https://github.com/elastic/elasticsearch/issues/111923 -- class: org.elasticsearch.xpack.sql.qa.security.JdbcCsvSpecIT - issue: https://github.com/elastic/elasticsearch/issues/111923 - class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT method: testScaledFloat issue: https://github.com/elastic/elasticsearch/issues/112003 From 6b96226140064bcf14d5eb4a696a30be8a4e48cf 
Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 26 Aug 2024 11:44:42 +0200 Subject: [PATCH 066/352] Fix NPE when executing doc value queries over shape geometries with empty segments (#112139) Return empty scorer instead of null. --- docs/changelog/112139.yaml | 6 ++++ .../lucene/spatial/ShapeDocValuesQuery.java | 17 +++------ .../CartesianShapeDocValuesQueryTests.java | 36 +++++++++++++++++++ .../LatLonShapeDocValuesQueryTests.java | 35 ++++++++++++++++++ 4 files changed, 82 insertions(+), 12 deletions(-) create mode 100644 docs/changelog/112139.yaml diff --git a/docs/changelog/112139.yaml b/docs/changelog/112139.yaml new file mode 100644 index 0000000000000..d6d992ec1dcf2 --- /dev/null +++ b/docs/changelog/112139.yaml @@ -0,0 +1,6 @@ +pr: 112139 +summary: Fix NPE when executing doc value queries over shape geometries with empty + segments +area: Geo +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java b/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java index 6804901d9511e..f79d5303ab65a 100644 --- a/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/spatial/ShapeDocValuesQuery.java @@ -15,6 +15,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; @@ -109,11 +110,7 @@ private ConstantScoreWeight getStandardWeight(ScoreMode scoreMode, float boost) @Override public Scorer scorer(LeafReaderContext context) throws IOException { - final ScorerSupplier scorerSupplier = scorerSupplier(context); - if (scorerSupplier == null) { - return null; - } - return scorerSupplier.get(Long.MAX_VALUE); + return scorerSupplier(context).get(Long.MAX_VALUE); } @Override @@ -127,7 +124,7 @@ public Scorer get(long leadCost) throws IOException { // binary doc values allocate an array upfront, lets only allocate it if we are going to use it final BinaryDocValues values = context.reader().getBinaryDocValues(field); if (values == null) { - return null; + return new ConstantScoreScorer(weight, 0f, scoreMode, DocIdSetIterator.empty()); } final GeometryDocValueReader reader = new GeometryDocValueReader(); final Component2DVisitor visitor = Component2DVisitor.getVisitor(component2D, relation, encoder); @@ -171,11 +168,7 @@ private ConstantScoreWeight getContainsWeight(ScoreMode scoreMode, float boost) @Override public Scorer scorer(LeafReaderContext context) throws IOException { - final ScorerSupplier scorerSupplier = scorerSupplier(context); - if (scorerSupplier == null) { - return null; - } - return scorerSupplier.get(Long.MAX_VALUE); + return scorerSupplier(context).get(Long.MAX_VALUE); } @Override @@ -189,7 +182,7 @@ public Scorer get(long leadCost) throws IOException { // binary doc values allocate an array upfront, lets only allocate it if we are going to use it final BinaryDocValues values = context.reader().getBinaryDocValues(field); if (values == null) { - return null; + return new ConstantScoreScorer(weight, 0f, scoreMode, DocIdSetIterator.empty()); } final Component2DVisitor[] visitors = new Component2DVisitor[components2D.size()]; for (int i = 0; i < components2D.size(); i++) { diff --git 
a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java index 4ce3d87d6420d..9ee84fcaa352f 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java @@ -19,6 +19,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergeScheduler; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -30,6 +31,7 @@ import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geo.XShapeTestUtil; import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.index.mapper.ShapeIndexer; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -54,6 +56,40 @@ public void testEqualsAndHashcode() { QueryUtils.checkUnequal(q1, q4); } + public void testEmptySegment() throws Exception { + IndexWriterConfig iwc = newIndexWriterConfig(); + // No merges + iwc.setMergeScheduler(NoMergeScheduler.INSTANCE); + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, iwc); + ShapeIndexer indexer = new CartesianShapeIndexer(FIELD_NAME); + Geometry geometry = new org.elasticsearch.geometry.Point(0, 0); + Document document = new Document(); + List fields = indexer.indexShape(geometry); + for (IndexableField field : fields) { + document.add(field); + } + BinaryShapeDocValuesField docVal = new BinaryShapeDocValuesField(FIELD_NAME, CoordinateEncoder.CARTESIAN); + docVal.add(fields, geometry); + document.add(docVal); + w.addDocument(document); + w.flush(); + // add empty segment + w.addDocument(new Document()); + w.flush(); + final IndexReader r = DirectoryReader.open(w); + w.close(); + + IndexSearcher s = newSearcher(r); + XYRectangle rectangle = new XYRectangle(-10, 10, -10, 10); + for (ShapeField.QueryRelation relation : ShapeField.QueryRelation.values()) { + Query indexQuery = XYShape.newGeometryQuery(FIELD_NAME, relation, rectangle); + Query docValQuery = new CartesianShapeDocValuesQuery(FIELD_NAME, relation, rectangle); + assertQueries(s, indexQuery, docValQuery, 1); + } + IOUtils.close(r, dir); + } + public void testIndexSimpleShapes() throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(); // Else seeds may not reproduce: diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQueryTests.java index 99fab30e3ade2..e00b7fa4736b3 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/LatLonShapeDocValuesQueryTests.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergeScheduler; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -56,6 +57,40 @@ public void testEqualsAndHashcode() { QueryUtils.checkUnequal(q1, q4); } + public void testEmptySegment() throws Exception { + IndexWriterConfig iwc = newIndexWriterConfig(); + // 
No merges + iwc.setMergeScheduler(NoMergeScheduler.INSTANCE); + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, iwc); + GeoShapeIndexer indexer = new GeoShapeIndexer(Orientation.CCW, FIELD_NAME); + Geometry geometry = new org.elasticsearch.geometry.Point(0, 0); + Document document = new Document(); + List fields = indexer.indexShape(geometry); + for (IndexableField field : fields) { + document.add(field); + } + BinaryShapeDocValuesField docVal = new BinaryShapeDocValuesField(FIELD_NAME, CoordinateEncoder.GEO); + docVal.add(fields, geometry); + document.add(docVal); + w.addDocument(document); + w.flush(); + // add empty segment + w.addDocument(new Document()); + w.flush(); + final IndexReader r = DirectoryReader.open(w); + w.close(); + + IndexSearcher s = newSearcher(r); + Rectangle rectangle = new Rectangle(-10, 10, -10, 10); + for (ShapeField.QueryRelation relation : ShapeField.QueryRelation.values()) { + Query indexQuery = LatLonShape.newGeometryQuery(FIELD_NAME, relation, rectangle); + Query docValQuery = new LatLonShapeDocValuesQuery(FIELD_NAME, relation, rectangle); + assertQueries(s, indexQuery, docValQuery, 1); + } + IOUtils.close(r, dir); + } + public void testIndexSimpleShapes() throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(); // Else seeds may not reproduce: From 785fe5384bda21ff1f7ba52e0fdcf2061506a983 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Mon, 26 Aug 2024 12:56:08 +0300 Subject: [PATCH 067/352] Adding support for allow_partial_search_results in PIT (#111516) --- docs/changelog/111516.yaml | 5 + .../search/point-in-time-api.asciidoc | 38 ++++ .../paginate-search-results.asciidoc | 12 +- .../action/search/PointInTimeIT.java | 205 +++++++++++++++++- .../org/elasticsearch/TransportVersions.java | 1 + .../search/AbstractSearchAsyncAction.java | 2 +- .../action/search/ClearScrollController.java | 4 + .../action/search/OpenPointInTimeRequest.java | 26 ++- .../search/OpenPointInTimeResponse.java | 42 +++- .../search/RestOpenPointInTimeAction.java | 1 + .../action/search/SearchContextId.java | 29 ++- .../action/search/SearchContextIdForNode.java | 46 +++- .../TransportOpenPointInTimeAction.java | 40 +++- .../action/search/TransportSearchAction.java | 58 +++-- .../RestOpenPointInTimeActionTests.java | 2 +- .../action/search/SearchContextIdTests.java | 86 ++++++-- .../search/TransportSearchActionTests.java | 6 +- .../elasticsearch/test/ESIntegTestCase.java | 7 + .../execution/sample/CircuitBreakerTests.java | 2 +- .../search/PITAwareQueryClientTests.java | 2 +- .../sequence/CircuitBreakerTests.java | 2 +- .../authz/AuthorizationServiceTests.java | 3 +- .../xpack/sql/analysis/CancellationTests.java | 2 +- .../ClientTransformIndexerTests.java | 2 +- 24 files changed, 530 insertions(+), 93 deletions(-) create mode 100644 docs/changelog/111516.yaml diff --git a/docs/changelog/111516.yaml b/docs/changelog/111516.yaml new file mode 100644 index 0000000000000..96e8bd843f750 --- /dev/null +++ b/docs/changelog/111516.yaml @@ -0,0 +1,5 @@ +pr: 111516 +summary: Adding support for `allow_partial_search_results` in PIT +area: Search +type: enhancement +issues: [] diff --git a/docs/reference/search/point-in-time-api.asciidoc b/docs/reference/search/point-in-time-api.asciidoc index 2e32324cb44d9..9cd91626c7600 100644 --- a/docs/reference/search/point-in-time-api.asciidoc +++ b/docs/reference/search/point-in-time-api.asciidoc @@ -78,6 +78,44 @@ IMPORTANT: The open point in time request and each subsequent search request can return different `id`; 
thus always use the most recently received `id` for the next search request.
+
+In addition to the `keep_alive` parameter, the `allow_partial_search_results` parameter
+can also be defined.
+This parameter determines whether the <>
+should tolerate unavailable shards or <> when
+initially creating the PIT.
+If set to true, the PIT will be created with the available shards, along with a
+reference to any missing ones.
+If set to false, the operation will fail if any shard is unavailable.
+The default value is false.
+
+The PIT response includes a summary of the total number of shards, as well as the number
+of successful shards when creating the PIT.
+
+[source,console]
+--------------------------------------------------
+POST /my-index-000001/_pit?keep_alive=1m&allow_partial_search_results=true
+--------------------------------------------------
+// TEST[setup:my_index]
+
+[source,js]
+--------------------------------------------------
+{
+  "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=",
+  "_shards": {
+    "total": 10,
+    "successful": 10,
+    "skipped": 0,
+    "failed": 0
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+When a PIT that contains shard failures is used in a search request, the missing shards are
+always reported in the search response as a NoShardAvailableActionException.
+To get rid of these exceptions, a new PIT needs to be created so that shards missing
+from the previous PIT can be handled, assuming they become available in the meantime.
+
 [[point-in-time-keep-alive]]
 ==== Keeping point in time alive
 The `keep_alive` parameter, which is passed to an open point in time request and
diff --git a/docs/reference/search/search-your-data/paginate-search-results.asciidoc b/docs/reference/search/search-your-data/paginate-search-results.asciidoc
index edd1546dd0854..f69fd60be0484 100644
--- a/docs/reference/search/search-your-data/paginate-search-results.asciidoc
+++ b/docs/reference/search/search-your-data/paginate-search-results.asciidoc
@@ -106,9 +106,9 @@ The search response includes an array of `sort` values for each hit:
       "_id" : "654322",
       "_score" : null,
       "_source" : ...,
       "sort" : [
         1463538855,
         "654322"
       ]
     },
     {
@@ -118,7 +118,7 @@ The search response includes an array of `sort` values for each hit:
       "_source" : ...,
       "sort" : [ <1>
         1463538857,
         "654323"
       ]
     }
   ]
@@ -150,7 +150,7 @@ GET twitter/_search
--------------------------------------------------
//TEST[continued]
Repeat this process by updating the `search_after` array every time you retrieve a
new page of results. If a <> occurs between these requests, the order of your results may change,
causing inconsistent results across pages. To prevent this, you can create a <> to
@@ -167,10 +167,12 @@ The API returns a PIT ID.
[source,console-result]
----
{
-  "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=="
+  "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==",
+  "_shards": ...
} ---- // TESTRESPONSE[s/"id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=="/"id": $body.id/] +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards"/] To get the first page of results, submit a search request with a `sort` argument. If using a PIT, specify the PIT ID in the `pit.id` parameter and omit diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index a9a5bb074c9ac..da2dfc50d7fe9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -10,12 +10,16 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -54,11 +58,14 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; public class PointInTimeIT extends ESIntegTestCase { @@ -84,7 +91,7 @@ public void testBasic() { prepareIndex("test").setId(id).setSource("value", i).get(); } refresh("test"); - BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> { assertThat(resp1.pointInTimeId(), equalTo(pitId)); assertHitCount(resp1, numDocs); @@ -130,7 +137,7 @@ public void testMultipleIndices() { prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { int moreDocs = randomIntBetween(10, 50); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { @@ -212,7 +219,7 @@ public void testRelocation() throws 
Exception { prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); @@ -264,7 +271,7 @@ public void testPointInTimeNotFound() throws Exception { prepareIndex("index").setId(id).setSource("value", i).get(); } refresh(); - BytesReference pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); + BytesReference pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)).getPointInTimeId(); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), resp1 -> { assertHitCount(resp1, index1); if (rarely()) { @@ -305,7 +312,7 @@ public void testIndexNotFound() { prepareIndex("index-2").setId(id).setSource("value", i).get(); } refresh(); - BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse( prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), @@ -348,7 +355,7 @@ public void testCanMatch() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(""" {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")); ensureGreen("test"); - BytesReference pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { for (String node : internalCluster().nodesInclude("test")) { for (IndexService indexService : internalCluster().getInstance(IndicesService.class, node)) { @@ -415,7 +422,7 @@ public void testPartialResults() throws Exception { prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - BytesReference pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)).getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs1 + numDocs2); @@ -447,7 +454,7 @@ public void testPITTiebreak() throws Exception { } } refresh("index-*"); - BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)).getPointInTimeId(); try { for (int size = 1; size <= numIndex; size++) { SortOrder order = randomBoolean() ? 
SortOrder.ASC : SortOrder.DESC;
@@ -532,6 +539,176 @@ public void testOpenPITConcurrentShardRequests() throws Exception {
         }
     }
 
+    public void testMissingShardsWithPointInTime() throws Exception {
+        final Settings nodeAttributes = Settings.builder().put("node.attr.foo", "bar").build();
+        final String masterNode = internalCluster().startMasterOnlyNode(nodeAttributes);
+        List<String> dataNodes = internalCluster().startDataOnlyNodes(2, nodeAttributes);
+
+        final String index = "my_test_index";
+        // randomIntBetween(3, 10) shards made this test far too slow and caused timeouts, so the shard count is fixed at 3
+        final int numShards = 3;
+        final int numReplicas = 0;
+        // create an index with numShards shards and 0 replicas
+        createIndex(
+            index,
+            Settings.builder()
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
+                .put("index.routing.allocation.require.foo", "bar")
+                .build()
+        );
+
+        // index some documents
+        int numDocs = randomIntBetween(10, 50);
+        for (int i = 0; i < numDocs; i++) {
+            String id = Integer.toString(i);
+            prepareIndex(index).setId(id).setSource("value", i).get();
+        }
+        refresh(index);
+
+        // create a PIT when all shards are present
+        OpenPointInTimeResponse pointInTimeResponse = openPointInTime(new String[] { index }, TimeValue.timeValueMinutes(1));
+        try {
+            // ensure that the PIT created has all the shards there
+            assertThat(numShards, equalTo(pointInTimeResponse.getTotalShards()));
+            assertThat(numShards, equalTo(pointInTimeResponse.getSuccessfulShards()));
+            assertThat(0, equalTo(pointInTimeResponse.getFailedShards()));
+            assertThat(0, equalTo(pointInTimeResponse.getSkippedShards()));
+
+            // make a request using the above PIT
+            assertResponse(
+                prepareSearch().setQuery(new MatchAllQueryBuilder())
+                    .setPointInTime(new PointInTimeBuilder(pointInTimeResponse.getPointInTimeId())),
+                resp -> {
+                    // ensure that all docs are returned
+                    assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponse.getPointInTimeId()));
+                    assertHitCount(resp, numDocs);
+                }
+            );
+
+            // pick a random data node to shut down
+            final String randomDataNode = randomFrom(dataNodes);
+
+            // find which shards to relocate
+            final String nodeId = admin().cluster().prepareNodesInfo(randomDataNode).get().getNodes().get(0).getNode().getId();
+            Set<Integer> shardsToRelocate = new HashSet<>();
+            for (ShardStats stats : admin().indices().prepareStats(index).get().getShards()) {
+                if (nodeId.equals(stats.getShardRouting().currentNodeId())) {
+                    shardsToRelocate.add(stats.getShardRouting().shardId().id());
+                }
+            }
+
+            final int shardsRemoved = shardsToRelocate.size();
+
+            // shut down the random data node
+            internalCluster().stopNode(randomDataNode);
+
+            // ensure that the index is red
+            ensureRed(index);
+
+            // verify that not all documents can now be retrieved
+            assertResponse(prepareSearch().setQuery(new MatchAllQueryBuilder()), resp -> {
+                assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved));
+                assertThat(resp.getFailedShards(), equalTo(shardsRemoved));
+                assertNotNull(resp.getHits().getTotalHits());
+                assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs));
+            });
+
+            // create a PIT when some shards are missing
+            OpenPointInTimeResponse pointInTimeResponseOneNodeDown = openPointInTime(
+                new String[] { index },
+                TimeValue.timeValueMinutes(10),
+                true
+            );
+            try {
+                // assert that some shards are indeed missing from PIT
+                assertThat(pointInTimeResponseOneNodeDown.getTotalShards(), equalTo(numShards));
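+                // A hedged sanity sketch, not an assertion from the original change: given how
+                // the counters are populated, total should equal successful + failed, with
+                // skipped reported separately, i.e.
+                //   assertThat(
+                //       pointInTimeResponseOneNodeDown.getSuccessfulShards()
+                //           + pointInTimeResponseOneNodeDown.getFailedShards(),
+                //       equalTo(pointInTimeResponseOneNodeDown.getTotalShards())
+                //   );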
+                assertThat(pointInTimeResponseOneNodeDown.getSuccessfulShards(), equalTo(numShards - shardsRemoved));
+                assertThat(pointInTimeResponseOneNodeDown.getFailedShards(), equalTo(shardsRemoved));
+                assertThat(pointInTimeResponseOneNodeDown.getSkippedShards(), equalTo(0));
+
+                // ensure that the response now contains fewer documents than the total number of indexed documents
+                assertResponse(
+                    prepareSearch().setQuery(new MatchAllQueryBuilder())
+                        .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId())),
+                    resp -> {
+                        assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved));
+                        assertThat(resp.getFailedShards(), equalTo(shardsRemoved));
+                        assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponseOneNodeDown.getPointInTimeId()));
+                        assertNotNull(resp.getHits().getTotalHits());
+                        assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs));
+                    }
+                );
+
+                // add another node to the cluster and re-allocate the shards
+                final String newNodeName = internalCluster().startDataOnlyNode(nodeAttributes);
+                try {
+                    for (int shardId : shardsToRelocate) {
+                        ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand(index, shardId, newNodeName, true));
+                    }
+                    ensureGreen(TimeValue.timeValueMinutes(2), index);
+
+                    // index some more documents
+                    for (int i = numDocs; i < numDocs * 2; i++) {
+                        String id = Integer.toString(i);
+                        prepareIndex(index).setId(id).setSource("value", i).get();
+                    }
+                    refresh(index);
+
+                    // ensure that we now see at least numDocs results from the updated index
+                    assertResponse(prepareSearch().setQuery(new MatchAllQueryBuilder()), resp -> {
+                        assertThat(resp.getSuccessfulShards(), equalTo(numShards));
+                        assertThat(resp.getFailedShards(), equalTo(0));
+                        assertNotNull(resp.getHits().getTotalHits());
+                        assertThat(resp.getHits().getTotalHits().value, greaterThan((long) numDocs));
+                    });
+
+                    // ensure that when using the previously created PIT, we'd see the same number of documents as before regardless of the
+                    // newly indexed documents
+                    assertResponse(
+                        prepareSearch().setQuery(new MatchAllQueryBuilder())
+                            .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId())),
+                        resp -> {
+                            assertThat(resp.pointInTimeId(), equalTo(pointInTimeResponseOneNodeDown.getPointInTimeId()));
+                            assertThat(resp.getTotalShards(), equalTo(numShards));
+                            assertThat(resp.getSuccessfulShards(), equalTo(numShards - shardsRemoved));
+                            assertThat(resp.getFailedShards(), equalTo(shardsRemoved));
+                            assertThat(resp.getShardFailures().length, equalTo(shardsRemoved));
+                            for (var failure : resp.getShardFailures()) {
+                                assertTrue(shardsToRelocate.contains(failure.shardId()));
+                                assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class));
+                            }
+                            assertNotNull(resp.getHits().getTotalHits());
+                            // we expect fewer documents, as the newly indexed ones should not be part of the PIT
+                            assertThat(resp.getHits().getTotalHits().value, lessThan((long) numDocs));
+                        }
+                    );
+
+                    Exception exc = expectThrows(
+                        Exception.class,
+                        () -> prepareSearch().setQuery(new MatchAllQueryBuilder())
+                            .setPointInTime(new PointInTimeBuilder(pointInTimeResponseOneNodeDown.getPointInTimeId()))
+                            .setAllowPartialSearchResults(false)
+                            .get()
+                    );
+                    assertThat(exc.getCause().getMessage(), containsString("missing shards"));
+
+                } finally {
+                    internalCluster().stopNode(newNodeName);
+                }
+            } finally {
+                closePointInTime(pointInTimeResponseOneNodeDown.getPointInTimeId());
+            }
+
+        } finally {
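+            // The PIT opened at the start of the test is released explicitly here: PIT
+            // readers pin shard search contexts until they are closed or their keep_alive
+            // lapses, and the nodes are stopped right after this cleanup.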
closePointInTime(pointInTimeResponse.getPointInTimeId()); + internalCluster().stopNode(masterNode); + for (String dataNode : dataNodes) { + internalCluster().stopNode(dataNode); + } + } + } + @SuppressWarnings({ "rawtypes", "unchecked" }) private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int size, SortBuilder... sorts) throws Exception { Set seen = new HashSet<>(); @@ -590,10 +767,14 @@ private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int s assertThat(seen.size(), equalTo(expectedNumDocs)); } - private BytesReference openPointInTime(String[] indices, TimeValue keepAlive) { - OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); - final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - return response.getPointInTimeId(); + private OpenPointInTimeResponse openPointInTime(String[] indices, TimeValue keepAlive) { + return openPointInTime(indices, keepAlive, false); + } + + private OpenPointInTimeResponse openPointInTime(String[] indices, TimeValue keepAlive, boolean allowPartialSearchResults) { + OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive) + .allowPartialSearchResults(allowPartialSearchResults); + return client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); } private void closePointInTime(BytesReference readerId) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 9fe270e933785..33a16797e7e23 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -195,6 +195,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0); public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0); public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); + public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 4fd551994e2a0..1e5b5ebbefe48 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -707,7 +707,7 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At final String scrollId = request.scroll() != null ? 
TransportSearchHelper.buildScrollId(queryResults) : null; final BytesReference searchContextId; if (buildPointInTimeFromSearchResults()) { - searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion); + searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion, failures); } else { if (request.source() != null && request.source().pointInTimeBuilder() != null diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java index 04573f72068f3..965b19a69b858 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -166,6 +166,10 @@ public static void closeContexts( final var successes = new AtomicInteger(); try (RefCountingRunnable refs = new RefCountingRunnable(() -> l.onResponse(successes.get()))) { for (SearchContextIdForNode contextId : contextIds) { + if (contextId.getNode() == null) { + // the shard was missing when creating the PIT, ignore. + continue; + } final DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode()); if (node != null) { try { diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index a1cd4df25a25c..146418839f063 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -41,6 +41,8 @@ public final class OpenPointInTimeRequest extends ActionRequest implements Indic private QueryBuilder indexFilter; + private boolean allowPartialSearchResults = false; + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = SearchRequest.DEFAULT_INDICES_OPTIONS; public OpenPointInTimeRequest(String... 
indices) { @@ -60,6 +62,9 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); } + if (in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + this.allowPartialSearchResults = in.readBoolean(); + } } @Override @@ -76,6 +81,11 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalWriteable(indexFilter); } + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + out.writeBoolean(allowPartialSearchResults); + } else if (allowPartialSearchResults) { + throw new IOException("[allow_partial_search_results] is not supported on nodes with version " + out.getTransportVersion()); + } } @Override @@ -180,6 +190,15 @@ public boolean includeDataStreams() { return true; } + public boolean allowPartialSearchResults() { + return allowPartialSearchResults; + } + + public OpenPointInTimeRequest allowPartialSearchResults(boolean allowPartialSearchResults) { + this.allowPartialSearchResults = allowPartialSearchResults; + return this; + } + @Override public String getDescription() { return "open search context: indices [" + String.join(",", indices) + "] keep_alive [" + keepAlive + "]"; @@ -200,6 +219,8 @@ public String toString() { + ", preference='" + preference + '\'' + + ", allowPartialSearchResults=" + + allowPartialSearchResults + '}'; } @@ -218,12 +239,13 @@ public boolean equals(Object o) { && indicesOptions.equals(that.indicesOptions) && keepAlive.equals(that.keepAlive) && Objects.equals(routing, that.routing) - && Objects.equals(preference, that.preference); + && Objects.equals(preference, that.preference) + && Objects.equals(allowPartialSearchResults, that.allowPartialSearchResults); } @Override public int hashCode() { - int result = Objects.hash(indicesOptions, keepAlive, maxConcurrentShardRequests, routing, preference); + int result = Objects.hash(indicesOptions, keepAlive, maxConcurrentShardRequests, routing, preference, allowPartialSearchResults); result = 31 * result + Arrays.hashCode(indices); return result; } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index dafcee894c9a6..4a4c0252fb109 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; @@ -18,22 +19,46 @@ import java.util.Base64; import java.util.Objects; +import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; + public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject { private final BytesReference pointInTimeId; - public OpenPointInTimeResponse(BytesReference pointInTimeId) { + private final int totalShards; + private final int successfulShards; + private final int failedShards; + private final int skippedShards; + + public OpenPointInTimeResponse( + BytesReference pointInTimeId, + int totalShards, + int successfulShards, + int 
failedShards, + int skippedShards + ) { this.pointInTimeId = Objects.requireNonNull(pointInTimeId, "Point in time parameter must be not null"); + this.totalShards = totalShards; + this.successfulShards = successfulShards; + this.failedShards = failedShards; + this.skippedShards = skippedShards; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeBytesReference(pointInTimeId); + if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + out.writeVInt(totalShards); + out.writeVInt(successfulShards); + out.writeVInt(failedShards); + out.writeVInt(skippedShards); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("id", Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(pointInTimeId))); + buildBroadcastShardsHeader(builder, params, totalShards, successfulShards, failedShards, skippedShards, null); builder.endObject(); return builder; } @@ -42,4 +67,19 @@ public BytesReference getPointInTimeId() { return pointInTimeId; } + public int getTotalShards() { + return totalShards; + } + + public int getSuccessfulShards() { + return successfulShards; + } + + public int getFailedShards() { + return failedShards; + } + + public int getSkippedShards() { + return skippedShards; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java index 0e7f3f9111842..5966a1c924745 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java @@ -47,6 +47,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC openRequest.routing(request.param("routing")); openRequest.preference(request.param("preference")); openRequest.keepAlive(TimeValue.parseTimeValue(request.param("keep_alive"), null, "keep_alive")); + openRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", false)); if (request.hasParam("max_concurrent_shard_requests")) { final int maxConcurrentShardRequests = request.paramAsInt( "max_concurrent_shard_requests", diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index 95d22e8a9034e..2e4dc724413ea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -58,12 +59,30 @@ public boolean contains(ShardSearchContextId contextId) { public static BytesReference encode( List searchPhaseResults, Map aliasFilter, - TransportVersion version + TransportVersion version, + ShardSearchFailure[] shardFailures ) { + assert shardFailures.length == 0 || version.onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT) + : "[allow_partial_search_results] cannot be enabled on a cluster that has not been fully upgraded to version [" + + TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT + + "] or higher."; try (var out = 
new BytesStreamOutput()) { out.setTransportVersion(version); TransportVersion.writeVersion(version, out); - out.writeCollection(searchPhaseResults, SearchContextId::writeSearchPhaseResult); + boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + int shardSize = searchPhaseResults.size() + (allowNullContextId ? shardFailures.length : 0); + out.writeVInt(shardSize); + for (var searchResult : searchPhaseResults) { + final SearchShardTarget target = searchResult.getSearchShardTarget(); + target.getShardId().writeTo(out); + new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchResult.getContextId()).writeTo(out); + } + if (allowNullContextId) { + for (var failure : shardFailures) { + failure.shard().getShardId().writeTo(out); + new SearchContextIdForNode(failure.shard().getClusterAlias(), null, null).writeTo(out); + } + } out.writeMap(aliasFilter, StreamOutput::writeWriteable); return out.bytes(); } catch (IOException e) { @@ -72,12 +91,6 @@ public static BytesReference encode( } } - private static void writeSearchPhaseResult(StreamOutput out, SearchPhaseResult searchPhaseResult) throws IOException { - final SearchShardTarget target = searchPhaseResult.getSearchShardTarget(); - target.getShardId().writeTo(out); - new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId()).writeTo(out); - } - public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, BytesReference id) { try (var in = new NamedWriteableAwareStreamInput(id.streamInput(), namedWriteableRegistry)) { final TransportVersion version = TransportVersion.readVersion(in); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java index 3071362f552ea..a70ddf6ee14b9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -21,25 +22,59 @@ public final class SearchContextIdForNode implements Writeable { private final ShardSearchContextId searchContextId; private final String clusterAlias; - SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { + /** + * Contains the details required to retrieve a {@link ShardSearchContextId} for a shard on a specific node. + * + * @param clusterAlias The alias of the cluster, or {@code null} if the shard is local. + * @param node The target node where the search context ID is defined, or {@code null} if the shard is missing or unavailable. + * @param searchContextId The {@link ShardSearchContextId}, or {@code null} if the shard is missing or unavailable. 
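+     * Note that a {@code null} node together with a {@code null} searchContextId marks a shard
+     * that was unavailable when the point in time was created; callers are therefore expected
+     * to null-check {@link #getNode()} before resolving a target node, as the clear-scroll
+     * handling does when freeing contexts.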
+ */ + SearchContextIdForNode(@Nullable String clusterAlias, @Nullable String node, @Nullable ShardSearchContextId searchContextId) { this.node = node; this.clusterAlias = clusterAlias; this.searchContextId = searchContextId; } SearchContextIdForNode(StreamInput in) throws IOException { - this.node = in.readString(); + boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + this.node = allowNull ? in.readOptionalString() : in.readString(); this.clusterAlias = in.readOptionalString(); - this.searchContextId = new ShardSearchContextId(in); + this.searchContextId = allowNull ? in.readOptionalWriteable(ShardSearchContextId::new) : new ShardSearchContextId(in); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(node); + boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + if (allowNull) { + out.writeOptionalString(node); + } else { + if (node == null) { + // We should never set a null node if the cluster is not fully upgraded to a version that can handle it. + throw new IOException( + "Cannot write null node value to a node in version " + + out.getTransportVersion() + + ". The target node must be specified to retrieve the ShardSearchContextId." + ); + } + out.writeString(node); + } out.writeOptionalString(clusterAlias); - searchContextId.writeTo(out); + if (allowNull) { + out.writeOptionalWriteable(searchContextId); + } else { + if (searchContextId == null) { + // We should never set a null search context id if the cluster is not fully upgraded to a version that can handle it. + throw new IOException( + "Cannot write null search context ID to a node in version " + + out.getTransportVersion() + + ". A valid search context ID is required to identify the shard's search context in this version." 
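+                // A hedged illustration of the two states this guard distinguishes
+                // (constructor arguments as defined above, values illustrative):
+                //   new SearchContextIdForNode("cluster_a", "node_1", contextId) // serializable to any version
+                //   new SearchContextIdForNode("cluster_a", null, null)          // needs ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT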
+ ); + } + searchContextId.writeTo(out); + } } + @Nullable public String getNode() { return node; } @@ -49,6 +84,7 @@ public String getClusterAlias() { return clusterAlias; } + @Nullable public ShardSearchContextId getSearchContextId() { return searchContextId; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index a929b774edf5e..717b1805547be 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -10,6 +10,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionType; @@ -21,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,6 +32,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; @@ -50,6 +54,8 @@ import java.util.concurrent.Executor; import java.util.function.BiFunction; +import static org.elasticsearch.core.Strings.format; + public class TransportOpenPointInTimeAction extends HandledTransportAction { private static final Logger logger = LogManager.getLogger(TransportOpenPointInTimeAction.class); @@ -62,6 +68,7 @@ public class TransportOpenPointInTimeAction extends HandledTransportAction listener) { + final ClusterState clusterState = clusterService.state(); + // Check if all the nodes in this cluster know about the service + if (request.allowPartialSearchResults() + && clusterState.getMinTransportVersion().before(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + listener.onFailure( + new ElasticsearchStatusException( + format( + "The [allow_partial_search_results] parameter cannot be used while the cluster is still upgrading. " + + "Please wait until the upgrade is fully completed and try again." 
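+                        // hedged note: format(...) wraps a constant message with no
+                        // placeholders here, so a plain string literal would behave identically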
+ ), + RestStatus.BAD_REQUEST + ) + ); + return; + } final SearchRequest searchRequest = new SearchRequest().indices(request.indices()) .indicesOptions(request.indicesOptions()) .preference(request.preference()) .routing(request.routing()) - .allowPartialSearchResults(false) + .allowPartialSearchResults(request.allowPartialSearchResults()) .source(new SearchSourceBuilder().query(request.indexFilter())); searchRequest.setMaxConcurrentShardRequests(request.maxConcurrentShardRequests()); searchRequest.setCcsMinimizeRoundtrips(false); transportSearchAction.executeRequest((SearchTask) task, searchRequest, listener.map(r -> { assert r.pointInTimeId() != null : r; - return new OpenPointInTimeResponse(r.pointInTimeId()); + return new OpenPointInTimeResponse( + r.pointInTimeId(), + r.getTotalShards(), + r.getSuccessfulShards(), + r.getFailedShards(), + r.getSkippedShards() + ); }), searchListener -> new OpenPointInTimePhase(request, searchListener)); } @@ -215,7 +245,9 @@ SearchPhase openPointInTimePhase( ) { @Override protected String missingShardsErrorMessage(StringBuilder missingShards) { - return "[open_point_in_time] action requires all shards to be available. Missing shards: [" + missingShards + "]"; + return "[open_point_in_time] action requires all shards to be available. Missing shards: [" + + missingShards + + "]. Consider using `allow_partial_search_results` setting to bypass this error."; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 75668f5ebce51..11e767df9c010 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1116,11 +1116,16 @@ static List getRemoteShardsIteratorFromPointInTime( final String clusterAlias = entry.getKey(); assert clusterAlias.equals(perNode.getClusterAlias()) : clusterAlias + " != " + perNode.getClusterAlias(); final List targetNodes = new ArrayList<>(group.allocatedNodes().size()); - targetNodes.add(perNode.getNode()); - if (perNode.getSearchContextId().getSearcherId() != null) { - for (String node : group.allocatedNodes()) { - if (node.equals(perNode.getNode()) == false) { - targetNodes.add(node); + if (perNode.getNode() != null) { + // If the shard was available when the PIT was created, it's included. + // Otherwise, we add the shard iterator without a target node, allowing a partial search failure to + // be thrown when a search phase attempts to access it. + targetNodes.add(perNode.getNode()); + if (perNode.getSearchContextId().getSearcherId() != null) { + for (String node : group.allocatedNodes()) { + if (node.equals(perNode.getNode()) == false) { + targetNodes.add(node); + } } } } @@ -1216,7 +1221,7 @@ private void executeSearch( assert searchRequest.pointInTimeBuilder() != null; aliasFilter = resolvedIndices.getSearchContextId().aliasFilter(); concreteLocalIndices = resolvedIndices.getLocalIndices() == null ? 
new String[0] : resolvedIndices.getLocalIndices().indices(); - localShardIterators = getLocalLocalShardsIteratorFromPointInTime( + localShardIterators = getLocalShardsIteratorFromPointInTime( clusterState, searchRequest.indicesOptions(), searchRequest.getLocalClusterAlias(), @@ -1723,7 +1728,7 @@ private static RemoteTransportException wrapRemoteClusterFailure(String clusterA return new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); } - static List getLocalLocalShardsIteratorFromPointInTime( + static List getLocalShardsIteratorFromPointInTime( ClusterState clusterState, IndicesOptions indicesOptions, String localClusterAlias, @@ -1737,25 +1742,30 @@ static List getLocalLocalShardsIteratorFromPointInTime( if (Strings.isEmpty(perNode.getClusterAlias())) { final ShardId shardId = entry.getKey(); final List targetNodes = new ArrayList<>(2); - try { - final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); - // Prefer executing shard requests on nodes that are part of PIT first. - if (clusterState.nodes().nodeExists(perNode.getNode())) { - targetNodes.add(perNode.getNode()); - } - if (perNode.getSearchContextId().getSearcherId() != null) { - for (ShardRouting shard : shards) { - if (shard.currentNodeId().equals(perNode.getNode()) == false) { - targetNodes.add(shard.currentNodeId()); + if (perNode.getNode() != null) { + // If the shard was available when the PIT was created, it's included. + // Otherwise, we add the shard iterator without a target node, allowing a partial search failure to + // be thrown when a search phase attempts to access it. + try { + final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); + // Prefer executing shard requests on nodes that are part of PIT first. + if (clusterState.nodes().nodeExists(perNode.getNode())) { + targetNodes.add(perNode.getNode()); + } + if (perNode.getSearchContextId().getSearcherId() != null) { + for (ShardRouting shard : shards) { + if (shard.currentNodeId().equals(perNode.getNode()) == false) { + targetNodes.add(shard.currentNodeId()); + } } } - } - } catch (IndexNotFoundException | ShardNotFoundException e) { - // We can hit these exceptions if the index was deleted after creating PIT or the cluster state on - // this coordinating node is outdated. It's fine to ignore these extra "retry-able" target shards - // when allowPartialSearchResults is false - if (allowPartialSearchResults == false) { - throw e; + } catch (IndexNotFoundException | ShardNotFoundException e) { + // We can hit these exceptions if the index was deleted after creating PIT or the cluster state on + // this coordinating node is outdated. 
It's fine to ignore these extra "retry-able" target shards + // when allowPartialSearchResults is false + if (allowPartialSearchResults == false) { + throw e; + } } } OriginalIndices finalIndices = new OriginalIndices(new String[] { shardId.getIndexName() }, indicesOptions); diff --git a/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java b/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java index dda977565af45..e7b5e898684f6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java @@ -31,7 +31,7 @@ public void testMaxConcurrentSearchRequests() { verifyingClient.setExecuteVerifier(((actionType, transportRequest) -> { assertThat(transportRequest, instanceOf(OpenPointInTimeRequest.class)); transportRequests.add((OpenPointInTimeRequest) transportRequest); - return new OpenPointInTimeResponse(new BytesArray("n/a")); + return new OpenPointInTimeResponse(new BytesArray("n/a"), 1, 1, 0, 0); })); { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java index 32157e09e628f..af7068152648f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -18,6 +19,7 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; @@ -52,40 +54,82 @@ public void testEncode() { final AtomicArray queryResults = TransportSearchHelperTests.generateQueryResults(); final TransportVersion version = TransportVersion.current(); final Map aliasFilters = new HashMap<>(); + Map shardSearchFailures = new HashMap<>(); + int idx = 0; for (SearchPhaseResult result : queryResults.asList()) { - final AliasFilter aliasFilter; if (randomBoolean()) { - aliasFilter = AliasFilter.of(randomQueryBuilder()); - } else if (randomBoolean()) { - aliasFilter = AliasFilter.of(randomQueryBuilder(), "alias-" + between(1, 10)); + shardSearchFailures.put( + result.getSearchShardTarget(), + new ShardSearchFailure( + new NoShardAvailableActionException(result.getSearchShardTarget().getShardId()), + result.getSearchShardTarget() + ) + ); + queryResults.set(idx, null); } else { - aliasFilter = AliasFilter.EMPTY; - } - if (randomBoolean()) { - aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + final AliasFilter aliasFilter; + if (randomBoolean()) { + aliasFilter = AliasFilter.of(randomQueryBuilder()); + } else if (randomBoolean()) { + aliasFilter = AliasFilter.of(randomQueryBuilder(), "alias-" + between(1, 10)); + } else { + aliasFilter = AliasFilter.EMPTY; + } + if 
(randomBoolean()) { + aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + } } + idx += 1; } - final BytesReference id = SearchContextId.encode(queryResults.asList(), aliasFilters, version); + final BytesReference id = SearchContextId.encode( + queryResults.asList(), + aliasFilters, + version, + shardSearchFailures.values().toArray(ShardSearchFailure[]::new) + ); final SearchContextId context = SearchContextId.decode(namedWriteableRegistry, id); assertThat(context.shards().keySet(), hasSize(3)); + // TODO assertThat(context.failedShards().keySet(), hasSize(shardsFailed)); assertThat(context.aliasFilter(), equalTo(aliasFilters)); - SearchContextIdForNode node1 = context.shards().get(new ShardId("idx", "uuid1", 2)); + + ShardId shardIdForNode1 = new ShardId("idx", "uuid1", 2); + SearchShardTarget shardTargetForNode1 = new SearchShardTarget("node_1", shardIdForNode1, "cluster_x"); + SearchContextIdForNode node1 = context.shards().get(shardIdForNode1); assertThat(node1.getClusterAlias(), equalTo("cluster_x")); - assertThat(node1.getNode(), equalTo("node_1")); - assertThat(node1.getSearchContextId().getId(), equalTo(1L)); - assertThat(node1.getSearchContextId().getSessionId(), equalTo("a")); + if (shardSearchFailures.containsKey(shardTargetForNode1)) { + assertNull(node1.getNode()); + assertNull(node1.getSearchContextId()); + } else { + assertThat(node1.getNode(), equalTo("node_1")); + assertThat(node1.getSearchContextId().getId(), equalTo(1L)); + assertThat(node1.getSearchContextId().getSessionId(), equalTo("a")); + } - SearchContextIdForNode node2 = context.shards().get(new ShardId("idy", "uuid2", 42)); + ShardId shardIdForNode2 = new ShardId("idy", "uuid2", 42); + SearchShardTarget shardTargetForNode2 = new SearchShardTarget("node_2", shardIdForNode2, "cluster_y"); + SearchContextIdForNode node2 = context.shards().get(shardIdForNode2); assertThat(node2.getClusterAlias(), equalTo("cluster_y")); - assertThat(node2.getNode(), equalTo("node_2")); - assertThat(node2.getSearchContextId().getId(), equalTo(12L)); - assertThat(node2.getSearchContextId().getSessionId(), equalTo("b")); + if (shardSearchFailures.containsKey(shardTargetForNode2)) { + assertNull(node2.getNode()); + assertNull(node2.getSearchContextId()); + } else { + assertThat(node2.getNode(), equalTo("node_2")); + assertThat(node2.getSearchContextId().getId(), equalTo(12L)); + assertThat(node2.getSearchContextId().getSessionId(), equalTo("b")); + } - SearchContextIdForNode node3 = context.shards().get(new ShardId("idy", "uuid2", 43)); + ShardId shardIdForNode3 = new ShardId("idy", "uuid2", 43); + SearchShardTarget shardTargetForNode3 = new SearchShardTarget("node_3", shardIdForNode3, null); + SearchContextIdForNode node3 = context.shards().get(shardIdForNode3); assertThat(node3.getClusterAlias(), nullValue()); - assertThat(node3.getNode(), equalTo("node_3")); - assertThat(node3.getSearchContextId().getId(), equalTo(42L)); - assertThat(node3.getSearchContextId().getSessionId(), equalTo("c")); + if (shardSearchFailures.containsKey(shardTargetForNode3)) { + assertNull(node3.getNode()); + assertNull(node3.getSearchContextId()); + } else { + assertThat(node3.getNode(), equalTo("node_3")); + assertThat(node3.getSearchContextId().getId(), equalTo(42L)); + assertThat(node3.getSearchContextId().getSessionId(), equalTo("c")); + } final String[] indices = SearchContextId.decodeIndices(id); assertThat(indices.length, equalTo(3)); diff --git 
a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index edd253e945a9b..6621f2055968f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -1646,7 +1646,7 @@ public void testLocalShardIteratorFromPointInTime() { } TimeValue keepAlive = randomBoolean() ? null : TimeValue.timeValueSeconds(between(30, 3600)); - final List shardIterators = TransportSearchAction.getLocalLocalShardsIteratorFromPointInTime( + final List shardIterators = TransportSearchAction.getLocalShardsIteratorFromPointInTime( clusterState, null, null, @@ -1691,7 +1691,7 @@ public void testLocalShardIteratorFromPointInTime() { ) ); IndexNotFoundException error = expectThrows(IndexNotFoundException.class, () -> { - TransportSearchAction.getLocalLocalShardsIteratorFromPointInTime( + TransportSearchAction.getLocalShardsIteratorFromPointInTime( clusterState, null, null, @@ -1702,7 +1702,7 @@ public void testLocalShardIteratorFromPointInTime() { }); assertThat(error.getIndex().getName(), equalTo("another-index")); // Ok when some indices don't exist and `allowPartialSearchResults` is true. - Optional anotherShardIterator = TransportSearchAction.getLocalLocalShardsIteratorFromPointInTime( + Optional anotherShardIterator = TransportSearchAction.getLocalShardsIteratorFromPointInTime( clusterState, null, null, diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index cf469546b6f63..4bdbc81bcc3f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -956,6 +956,13 @@ public ClusterHealthStatus ensureYellow(String... indices) { return ensureColor(ClusterHealthStatus.YELLOW, TimeValue.timeValueSeconds(30), false, indices); } + /** + * Ensures the cluster has a red state via the cluster health API. + */ + public ClusterHealthStatus ensureRed(String... 
indices) {
+        return ensureColor(ClusterHealthStatus.RED, TimeValue.timeValueSeconds(30), false, indices);
+    }
+
     /**
      * Ensures the cluster has a yellow state via the cluster health API and ensures that the cluster has no initializing shards
      * for the given indices
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java
index 1652495197fc0..9cd6549b4be2c 100644
--- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java
+++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java
@@ -209,7 +209,7 @@ protected void
             ActionListener listener
         ) {
             if (request instanceof OpenPointInTimeRequest) {
-                OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId);
+                OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId, 1, 1, 0, 0);
                 listener.onResponse((Response) response);
             } else if (request instanceof ClosePointInTimeRequest) {
                 ClosePointInTimeResponse response = new ClosePointInTimeResponse(true, 1);
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java
index c0e5d398d6508..f1c5d483d4002 100644
--- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java
+++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java
@@ -204,7 +204,7 @@ protected void
                     assertArrayEquals(INDICES, openPIT.indices()); // indices for opening pit should be the same as for the eql query itself
                     openedPIT = true;
-                    OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId);
+                    OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId, 1, 1, 0, 0);
                     listener.onResponse((Response) response);
                 } else if (request instanceof ClosePointInTimeRequest closePIT) {
                     assertTrue(openedPIT);
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java
index c001b312d5578..ecf5ef61ac49a 100644
--- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java
+++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java
@@ -394,7 +394,7 @@ protected void
         ) {
             if (request instanceof OpenPointInTimeRequest) {
                 pitContextCounter.incrementAndGet();
-                OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId);
+                OpenPointInTimeResponse response = new OpenPointInTimeResponse(pitId, 1, 1, 0, 0);
                 listener.onResponse((Response) response);
             } else if (request instanceof ClosePointInTimeRequest) {
                 ClosePointInTimeResponse response = new ClosePointInTimeResponse(true, 1);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java
index 5f878480a7d0d..3be0a17d19253 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java
@@ -66,6 +66,7 @@
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchScrollRequest;
 import org.elasticsearch.action.search.SearchTransportService;
+import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.action.search.TransportClearScrollAction;
 import org.elasticsearch.action.search.TransportClosePointInTimeAction;
 import org.elasticsearch.action.search.TransportMultiSearchAction;
@@ -3650,7 +3651,7 @@ private static BytesReference createEncodedPIT(Index index) {
         );
         List results = new ArrayList<>();
         results.add(testSearchPhaseResult1);
-        return SearchContextId.encode(results, Collections.emptyMap(), TransportVersion.current());
+        return SearchContextId.encode(results, Collections.emptyMap(), TransportVersion.current(), ShardSearchFailure.EMPTY_ARRAY);
     }

     private static class RBACAuthorizationInfoRoleMatcher implements ArgumentMatcher {
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java
index 10d6b04d7505c..3e1f910c9f72e 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java
@@ -190,7 +190,7 @@ public void testCancellationDuringSearch(String query) throws InterruptedExcepti
         doAnswer(invocation -> {
             @SuppressWarnings("unchecked")
             ActionListener listener = (ActionListener) invocation.getArguments()[2];
-            listener.onResponse(new OpenPointInTimeResponse(pitId));
+            listener.onResponse(new OpenPointInTimeResponse(pitId, 1, 1, 0, 0));
             return null;
         }).when(client).execute(eq(TransportOpenPointInTimeAction.TYPE), any(), any());
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java
index 062c951f67c96..c8677c2816fc9 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java
@@ -538,7 +538,7 @@ protected void
             if (request instanceof OpenPointInTimeRequest) {
                 if (pitSupported) {
                     pitContextCounter.incrementAndGet();
-                    OpenPointInTimeResponse response = new OpenPointInTimeResponse(new BytesArray("the_pit_id"));
+                    OpenPointInTimeResponse response = new OpenPointInTimeResponse(new BytesArray("the_pit_id"), 1, 1, 0, 0);
                     listener.onResponse((Response) response);
                 } else {
                     listener.onFailure(new ActionNotFoundTransportException("_pit"));

From 030f42576987f1a2c31398cfa1ee9ce3b27fb00d Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 26 Aug 2024 12:13:00 +0200
Subject: [PATCH 068/352] Make CompatibilityVersions.minimumVersions cheaper (#112186)

This is a significant portion of CS updates at the moment. We should look
into avoiding the computation here altogether unless the nodes change, but
until then this is a trivial ~5x speedup that saves loads of cluster state
thread time, especially in integration tests.
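As a quick illustration of the pattern (the record, names, and int versions
below are simplified stand-ins, not the actual Elasticsearch types), the
change amounts to computing both minimums in a single hand-rolled pass
instead of two stream pipelines with a per-entry Stream.of(...).min(...)
merge function:

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Simplified stand-in: an int "transport version" plus per-index mapping versions.
    record Versions(int transport, Map<String, Integer> mappings) {

        // One pass over the input, mirroring the loop in the diff below.
        static Versions minimum(Collection<Versions> all) {
            if (all.isEmpty()) {
                // the real code returns a shared EMPTY constant here
                throw new IllegalArgumentException("empty input");
            }
            Integer minTransport = null;
            Map<String, Integer> minMappings = null;
            for (Versions v : all) {
                if (minTransport == null) {
                    // first element seeds both minimums
                    minTransport = v.transport();
                    minMappings = new HashMap<>(v.mappings());
                    continue;
                }
                if (v.transport() < minTransport) {
                    minTransport = v.transport();
                }
                for (Map.Entry<String, Integer> e : v.mappings().entrySet()) {
                    minMappings.merge(e.getKey(), e.getValue(), Math::min);
                }
            }
            return new Versions(minTransport, minMappings);
        }

        public static void main(String[] args) {
            Versions min = minimum(List.of(new Versions(3, Map.of("a", 2)), new Versions(1, Map.of("a", 1, "b", 5))));
            System.out.println(min); // Versions[transport=1, mappings={a=1, b=5}] (map order may vary)
        }
    }

The single pass allocates one HashMap and no stream machinery, which is
where the saved cluster state thread time comes from.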
---
 .../version/CompatibilityVersions.java        | 37 +++++++++++--------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java
index a92cf1ce2e42c..c1489afc6c369 100644
--- a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java
+++ b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java
@@ -19,12 +19,9 @@

 import java.io.IOException;
 import java.util.Collection;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;

 /**
  * Wraps component version numbers for cluster state
@@ -42,6 +39,8 @@ public record CompatibilityVersions(
     Map systemIndexMappingsVersion
 ) implements Writeable, ToXContentFragment {

+    public static final CompatibilityVersions EMPTY = new CompatibilityVersions(TransportVersions.MINIMUM_COMPATIBLE, Map.of());
+
     /**
      * Constructs a VersionWrapper collecting all the minimum versions from the values of the map.
      *
      * @return Minimum versions for the cluster
      */
     public static CompatibilityVersions minimumVersions(Collection compatibilityVersions) {
-        TransportVersion minimumTransport = compatibilityVersions.stream()
-            .map(CompatibilityVersions::transportVersion)
-            .min(Comparator.naturalOrder())
-            // In practice transportVersions is always nonempty (except in tests) but use a conservative default anyway:
-            .orElse(TransportVersions.MINIMUM_COMPATIBLE);
-
-        Map minimumMappingsVersions = compatibilityVersions.stream()
-            .flatMap(mv -> mv.systemIndexMappingsVersion().entrySet().stream())
-            .collect(
-                Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> Stream.of(v1, v2).min(Comparator.naturalOrder()).get())
-            );
-
+        if (compatibilityVersions.isEmpty()) {
+            return EMPTY;
+        }
+        TransportVersion minimumTransport = null;
+        Map minimumMappingsVersions = null;
+        for (CompatibilityVersions cv : compatibilityVersions) {
+            TransportVersion version = cv.transportVersion();
+            if (minimumTransport == null) {
+                minimumTransport = version;
+                minimumMappingsVersions = new HashMap<>(cv.systemIndexMappingsVersion());
+                continue;
+            }
+            if (version.compareTo(minimumTransport) < 0) {
+                minimumTransport = version;
+            }
+            for (Map.Entry entry : cv.systemIndexMappingsVersion().entrySet()) {
+                minimumMappingsVersions.merge(entry.getKey(), entry.getValue(), (v1, v2) -> v1.compareTo(v2) < 0 ? v1 : v2);
+            }
+        }
+        // minimumTransport is always non-null since we break out on empty above
         return new CompatibilityVersions(minimumTransport, minimumMappingsVersions);
     }

From 48c32f133e4111b65b3d5f0548051a224d4d9b71 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 26 Aug 2024 12:14:11 +0200
Subject: [PATCH 069/352] Speedup string interning in `ClusterName` (#112045)

If we want to intern here, we should use the deduplicator to speed things
up.
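For background, this is roughly what a deduplicator buys over
String.intern(): a plain JVM map hands back one canonical instance per
distinct value, so lookups avoid the slower native intern table. A
hand-rolled sketch under the assumption of a crude size bound (the actual
deduplicator behind Settings.internKeyOrValue is more careful about
bounding and concurrency):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class StringDeduplicator {
        private static final int MAX_ENTRIES = 10_000;
        private final Map<String, String> canonical = new ConcurrentHashMap<>();

        public String deduplicate(String s) {
            if (canonical.size() >= MAX_ENTRIES) {
                canonical.clear(); // crude eviction to keep memory bounded; illustrative only
            }
            String existing = canonical.putIfAbsent(s, s);
            return existing == null ? s : existing;
        }

        public static void main(String[] args) {
            StringDeduplicator d = new StringDeduplicator();
            String a = d.deduplicate(new String("my-cluster"));
            String b = d.deduplicate(new String("my-cluster"));
            System.out.println(a == b); // true: the second call returns the first instance
        }
    }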
--- .../src/main/java/org/elasticsearch/cluster/ClusterName.java | 3 ++- .../main/java/org/elasticsearch/common/settings/Settings.java | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java index 711c2a7fee8e0..dd4194b60e6ac 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterName.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -39,7 +39,8 @@ public ClusterName(StreamInput input) throws IOException { } public ClusterName(String value) { - this.value = value.intern(); + // cluster name string is most likely part of a setting so we can speed things up over outright interning here + this.value = Settings.internKeyOrValue(value); } public String value() { diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index be8292f02bb59..1df7b27304fd0 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -1567,7 +1567,7 @@ private static String toString(Object o) { * @param s string to intern * @return interned string */ - static String internKeyOrValue(String s) { + public static String internKeyOrValue(String s) { return settingLiteralDeduplicator.deduplicate(s); } From 8431c3645406e56fe6b1147732783652b699bceb Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 26 Aug 2024 12:54:18 +0200 Subject: [PATCH 070/352] Support docvalues only query in shape field (#112199) --- docs/changelog/112199.yaml | 5 ++ .../search/ShapeQueryOverShapeTests.java | 8 +-- .../index/mapper/ShapeFieldMapper.java | 15 +++++- .../index/query/ShapeQueryProcessor.java | 49 +++++-------------- .../spatial/ingest/CircleProcessorTests.java | 10 +--- 5 files changed, 38 insertions(+), 49 deletions(-) create mode 100644 docs/changelog/112199.yaml diff --git a/docs/changelog/112199.yaml b/docs/changelog/112199.yaml new file mode 100644 index 0000000000000..eb22f215f9828 --- /dev/null +++ b/docs/changelog/112199.yaml @@ -0,0 +1,5 @@ +pr: 112199 +summary: Support docvalues only query in shape field +area: Geo +type: enhancement +issues: [] diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java index 554c9ff2904dc..1c013aba52261 100644 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java +++ b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/ShapeQueryOverShapeTests.java @@ -51,16 +51,18 @@ public class ShapeQueryOverShapeTests extends ShapeQueryTestCase { @Override protected XContentBuilder createDefaultMapping() throws Exception { - XContentBuilder xcb = XContentFactory.jsonBuilder() + final boolean isIndexed = randomBoolean(); + final boolean hasDocValues = isIndexed == false || randomBoolean(); + return XContentFactory.jsonBuilder() .startObject() .startObject("properties") .startObject(defaultFieldName) .field("type", "shape") + .field("index", isIndexed) + .field("doc_values", hasDocValues) .endObject() .endObject() .endObject(); - - return xcb; } @Override diff --git 
a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java index 91a118f964064..ab57efee527dc 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.spatial.BinaryShapeDocValuesField; import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; @@ -162,7 +163,19 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext @Override public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation, SearchExecutionContext context) { - return queryProcessor.shapeQuery(shape, fieldName, relation, context, hasDocValues()); + failIfNotIndexedNorDocValuesFallback(context); + // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0); + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { + throw new QueryShardException( + context, + ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]." + ); + } + try { + return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); + } catch (IllegalArgumentException e) { + throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); + } } @Override diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java index cd09b74e99591..25a0e55c027f5 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java @@ -14,51 +14,26 @@ import org.elasticsearch.common.geo.LuceneGeometriesUtils; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.index.IndexVersions; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.spatial.CartesianShapeDocValuesQuery; -import org.elasticsearch.xpack.spatial.index.mapper.ShapeFieldMapper; public class ShapeQueryProcessor { - public Query shapeQuery( - Geometry geometry, - String fieldName, - ShapeRelation relation, - SearchExecutionContext context, - boolean hasDocValues - ) { - validateIsShapeFieldType(fieldName, context); - // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0); - if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { - throw new QueryShardException(context, ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."); - } + public Query 
shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean indexed, boolean hasDocValues) { + assert indexed || hasDocValues; if (geometry == null || geometry.isEmpty()) { return new MatchNoDocsQuery(); } - final XYGeometry[] luceneGeometries; - try { - luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); - } catch (IllegalArgumentException e) { - throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); - } - Query query = XYShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - if (hasDocValues) { - final Query queryDocValues = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - query = new IndexOrDocValuesQuery(query, queryDocValues); + final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + Query query; + if (indexed) { + query = XYShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); + if (hasDocValues) { + final Query queryDocValues = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); + query = new IndexOrDocValuesQuery(query, queryDocValues); + } + } else { + query = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); } return query; } - - private void validateIsShapeFieldType(String fieldName, SearchExecutionContext context) { - MappedFieldType fieldType = context.getFieldType(fieldName); - if (fieldType instanceof ShapeFieldMapper.ShapeFieldType == false) { - throw new QueryShardException( - context, - "Expected " + ShapeFieldMapper.CONTENT_TYPE + " field type for Field [" + fieldName + "] but found " + fieldType.typeName() - ); - } - } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java index 20b1a906b1dab..e71b4f0f4e981 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.GeoShapeIndexer; -import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.RandomDocumentPicks; @@ -39,7 +38,6 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.spatial.index.mapper.GeoShapeWithDocValuesFieldMapper.GeoShapeWithDocValuesFieldType; -import org.elasticsearch.xpack.spatial.index.mapper.ShapeFieldMapper.ShapeFieldType; import org.elasticsearch.xpack.spatial.index.query.ShapeQueryProcessor; import java.io.IOException; @@ -244,17 +242,13 @@ public void testShapeQuery() throws IOException { int numSides = randomIntBetween(4, 1000); Geometry geometry = CircleUtils.createRegularShapePolygon(circle, numSides); - MappedFieldType shapeType = new ShapeFieldType(fieldName, true, true, Orientation.RIGHT, null, Collections.emptyMap()); - ShapeQueryProcessor processor = new ShapeQueryProcessor(); - SearchExecutionContext mockedContext = mock(SearchExecutionContext.class); - 
when(mockedContext.getFieldType(any())).thenReturn(shapeType); - Query sameShapeQuery = processor.shapeQuery(geometry, fieldName, ShapeRelation.INTERSECTS, mockedContext, true); + Query sameShapeQuery = processor.shapeQuery(geometry, fieldName, ShapeRelation.INTERSECTS, true, true); Query centerPointQuery = processor.shapeQuery( new Point(circle.getLon(), circle.getLat()), fieldName, ShapeRelation.INTERSECTS, - mockedContext, + true, true ); From 554eb4f693fd6934103fd3cf1e580f088cc8e064 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 26 Aug 2024 13:00:18 +0200 Subject: [PATCH 071/352] ES|QL: better validation of GROK patterns (#112200) Catch exceptions when building GROK with a wrong pattern, and emit a client exception with a meaningful error message. Fixes https://github.com/elastic/elasticsearch/issues/112111 --- docs/changelog/112200.yaml | 6 ++++++ .../xpack/esql/parser/LogicalPlanBuilder.java | 8 +++++++- .../xpack/esql/parser/StatementParserTests.java | 5 +++++ 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112200.yaml diff --git a/docs/changelog/112200.yaml b/docs/changelog/112200.yaml new file mode 100644 index 0000000000000..0c2c3d71e3ddf --- /dev/null +++ b/docs/changelog/112200.yaml @@ -0,0 +1,6 @@ +pr: 112200 +summary: "ES|QL: better validation of GROK patterns" +area: ES|QL +type: bug +issues: + - 112111 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 0c4272a05a44e..ffd2375a688ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -57,6 +57,7 @@ import org.elasticsearch.xpack.esql.plan.logical.meta.MetaFunctions; import org.elasticsearch.xpack.esql.plan.logical.show.ShowInfo; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.joni.exception.SyntaxException; import java.util.ArrayList; import java.util.Arrays; @@ -153,7 +154,12 @@ public PlanFactory visitGrokCommand(EsqlBaseParser.GrokCommandContext ctx) { return p -> { Source source = source(ctx); String pattern = visitString(ctx.string()).fold().toString(); - Grok.Parser grokParser = Grok.pattern(source, pattern); + Grok.Parser grokParser; + try { + grokParser = Grok.pattern(source, pattern); + } catch (SyntaxException e) { + throw new ParsingException(source, "Invalid grok pattern [{}]: [{}]", pattern, e.getMessage()); + } validateGrokPattern(source, grokParser, pattern); Grok result = new Grok(source(ctx), p, expression(ctx.primaryExpression()), grokParser); return result; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 3860088bf130c..a5ef7900a1a78 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -791,6 +791,11 @@ public void testGrokPattern() { "line 1:22: Invalid GROK pattern [%{NUMBER:foo} %{WORD:foo}]:" + " the attribute [foo] is defined multiple times with different types" ); + + expectError( + "row a = \"foo\" | GROK a \"(?P.+)\"", + "line 1:18: Invalid grok pattern [(?P.+)]: [undefined group option]" + ); } public void 
testLikeRLike() { From de73cda9c39ad45c88535e9310f6ecaa3883b3f0 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 26 Aug 2024 13:02:38 +0200 Subject: [PATCH 072/352] SQL: make date format functions more strict (#112140) --- .../src/main/resources/datetime.csv-spec | 18 +++++ .../scalar/datetime/DateFormatter.java | 27 ++++--- .../datetime/DateTimeFormatProcessor.java | 4 +- .../datetime/NamedDateTimeProcessor.java | 4 +- .../scalar/datetime/ToCharFormatter.java | 76 ++++++++++--------- 5 files changed, 79 insertions(+), 50 deletions(-) diff --git a/x-pack/plugin/sql/qa/server/src/main/resources/datetime.csv-spec b/x-pack/plugin/sql/qa/server/src/main/resources/datetime.csv-spec index 90280676ff6d4..a67ab25bcf66a 100644 --- a/x-pack/plugin/sql/qa/server/src/main/resources/datetime.csv-spec +++ b/x-pack/plugin/sql/qa/server/src/main/resources/datetime.csv-spec @@ -1922,3 +1922,21 @@ SELECT hire_date FROM test_emp WHERE '2005-12-14T00:00:00.000Z'::datetime BETWEE ------------------------ ; + +// checking regressions after https://github.com/elastic/elasticsearch/pull/110222 +selectDateFunctionsCldr +schema::month:s|day_of_week:s|day:s|week:s|ad:s|day_of_week2:s|month2:s|ad2:s +SELECT DATE_FORMAT('2020-04-05T11:22:33.123Z'::date, '%M') AS month, +DATE_FORMAT('2020-04-05T11:22:33.123Z'::date, '%W') AS day_of_week, +DATE_FORMAT('2020-04-05T11:22:33.123Z'::date, '%w') AS day, +DATE_FORMAT('2020-04-05T11:22:33.123Z'::date, '%v') AS week, +DATETIME_FORMAT('2020-04-05T11:22:33.123Z'::date, 'G') AS ad, +TO_CHAR('2020-04-05T11:22:33.123Z'::date, 'Day') AS day_of_week2, +TO_CHAR('2020-04-05T11:22:33.123Z'::date, 'Month') AS month2, +TO_CHAR('2020-04-05T11:22:33.123Z'::date, 'BC') AS ad2; + + month | day_of_week | day | week | ad | day_of_week2 | month2 | ad2 +--------------+-----------------+---------+---------+--------+----------------+--------------+------------ + April | Sunday | 0 | 14 | AD | "Sunday " | "April " | AD +; + diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java index 7016d6cb49e46..68f9acd165888 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateFormatter.java @@ -42,7 +42,7 @@ class DateFormatter { new Builder().pattern("%e").javaPattern("d").build(), new Builder().pattern("%f") .javaPattern("n") - .additionalMapper(s -> String.format(Locale.ROOT, "%06d", Math.round(Integer.parseInt(s) / 1000.0))) + .additionalMapper(s -> String.format(Locale.ENGLISH, "%06d", Math.round(Integer.parseInt(s) / 1000.0))) .build(), new Builder().pattern("%H").javaPattern("HH").build(), new Builder().pattern("%h").javaPattern("hh").build(), @@ -59,19 +59,28 @@ class DateFormatter { new Builder().pattern("%s").javaPattern("ss").build(), new Builder().pattern("%T").javaPattern("HH:mm:ss").build(), new Builder().pattern("%U") - .javaFormat(t -> String.format(Locale.ROOT, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfYear()))) + .javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfYear()))) .build(), - new Builder().pattern("%u").javaFormat(t -> String.format(Locale.ROOT, "%02d", t.get(WeekFields.ISO.weekOfYear()))).build(), + new Builder().pattern("%u") + .javaFormat(t -> 
String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.MONDAY, 4).weekOfYear()))) + .build(), + new Builder().pattern("%V") - .javaFormat(t -> String.format(Locale.ROOT, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfWeekBasedYear()))) + .javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekOfWeekBasedYear()))) + .build(), + new Builder().pattern("%v") + .javaFormat(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.of(DayOfWeek.MONDAY, 4).weekOfWeekBasedYear()))) .build(), - new Builder().pattern("%v").javaPattern("ww").build(), new Builder().pattern("%W").javaPattern("EEEE").build(), - new Builder().pattern("%w").javaPattern("e").additionalMapper(s -> Integer.parseInt(s) == 7 ? String.valueOf(0) : s).build(), + new Builder().pattern("%w") + .javaFormat(t -> String.format(Locale.ENGLISH, "%01d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).dayOfWeek()) - 1)) + .build(), new Builder().pattern("%X") - .javaFormat(t -> String.format(Locale.ROOT, "%04d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekBasedYear()))) + .javaFormat(t -> String.format(Locale.ENGLISH, "%04d", t.get(WeekFields.of(DayOfWeek.SUNDAY, 7).weekBasedYear()))) + .build(), + new Builder().pattern("%x") + .javaFormat(t -> String.format(Locale.ENGLISH, "%04d", t.get(WeekFields.of(DayOfWeek.MONDAY, 7).weekBasedYear()))) .build(), - new Builder().pattern("%x").javaPattern("Y").build(), new Builder().pattern("%Y").javaPattern("yyyy").build(), new Builder().pattern("%y").javaPattern("yy").build() ); @@ -162,7 +171,7 @@ private Builder pattern(String pattern) { } private Builder javaPattern(String javaPattern) { - this.javaFormat = temporalAccessor -> DateTimeFormatter.ofPattern(javaPattern, Locale.ROOT).format(temporalAccessor); + this.javaFormat = temporalAccessor -> DateTimeFormatter.ofPattern(javaPattern, Locale.ENGLISH).format(temporalAccessor); return this; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessor.java index 14ae0d3d1ed16..422e23f064cbf 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFormatProcessor.java @@ -85,7 +85,7 @@ protected Function formatterFor(String pattern) { return null; } final String javaPattern = msToJavaPattern(pattern); - return DateTimeFormatter.ofPattern(javaPattern, Locale.ROOT)::format; + return DateTimeFormatter.ofPattern(javaPattern, Locale.ENGLISH)::format; } }, DATE_FORMAT { @@ -97,7 +97,7 @@ protected Function formatterFor(String pattern) { DATE_TIME_FORMAT { @Override protected Function formatterFor(String pattern) { - return DateTimeFormatter.ofPattern(pattern, Locale.ROOT)::format; + return DateTimeFormatter.ofPattern(pattern, Locale.ENGLISH)::format; } }, TO_CHAR { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java index 1d2e2735d86e3..c62f4303d93d6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java @@ -40,8 +40,8 @@ public final String extract(ZonedDateTime millis, String tzId) { } public static final String NAME = "ndt"; - private static final DateTimeFormatter DAY_NAME_FORMATTER = DateTimeFormatter.ofPattern("EEEE", Locale.ROOT); - private static final DateTimeFormatter MONTH_NAME_FORMATTER = DateTimeFormatter.ofPattern("MMMM", Locale.ROOT); + private static final DateTimeFormatter DAY_NAME_FORMATTER = DateTimeFormatter.ofPattern("EEEE", Locale.ENGLISH); + private static final DateTimeFormatter MONTH_NAME_FORMATTER = DateTimeFormatter.ofPattern("MMMM", Locale.ENGLISH); private final NameExtractor extractor; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/ToCharFormatter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/ToCharFormatter.java index 6e7b0fcb47a06..4c9e851f54e36 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/ToCharFormatter.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/ToCharFormatter.java @@ -41,7 +41,7 @@ class ToCharFormatter { of("HH12").formatFn("hh").numeric(), of("HH24").formatFn("HH").numeric(), of("MI").formatFn("mm").numeric(), - of("SS").formatFn("s", x -> String.format(Locale.ROOT, "%02d", parseInt(x))).numeric(), + of("SS").formatFn("s", x -> String.format(Locale.ENGLISH, "%02d", parseInt(x))).numeric(), of("MS").formatFn("n", nano -> firstDigitsOfNanos(nano, 3)).numericWithLeadingZeros(), of("US").formatFn("n", nano -> firstDigitsOfNanos(nano, 6)).numericWithLeadingZeros(), of("FF1").formatFn("n", nano -> firstDigitsOfNanos(nano, 1)).numericWithLeadingZeros(), @@ -52,14 +52,14 @@ class ToCharFormatter { of("FF6").formatFn("n", nano -> firstDigitsOfNanos(nano, 6)).numericWithLeadingZeros(), of("SSSSS").formatFn("A", milliSecondOfDay -> String.valueOf(parseInt(milliSecondOfDay) / 1000)).numeric(), of("SSSS").formatFn("A", milliSecondOfDay -> String.valueOf(parseInt(milliSecondOfDay) / 1000)).numeric(), - of("AM").formatFn("a", x -> x.toUpperCase(Locale.ROOT)).text(), - of("am").formatFn("a", x -> x.toLowerCase(Locale.ROOT)).text(), - of("PM").formatFn("a", x -> x.toUpperCase(Locale.ROOT)).text(), - of("pm").formatFn("a", x -> x.toLowerCase(Locale.ROOT)).text(), + of("AM").formatFn("a", x -> x.toUpperCase(Locale.ENGLISH)).text(), + of("am").formatFn("a", x -> x.toLowerCase(Locale.ENGLISH)).text(), + of("PM").formatFn("a", x -> x.toUpperCase(Locale.ENGLISH)).text(), + of("pm").formatFn("a", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("A.M.").formatFn("a", x -> x.charAt(0) + "." + x.charAt(1) + ".").text(), - of("a.m.").formatFn("a", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ROOT)).text(), + of("a.m.").formatFn("a", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ENGLISH)).text(), of("P.M.").formatFn("a", x -> x.charAt(0) + "." + x.charAt(1) + ".").text(), - of("p.m.").formatFn("a", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ROOT)).text(), + of("p.m.").formatFn("a", x -> (x.charAt(0) + "." 
+ x.charAt(1) + ".").toLowerCase(Locale.ENGLISH)).text(), of("Y,YYY").formatFn("yyyy", year -> year.charAt(0) + "," + year.substring(1)).numericWithLeadingZeros(), of("YYYY").formatFn("yyyy").numeric(), of("YYY").formatFn("yyyy", year -> year.substring(1)).numeric(), @@ -70,51 +70,53 @@ class ToCharFormatter { of("IY").formatFn(t -> lastNCharacter(absoluteWeekBasedYear(t), 2)).numeric(), of("I").formatFn(t -> lastNCharacter(absoluteWeekBasedYear(t), 1)).numeric(), of("BC").formatFn("G").text(), - of("bc").formatFn("G", x -> x.toLowerCase(Locale.ROOT)).text(), + of("bc").formatFn("G", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("AD").formatFn("G").text(), - of("ad").formatFn("G", x -> x.toLowerCase(Locale.ROOT)).text(), + of("ad").formatFn("G", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("B.C.").formatFn("G", x -> x.charAt(0) + "." + x.charAt(1) + ".").text(), - of("b.c.").formatFn("G", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ROOT)).text(), + of("b.c.").formatFn("G", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ENGLISH)).text(), of("A.D.").formatFn("G", x -> x.charAt(0) + "." + x.charAt(1) + ".").text(), - of("a.d.").formatFn("G", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ROOT)).text(), - of("MONTH").formatFn("MMMM", x -> String.format(Locale.ROOT, "%-9s", x.toUpperCase(Locale.ROOT))).text(), - of("Month").formatFn("MMMM", x -> String.format(Locale.ROOT, "%-9s", x)).text(), - of("month").formatFn("MMMM", x -> String.format(Locale.ROOT, "%-9s", x.toLowerCase(Locale.ROOT))).text(), - of("MON").formatFn("MMM", x -> x.toUpperCase(Locale.ROOT)).text(), + of("a.d.").formatFn("G", x -> (x.charAt(0) + "." + x.charAt(1) + ".").toLowerCase(Locale.ENGLISH)).text(), + of("MONTH").formatFn("MMMM", x -> String.format(Locale.ENGLISH, "%-9s", x.toUpperCase(Locale.ENGLISH))).text(), + of("Month").formatFn("MMMM", x -> String.format(Locale.ENGLISH, "%-9s", x)).text(), + of("month").formatFn("MMMM", x -> String.format(Locale.ENGLISH, "%-9s", x.toLowerCase(Locale.ENGLISH))).text(), + of("MON").formatFn("MMM", x -> x.toUpperCase(Locale.ENGLISH)).text(), of("Mon").formatFn("MMM").text(), - of("mon").formatFn("MMM", x -> x.toLowerCase(Locale.ROOT)).text(), + of("mon").formatFn("MMM", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("MM").formatFn("MM").numeric(), - of("DAY").formatFn("EEEE", x -> String.format(Locale.ROOT, "%-9s", x.toUpperCase(Locale.ROOT))).text(), - of("Day").formatFn("EEEE", x -> String.format(Locale.ROOT, "%-9s", x)).text(), - of("day").formatFn("EEEE", x -> String.format(Locale.ROOT, "%-9s", x.toLowerCase(Locale.ROOT))).text(), - of("DY").formatFn("E", x -> x.toUpperCase(Locale.ROOT)).text(), + of("DAY").formatFn("EEEE", x -> String.format(Locale.ENGLISH, "%-9s", x.toUpperCase(Locale.ENGLISH))).text(), + of("Day").formatFn("EEEE", x -> String.format(Locale.ENGLISH, "%-9s", x)).text(), + of("day").formatFn("EEEE", x -> String.format(Locale.ENGLISH, "%-9s", x.toLowerCase(Locale.ENGLISH))).text(), + of("DY").formatFn("E", x -> x.toUpperCase(Locale.ENGLISH)).text(), of("Dy").formatFn("E").text(), - of("dy").formatFn("E", x -> x.toLowerCase(Locale.ROOT)).text(), + of("dy").formatFn("E", x -> x.toLowerCase(Locale.ENGLISH)).text(), of("DDD").formatFn("DDD").numeric(), of("IDDD").formatFn( t -> String.format( - Locale.ROOT, + Locale.ENGLISH, "%03d", (t.get(WeekFields.ISO.weekOfWeekBasedYear()) - 1) * 7 + t.get(ChronoField.DAY_OF_WEEK) ) ).numeric(), - of("DD").formatFn("d", x -> String.format(Locale.ROOT, "%02d", 
parseInt(x))).numeric(), + of("DD").formatFn("d", x -> String.format(Locale.ENGLISH, "%02d", parseInt(x))).numeric(), of("ID").formatFn(t -> String.valueOf(t.get(ChronoField.DAY_OF_WEEK))).numeric(), of("D").formatFn(t -> String.valueOf(t.get(WeekFields.SUNDAY_START.dayOfWeek()))).numeric(), of("W").formatFn(t -> String.valueOf(t.get(ChronoField.ALIGNED_WEEK_OF_MONTH))).numeric(), - of("WW").formatFn(t -> String.format(Locale.ROOT, "%02d", t.get(ChronoField.ALIGNED_WEEK_OF_YEAR))).numeric(), - of("IW").formatFn(t -> String.format(Locale.ROOT, "%02d", t.get(WeekFields.ISO.weekOfWeekBasedYear()))).numeric(), + of("WW").formatFn(t -> String.format(Locale.ENGLISH, "%02d", t.get(ChronoField.ALIGNED_WEEK_OF_YEAR))).numeric(), + of("IW").formatFn(t -> String.format(Locale.ENGLISH, "%02d", t.get(WeekFields.ISO.weekOfWeekBasedYear()))).numeric(), of("CC").formatFn(t -> { int century = yearToCentury(t.get(ChronoField.YEAR)); - return String.format(Locale.ROOT, century < 0 ? "%03d" : "%02d", century); + return String.format(Locale.ENGLISH, century < 0 ? "%03d" : "%02d", century); }).numeric(), of("J").formatFn(t -> String.valueOf(t.getLong(JulianFields.JULIAN_DAY))).numeric(), of("Q").formatFn("Q").numeric(), - of("RM").formatFn("MM", month -> String.format(Locale.ROOT, "%-4s", monthToRoman(parseInt(month)))).text(), - of("rm").formatFn("MM", month -> String.format(Locale.ROOT, "%-4s", monthToRoman(parseInt(month)).toLowerCase(Locale.ROOT))) - .text(), + of("RM").formatFn("MM", month -> String.format(Locale.ENGLISH, "%-4s", monthToRoman(parseInt(month)))).text(), + of("rm").formatFn( + "MM", + month -> String.format(Locale.ENGLISH, "%-4s", monthToRoman(parseInt(month)).toLowerCase(Locale.ENGLISH)) + ).text(), of("TZ").formatFn(ToCharFormatter::zoneAbbreviationOf).text(), - of("tz").formatFn(t -> zoneAbbreviationOf(t).toLowerCase(Locale.ROOT)).text(), + of("tz").formatFn(t -> zoneAbbreviationOf(t).toLowerCase(Locale.ENGLISH)).text(), of("TZH").acceptsLowercase(false).formatFn("ZZ", s -> s.substring(0, 3)).text(), of("TZM").acceptsLowercase(false).formatFn("ZZ", s -> lastNCharacter(s, 2)).text(), of("OF").acceptsLowercase(false).formatFn("ZZZZZ", ToCharFormatter::formatOffset).offset() @@ -127,7 +129,7 @@ class ToCharFormatter { // also index the lower case version of the patterns if accepted for (ToCharFormatter formatter : formatters) { if (formatter.acceptsLowercase) { - formatterMap.putIfAbsent(formatter.pattern.toLowerCase(Locale.ROOT), formatter); + formatterMap.putIfAbsent(formatter.pattern.toLowerCase(Locale.ENGLISH), formatter); } } FORMATTER_MAP = formatterMap; @@ -274,8 +276,8 @@ private static String appendOrdinalSuffix(String defaultSuffix, String s) { // the Y,YYY pattern might can cause problems with the parsing, but thankfully the last 3 // characters is enough to calculate the suffix int i = parseInt(lastNCharacter(s, 3)); - final boolean upperCase = defaultSuffix.equals(defaultSuffix.toUpperCase(Locale.ROOT)); - return s + (upperCase ? ordinalSuffix(i).toUpperCase(Locale.ROOT) : ordinalSuffix(i)); + final boolean upperCase = defaultSuffix.equals(defaultSuffix.toUpperCase(Locale.ENGLISH)); + return s + (upperCase ? ordinalSuffix(i).toUpperCase(Locale.ENGLISH) : ordinalSuffix(i)); } catch (NumberFormatException ex) { return s + defaultSuffix; } @@ -312,11 +314,11 @@ private static String removeLeadingZerosFromOffset(String offset) { private static String absoluteWeekBasedYear(TemporalAccessor t) { int year = t.get(IsoFields.WEEK_BASED_YEAR); year = year > 0 ? 
year : -(year - 1); - return String.format(Locale.ROOT, "%04d", year); + return String.format(Locale.ENGLISH, "%04d", year); } private static String firstDigitsOfNanos(String nano, int digits) { - return String.format(Locale.ROOT, "%09d", parseInt(nano)).substring(0, digits); + return String.format(Locale.ENGLISH, "%09d", parseInt(nano)).substring(0, digits); } private static String lastNCharacter(String s, int n) { @@ -324,7 +326,7 @@ private static String lastNCharacter(String s, int n) { } private static String zoneAbbreviationOf(TemporalAccessor temporalAccessor) { - String zone = ZoneId.from(temporalAccessor).getDisplayName(TextStyle.SHORT, Locale.ROOT); + String zone = ZoneId.from(temporalAccessor).getDisplayName(TextStyle.SHORT, Locale.ENGLISH); return "Z".equals(zone) ? "UTC" : zone; } @@ -344,7 +346,7 @@ public Builder formatFn(final String javaPattern) { public Builder formatFn(final String javaPattern, final Function additionalMapper) { this.formatFn = temporalAccessor -> { - String formatted = DateTimeFormatter.ofPattern(javaPattern != null ? javaPattern : "'" + pattern + "'", Locale.ROOT) + String formatted = DateTimeFormatter.ofPattern(javaPattern != null ? javaPattern : "'" + pattern + "'", Locale.ENGLISH) .format(temporalAccessor); return additionalMapper == null ? formatted : additionalMapper.apply(formatted); }; From b685a436ce462c874d88c9ca22a07b37ec5187d3 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Mon, 26 Aug 2024 15:18:47 +0300 Subject: [PATCH 073/352] Adding RankDocsRetrieverBuilder and RankDocsQuery (#111709) --- .../retriever/RankDocRetrieverBuilderIT.java | 755 ++++++++++++++++++ .../search/retriever/RetrieverRewriteIT.java | 11 + server/src/main/java/module-info.java | 1 + .../org/elasticsearch/TransportVersions.java | 1 + .../elasticsearch/common/lucene/Lucene.java | 3 + .../uhighlight/CustomUnifiedHighlighter.java | 5 +- .../elasticsearch/search/SearchModule.java | 5 + .../search/builder/SearchSourceBuilder.java | 6 +- .../elasticsearch/search/rank/RankDoc.java | 49 +- .../search/rank/feature/RankFeatureDoc.java | 14 +- .../search/retriever/KnnRetrieverBuilder.java | 14 + .../retriever/RankDocsRetrieverBuilder.java | 146 ++++ .../search/retriever/RetrieverBuilder.java | 33 + .../retriever/StandardRetrieverBuilder.java | 11 + .../retriever/rankdoc/RankDocsQuery.java | 199 +++++ .../rankdoc/RankDocsQueryBuilder.java | 111 +++ .../rankdoc/RankDocsSortBuilder.java | 113 +++ .../retriever/rankdoc/RankDocsSortField.java | 101 +++ .../search/SearchPhaseControllerTests.java | 5 +- .../action/search/SearchRequestTests.java | 16 + .../search/SearchServiceTests.java | 48 +- .../search/query/QuerySearchResultTests.java | 6 +- .../search/rank/RankDocTests.java | 57 ++ .../KnnRetrieverBuilderParsingTests.java | 32 + .../RankDocsRetrieverBuilderTests.java | 165 ++++ .../StandardRetrieverBuilderParsingTests.java | 31 + .../rankdoc/RankDocsQueryBuilderTests.java | 120 +++ .../rankdoc/RankDocsSortBuilderTests.java | 71 ++ .../search/rank/TestRankDoc.java | 45 -- .../search/rank/TestRankShardResult.java | 6 +- .../retriever/TestRetrieverBuilder.java | 6 + .../random/RandomRankRetrieverBuilder.java | 8 +- .../TextSimilarityRankRetrieverBuilder.java | 62 +- ...xtSimilarityRankRetrieverBuilderTests.java | 145 +++- .../70_text_similarity_rank_retriever.yml | 1 + .../xpack/rank/rrf/RRFRankDoc.java | 34 + .../xpack/rank/rrf/RRFRetrieverBuilder.java | 6 + .../xpack/rank/rrf/RRFRankDocTests.java | 5 + 38 files changed, 2351 insertions(+), 96 deletions(-) create mode 100644 
server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortField.java create mode 100644 server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilderTests.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/search/rank/TestRankDoc.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java new file mode 100644 index 0000000000000..fa4cafc66c822 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RankDocRetrieverBuilderIT.java @@ -0,0 +1,755 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever; + +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.MockSearchService; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.collapse.CollapseBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.hamcrest.Matchers.equalTo; + +public class RankDocRetrieverBuilderIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(MockSearchService.TestPlugin.class); + } + + public record RetrieverSource(RetrieverBuilder retriever, SearchSourceBuilder source) {} + + private static String INDEX = "test_index"; + private static final String ID_FIELD = "_id"; + private static final String DOC_FIELD = "doc"; + private static final String TEXT_FIELD = "text"; + private static final String VECTOR_FIELD = "vector"; + private static final String TOPIC_FIELD = "topic"; + private static final String LAST_30D_FIELD = "views.last30d"; + private static final String ALL_TIME_FIELD = "views.all"; + + @Before + public void setup() throws Exception { + String mapping = """ + { + "properties": { + "vector": { + "type": "dense_vector", + "dims": 3, + "element_type": "float", + "index": true, + "similarity": "l2_norm", + "index_options": { + "type": "hnsw" + } + }, + "text": { + "type": "text" + }, + "doc": { + "type": "keyword" + }, + "topic": { + "type": "keyword" + }, + "views": { + "type": "nested", + "properties": { + "last30d": { + "type": "integer" + }, + "all": { + "type": "integer" + } + } + } + } + } + """; + createIndex(INDEX, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).build()); + 
admin().indices().preparePutMapping(INDEX).setSource(mapping, XContentType.JSON).get(); + indexDoc( + INDEX, + "doc_1", + DOC_FIELD, + "doc_1", + TOPIC_FIELD, + "technology", + TEXT_FIELD, + "the quick brown fox jumps over the lazy dog", + LAST_30D_FIELD, + 100 + ); + indexDoc( + INDEX, + "doc_2", + DOC_FIELD, + "doc_2", + TOPIC_FIELD, + "astronomy", + TEXT_FIELD, + "you know, for Search!", + VECTOR_FIELD, + new float[] { 1.0f, 2.0f, 3.0f }, + LAST_30D_FIELD, + 3 + ); + indexDoc(INDEX, "doc_3", DOC_FIELD, "doc_3", TOPIC_FIELD, "technology", VECTOR_FIELD, new float[] { 6.0f, 6.0f, 6.0f }); + indexDoc( + INDEX, + "doc_4", + DOC_FIELD, + "doc_4", + TOPIC_FIELD, + "technology", + TEXT_FIELD, + "aardvark is a really awesome animal, but not very quick", + ALL_TIME_FIELD, + 100, + LAST_30D_FIELD, + 40 + ); + indexDoc(INDEX, "doc_5", DOC_FIELD, "doc_5", TOPIC_FIELD, "science", TEXT_FIELD, "irrelevant stuff"); + indexDoc( + INDEX, + "doc_6", + DOC_FIELD, + "doc_6", + TEXT_FIELD, + "quick quick quick quick search", + VECTOR_FIELD, + new float[] { 10.0f, 30.0f, 100.0f }, + LAST_30D_FIELD, + 15 + ); + indexDoc( + INDEX, + "doc_7", + DOC_FIELD, + "doc_7", + TOPIC_FIELD, + "biology", + TEXT_FIELD, + "dog", + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + ALL_TIME_FIELD, + 1000 + ); + refresh(INDEX); + } + + public void testRankDocsRetrieverBasicWithPagination() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 and with pagination, we'd just omit the first result + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + // include some pagination as well + source.from(1); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + 
assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_3")); + }); + } + + public void testRankDocsRetrieverWithAggs() { + // same as above, but we only want to bring back the top result from each subsearch + // so that would be 1, 2, and 7 + // and final rank would be (based on score): 2, 1, 7 + // aggs should still account for the same docs as the testRankDocsRetriever test, i.e. all but doc_5 + final int rankWindowSize = 1; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.aggregation(new TermsAggregationBuilder("topic").field(TOPIC_FIELD)); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_2")); + assertNotNull(resp.getAggregations()); + assertNotNull(resp.getAggregations().get("topic")); + Terms terms = resp.getAggregations().get("topic"); + // doc_3 is not part of the final aggs computation as it is only retrieved through the knn retriever + // and is outside of the rank window + assertThat(terms.getBucketByKey("technology").getDocCount(), equalTo(2L)); + assertThat(terms.getBucketByKey("astronomy").getDocCount(), equalTo(1L)); + assertThat(terms.getBucketByKey("biology").getDocCount(), equalTo(1L)); + }); + } + + public void testRankDocsRetrieverWithCollapse() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + 
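The combination rule that the comments in these tests keep referring to (a document scores by the percentage of the sub-retriever queries it was matched on) is easy to state in isolation. Below is a minimal, self-contained sketch using the match sets from the comments above; the class name and printing are purely illustrative, and the real CompoundRetrieverWithRankDocs further on additionally breaks ties on score, rank, and doc id:

```java
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class HitRatioSketch {
    public static void main(String[] args) {
        // Match sets as stated in the comments above: standard0, standard1, and the knn retriever
        List<List<String>> resultSets = List.of(
            List.of("doc_1", "doc_4", "doc_6"),
            List.of("doc_2", "doc_6"),
            List.of("doc_7", "doc_2", "doc_3", "doc_6")
        );
        double step = 1.0 / resultSets.size();
        // Each result set a document appears in contributes 1/N to its hit ratio
        Map<String, Double> hitRatio = resultSets.stream()
            .flatMap(List::stream)
            .collect(Collectors.toMap(doc -> doc, doc -> step, Double::sum));
        // Prints doc_6 -> 1.00 first, then doc_2 -> 0.67; the four one-hit docs tie at 0.33
        // and would be separated by score, rank, and doc id in the real implementation.
        hitRatio.entrySet().stream()
            .sorted(Map.Entry.<String, Double>comparingByValue().reversed())
            .forEach(e -> System.out.printf("%s -> %.2f%n", e.getKey(), e.getValue()));
    }
}
```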
// the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + // with collapsing on topic field we would have 6, 2, 1, 7 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.collapse( + new CollapseBuilder(TOPIC_FIELD).setInnerHits( + new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10) + ) + ); + source.fetchField(TOPIC_FIELD); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getHits().length, equalTo(4)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(1).field(TOPIC_FIELD).getValue().toString(), equalTo("astronomy")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(2).field(TOPIC_FIELD).getValue().toString(), equalTo("technology")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getHits().length, equalTo(3)); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(0).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(1).getId(), equalTo("doc_3")); + assertThat(resp.getHits().getAt(2).getInnerHits().get("a").getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(3).field(TOPIC_FIELD).getValue().toString(), equalTo("biology")); + }); + } + + public void testRankDocsRetrieverWithCollapseAndAggs() { + // same as above, but we only want to bring back the top result from each subsearch + // so that would be 1, 2, and 7 + // and final rank would be (based on score): 2, 1, 7 + // aggs should still account for the same docs as the testRankDocsRetriever test, i.e. 
all but doc_5 + final int rankWindowSize = 10; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1 and 6 as doc_4 is collapsed to doc_1 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + standard0.collapseBuilder = new CollapseBuilder(TOPIC_FIELD).setInnerHits( + new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10) + ); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.aggregation(new TermsAggregationBuilder("topic").field(TOPIC_FIELD)); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(5L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertNotNull(resp.getAggregations()); + assertNotNull(resp.getAggregations().get("topic")); + Terms terms = resp.getAggregations().get("topic"); + // doc_3 is not part of the final aggs computation as it is only retrieved through the knn retriever + // and is outside of the rank window + assertThat(terms.getBucketByKey("technology").getDocCount(), equalTo(3L)); + assertThat(terms.getBucketByKey("astronomy").getDocCount(), equalTo(1L)); + assertThat(terms.getBucketByKey("biology").getDocCount(), equalTo(1L)); + }); + } + + public void testRankDocsRetrieverWithNestedQuery() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(LAST_30D_FIELD).gt(10L), ScoreMode.Avg) + .innerHit(new InnerHitBuilder("a").addSort(new FieldSortBuilder(DOC_FIELD).order(SortOrder.DESC)).setSize(10)); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + 
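As in the earlier tests, the prefilter added to standard1 on the next line is what narrows it down to docs 2 and 6: a retriever's prefilters are folded into the executed search as bool filter clauses around its own query. A sketch of the effective query follows (illustrative, assuming the QueryBuilders import of this test; the literal translation lives in StandardRetrieverBuilder, shown later in this patch):

```java
// Effective query for standard1 once its prefilter is applied:
QueryBuilder standard1Effective = QueryBuilders.boolQuery()
    .must(QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery("_id", "doc_2", "doc_3", "doc_6")).boost(20L))
    .filter(QueryBuilders.queryStringQuery("search").defaultField("text"));
// Of the three candidate ids, only doc_2 and doc_6 contain the token "search",
// so doc_3 never enters this retriever's result set.
```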
standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 3, 7 + source.retriever( + new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ) + ); + source.fetchField(TOPIC_FIELD); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(6L)); + assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO)); + assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_6")); + assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_2")); + assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_1")); + assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_7")); + assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_4")); + assertThat(resp.getHits().getAt(5).getId(), equalTo("doc_3")); + }); + } + + public void testRankDocsRetrieverMultipleCompoundRetrievers() { + final int rankWindowSize = 100; + SearchSourceBuilder source = new SearchSourceBuilder(); + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder(); + // this one retrieves docs 1, 4, and 6 + standard0.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("quick").defaultField(TEXT_FIELD)) + .boost(10L); + StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder(); + // this one retrieves docs 2 and 6 due to prefilter + standard1.queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.termsQuery(ID_FIELD, "doc_2", "doc_3", "doc_6")).boost(20L); + standard1.preFilterQueryBuilders.add(QueryBuilders.queryStringQuery("search").defaultField(TEXT_FIELD)); + // this one retrieves docs 7, 2, 3, and 6 + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + VECTOR_FIELD, + new float[] { 3.0f, 3.0f, 3.0f }, + null, + 10, + 100, + null + ); + // the compound retriever here produces a score for a doc based on the percentage of the queries that it was matched on and + // resolves ties based on actual score, rank, and then the doc (we're forcing 1 shard for consistent results) + // so ideal rank would be: 6, 2, 1, 4, 7, 3 + CompoundRetrieverWithRankDocs compoundRetriever1 = new CompoundRetrieverWithRankDocs( + rankWindowSize, + Arrays.asList( + new RetrieverSource(standard0, null), + new RetrieverSource(standard1, null), + new RetrieverSource(knnRetrieverBuilder, null) + ) + ); + // simple standard retriever that would have the doc_4 as its first (and only) result + StandardRetrieverBuilder standard2 = new StandardRetrieverBuilder(); + standard2.queryBuilder = QueryBuilders.queryStringQuery("aardvark").defaultField(TEXT_FIELD); + + // combining the two retrievers would bring doc_4 at the top as it would be the only one present in both doc sets + // the rest of the docs 
would be sorted based on their ranks as they have the same score (1/2)
+        source.retriever(
+            new CompoundRetrieverWithRankDocs(
+                rankWindowSize,
+                Arrays.asList(new RetrieverSource(compoundRetriever1, null), new RetrieverSource(standard2, null))
+            )
+        );
+
+        SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source);
+        ElasticsearchAssertions.assertResponse(req, resp -> {
+            assertNull(resp.pointInTimeId());
+            assertNotNull(resp.getHits().getTotalHits());
+            assertThat(resp.getHits().getTotalHits().value, equalTo(6L));
+            assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO));
+            assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_4"));
+            assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_6"));
+            assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_2"));
+            assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_1"));
+            assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_7"));
+            assertThat(resp.getHits().getAt(5).getId(), equalTo("doc_3"));
+        });
+    }
+
+    public void testRankDocsRetrieverDifferentNestedSorting() {
+        final int rankWindowSize = 100;
+        SearchSourceBuilder source = new SearchSourceBuilder();
+        StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder();
+        // this one retrieves docs 1, 4, 6, 2
+        standard0.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(LAST_30D_FIELD).gt(0), ScoreMode.Avg);
+        standard0.sortBuilders = List.of(
+            new FieldSortBuilder(LAST_30D_FIELD).setNestedSort(new NestedSortBuilder("views")).order(SortOrder.DESC)
+        );
+        StandardRetrieverBuilder standard1 = new StandardRetrieverBuilder();
+        // this one retrieves docs 4, 7
+        standard1.queryBuilder = QueryBuilders.nestedQuery("views", QueryBuilders.rangeQuery(ALL_TIME_FIELD).gt(0), ScoreMode.Avg);
+        standard1.sortBuilders = List.of(
+            new FieldSortBuilder(ALL_TIME_FIELD).setNestedSort(new NestedSortBuilder("views")).order(SortOrder.ASC)
+        );
+
+        source.retriever(
+            new CompoundRetrieverWithRankDocs(
+                rankWindowSize,
+                Arrays.asList(new RetrieverSource(standard0, null), new RetrieverSource(standard1, null))
+            )
+        );
+
+        SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source);
+        ElasticsearchAssertions.assertResponse(req, resp -> {
+            assertNull(resp.pointInTimeId());
+            assertNotNull(resp.getHits().getTotalHits());
+            assertThat(resp.getHits().getTotalHits().value, equalTo(5L));
+            assertThat(resp.getHits().getTotalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO));
+            assertThat(resp.getHits().getAt(0).getId(), equalTo("doc_4"));
+            assertThat(resp.getHits().getAt(1).getId(), equalTo("doc_1"));
+            assertThat(resp.getHits().getAt(2).getId(), equalTo("doc_7"));
+            assertThat(resp.getHits().getAt(3).getId(), equalTo("doc_6"));
+            assertThat(resp.getHits().getAt(4).getId(), equalTo("doc_2"));
+        });
+    }
+
+    class CompoundRetrieverWithRankDocs extends RetrieverBuilder {
+
+        private final List<RetrieverSource> sources;
+        private final int rankWindowSize;
+
+        private CompoundRetrieverWithRankDocs(int rankWindowSize, List<RetrieverSource> sources) {
+            this.rankWindowSize = rankWindowSize;
+            this.sources = Collections.unmodifiableList(sources);
+        }
+
+        @Override
+        public boolean isCompound() {
+            return true;
+        }
+
+        @Override
+        public QueryBuilder topDocsQuery() {
+            throw new UnsupportedOperationException("should not be called");
+        }
+
+        @Override
+        public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException {
+            if (ctx.getPointInTimeBuilder() == null) {
+                throw new IllegalStateException("PIT is required");
+            }
+
+            // Rewrite prefilters
+            boolean hasChanged = false;
+            var newPreFilters = rewritePreFilters(ctx);
+            hasChanged |= newPreFilters != preFilterQueryBuilders;
+
+            // Rewrite retriever sources
+            List<RetrieverSource> newRetrievers = new ArrayList<>();
+            for (var entry : sources) {
+                RetrieverBuilder newRetriever = entry.retriever.rewrite(ctx);
+                if (newRetriever != entry.retriever) {
+                    newRetrievers.add(new RetrieverSource(newRetriever, null));
+                    hasChanged |= newRetriever != entry.retriever;
+                } else if (newRetriever == entry.retriever) {
+                    var sourceBuilder = entry.source != null
+                        ? entry.source
+                        : createSearchSourceBuilder(ctx.getPointInTimeBuilder(), newRetriever);
+                    var rewrittenSource = sourceBuilder.rewrite(ctx);
+                    newRetrievers.add(new RetrieverSource(newRetriever, rewrittenSource));
+                    hasChanged |= rewrittenSource != entry.source;
+                }
+            }
+            if (hasChanged) {
+                return new CompoundRetrieverWithRankDocs(rankWindowSize, newRetrievers);
+            }
+
+            // execute searches
+            final SetOnce<RankDoc[]> results = new SetOnce<>();
+            final MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
+            for (var entry : sources) {
+                SearchRequest searchRequest = new SearchRequest().source(entry.source);
+                // The can match phase can reorder shards, so we disable it to ensure the stable ordering
+                searchRequest.setPreFilterShardSize(Integer.MAX_VALUE);
+                multiSearchRequest.add(searchRequest);
+            }
+            ctx.registerAsyncAction((client, listener) -> {
+                client.execute(TransportMultiSearchAction.TYPE, multiSearchRequest, new ActionListener<>() {
+                    @Override
+                    public void onResponse(MultiSearchResponse items) {
+                        List<RankDoc[]> topDocs = new ArrayList<>();
+                        for (int i = 0; i < items.getResponses().length; i++) {
+                            var item = items.getResponses()[i];
+                            var rankDocs = getRankDocs(item.getResponse());
+                            sources.get(i).retriever().setRankDocs(rankDocs);
+                            topDocs.add(rankDocs);
+                        }
+                        results.set(combineResults(topDocs));
+                        listener.onResponse(null);
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        listener.onFailure(e);
+                    }
+                });
+            });
+
+            return new RankDocsRetrieverBuilder(
+                rankWindowSize,
+                newRetrievers.stream().map(s -> s.retriever).toList(),
+                results::get,
+                newPreFilters
+            );
+        }
+
+        @Override
+        public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) {
+            throw new UnsupportedOperationException("should not be called");
+        }
+
+        @Override
+        public String getName() {
+            return "compound_retriever";
+        }
+
+        @Override
+        protected void doToXContent(XContentBuilder builder, Params params) throws IOException {
+
+        }
+
+        @Override
+        protected boolean doEquals(Object o) {
+            return false;
+        }
+
+        @Override
+        protected int doHashCode() {
+            return 0;
+        }
+
+        private RankDoc[] getRankDocs(SearchResponse searchResponse) {
+            assert searchResponse != null;
+            int size = Math.min(rankWindowSize, searchResponse.getHits().getHits().length);
+            RankDoc[] docs = new RankDoc[size];
+            for (int i = 0; i < size; i++) {
+                var hit = searchResponse.getHits().getAt(i);
+                long sortValue = (long) hit.getRawSortValues()[hit.getRawSortValues().length - 1];
+                int doc = decodeDoc(sortValue);
+                int shardRequestIndex = decodeShardRequestIndex(sortValue);
+                docs[i] = new RankDoc(doc, hit.getScore(), shardRequestIndex);
+                docs[i].rank = i + 1;
+            }
+            return docs;
+        }
+
+        public static int decodeDoc(long value) {
+            return (int) value;
+        }
+
+        public static int decodeShardRequestIndex(long value) {
+            return (int) (value >> 32);
+        }
+
+        record RankDocAndHitRatio(RankDoc rankDoc, float hitRatio) {}
+
+        /**
+         * Combines the provided {@code rankResults} to return the final top documents.
+         */
+        public RankDoc[] combineResults(List<RankDoc[]> rankResults) {
+            int totalQueries = rankResults.size();
+            final float step = 1.0f / totalQueries;
+            Map<RankDoc.RankKey, RankDocAndHitRatio> docsToRankResults = Maps.newMapWithExpectedSize(rankWindowSize);
+            for (var rankResult : rankResults) {
+                for (RankDoc scoreDoc : rankResult) {
+                    docsToRankResults.compute(new RankDoc.RankKey(scoreDoc.doc, scoreDoc.shardIndex), (key, value) -> {
+                        if (value == null) {
+                            RankDoc res = new RankDoc(scoreDoc.doc, scoreDoc.score, scoreDoc.shardIndex);
+                            res.rank = scoreDoc.rank;
+                            return new RankDocAndHitRatio(res, step);
+                        } else {
+                            RankDoc res = new RankDoc(scoreDoc.doc, Math.max(scoreDoc.score, value.rankDoc.score), scoreDoc.shardIndex);
+                            res.rank = Math.min(scoreDoc.rank, value.rankDoc.rank);
+                            return new RankDocAndHitRatio(res, value.hitRatio + step);
+                        }
+                    });
+                }
+            }
+            // sort the results based on hit ratio, then score, then rank, with the smaller doc id as the final tiebreaker
+            RankDocAndHitRatio[] sortedResults = docsToRankResults.values().toArray(RankDocAndHitRatio[]::new);
+            Arrays.sort(sortedResults, (RankDocAndHitRatio doc1, RankDocAndHitRatio doc2) -> {
+                if (doc1.hitRatio != doc2.hitRatio) {
+                    return doc1.hitRatio < doc2.hitRatio ? 1 : -1;
+                }
+                if (false == (Float.isNaN(doc1.rankDoc.score) || Float.isNaN(doc2.rankDoc.score))
+                    && (doc1.rankDoc.score != doc2.rankDoc.score)) {
+                    return doc1.rankDoc.score < doc2.rankDoc.score ? 1 : -1;
+                }
+                if (doc1.rankDoc.rank != doc2.rankDoc.rank) {
+                    return doc1.rankDoc.rank < doc2.rankDoc.rank ? -1 : 1;
+                }
+                return doc1.rankDoc.doc < doc2.rankDoc.doc ? -1 : 1;
+            });
+            // trim the results if needed, otherwise each shard will always return `rank_window_size` results.
+            // pagination and all else will happen on the coordinator when combining the shard responses
+            RankDoc[] topResults = new RankDoc[Math.min(rankWindowSize, sortedResults.length)];
+            for (int rank = 0; rank < topResults.length; ++rank) {
+                topResults[rank] = sortedResults[rank].rankDoc;
+                topResults[rank].rank = rank + 1;
+                topResults[rank].score = sortedResults[rank].hitRatio;
+            }
+            return topResults;
+        }
+    }
+
+    private SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) {
+        var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit).trackTotalHits(false).size(100);
+        retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, false);
+
+        // Record the shard id in the sort result
+        List<SortBuilder<?>> sortBuilders = sourceBuilder.sorts() != null ?
new ArrayList<>(sourceBuilder.sorts()) : new ArrayList<>(); + if (sortBuilders.isEmpty()) { + sortBuilders.add(new ScoreSortBuilder()); + } + sortBuilders.add(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + sourceBuilder.sort(sortBuilders); + return sourceBuilder; + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java index 00013a8d396ba..e618a1b75cc4d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.plugins.Plugin; @@ -141,6 +142,11 @@ private AssertingRetrieverBuilder(RetrieverBuilder innerRetriever) { this.innerRetriever = innerRetriever; } + @Override + public QueryBuilder topDocsQuery() { + return null; + } + @Override public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { assertNull(ctx.getPointInTimeBuilder()); @@ -200,6 +206,11 @@ public boolean isCompound() { return true; } + @Override + public QueryBuilder topDocsQuery() { + return null; + } + @Override public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { assertNotNull(ctx.getPointInTimeBuilder()); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index d29009cd76b8d..c223db531e688 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -364,6 +364,7 @@ exports org.elasticsearch.search.rank.rerank; exports org.elasticsearch.search.rescore; exports org.elasticsearch.search.retriever; + exports org.elasticsearch.search.retriever.rankdoc; exports org.elasticsearch.search.runtime; exports org.elasticsearch.search.searchafter; exports org.elasticsearch.search.slice; diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 33a16797e7e23..78f1b21ea7a44 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -196,6 +196,7 @@ static TransportVersion def(int id) { public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0); public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); + public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); /* * STOP! READ THIS FIRST! 
No, really,
 *       ____ _____ ___  ____    _    ____  _____ _   _ ___ ____    _____ ___ ____  ____ _____ _
diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
index 36b3076c29a31..acdc3e32ea31a 100644
--- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
+++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
@@ -73,6 +73,7 @@
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.lucene.grouping.TopFieldGroups;
+import org.elasticsearch.search.retriever.rankdoc.RankDocsSortField;
 import org.elasticsearch.search.sort.ShardDocSortField;
 
 import java.io.IOException;
@@ -551,6 +552,8 @@ private static SortField rewriteMergeSortField(SortField sortField) {
             return newSortField;
         } else if (sortField.getClass() == ShardDocSortField.class) {
             return new SortField(sortField.getField(), SortField.Type.LONG, sortField.getReverse());
+        } else if (sortField.getClass() == RankDocsSortField.class) {
+            return new SortField(sortField.getField(), SortField.Type.INT, sortField.getReverse());
         } else {
             return sortField;
         }
diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
index 07eec973c77e0..304a48335fd11 100644
--- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
+++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.search.ESToParentBlockJoinQuery;
+import org.elasticsearch.search.retriever.rankdoc.RankDocsQuery;
 import org.elasticsearch.search.runtime.AbstractScriptFieldQuery;
 import org.elasticsearch.search.vectors.KnnScoreDocQuery;
 
@@ -255,10 +256,10 @@ public void visitLeaf(Query leafQuery) {
                 hasUnknownLeaf[0] = true;
             }
             /**
-             * KnnScoreDocQuery requires the same reader that built the docs
+             * KnnScoreDocQuery and RankDocsQuery require the same reader that built the docs
              * When using {@link HighlightFlag#WEIGHT_MATCHES} different readers are used and isn't supported by this query
              */
-            if (leafQuery instanceof KnnScoreDocQuery) {
+            if (leafQuery instanceof KnnScoreDocQuery || leafQuery instanceof RankDocsQuery) {
                 hasUnknownLeaf[0] = true;
             }
             super.visitLeaf(query);
diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java
index bac5fe8c1d1ac..33c64f3eb6350 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchModule.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java
@@ -237,6 +237,8 @@
 import org.elasticsearch.search.retriever.RetrieverBuilder;
 import org.elasticsearch.search.retriever.RetrieverParserContext;
 import org.elasticsearch.search.retriever.StandardRetrieverBuilder;
+import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder;
+import org.elasticsearch.search.retriever.rankdoc.RankDocsSortBuilder;
 import org.elasticsearch.search.sort.FieldSortBuilder;
 import org.elasticsearch.search.sort.GeoDistanceSortBuilder;
 import org.elasticsearch.search.sort.ScoreSortBuilder;
@@ -851,10 +853,12 @@ private void registerRescorer(RescorerSpec spec) {
     }
 
     private void registerRankers() {
+        namedWriteables.add(new
NamedWriteableRegistry.Entry(RankDoc.class, RankDoc.NAME, RankDoc::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(RankDoc.class, RankFeatureDoc.NAME, RankFeatureDoc::new)); namedWriteables.add( new NamedWriteableRegistry.Entry(RankShardResult.class, RankFeatureShardResult.NAME, RankFeatureShardResult::new) ); + namedWriteables.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, RankDocsQueryBuilder.NAME, RankDocsQueryBuilder::new)); } private void registerSorts() { @@ -862,6 +866,7 @@ private void registerSorts() { namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, ScoreSortBuilder.NAME, ScoreSortBuilder::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, ScriptSortBuilder.NAME, ScriptSortBuilder::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(SortBuilder.class, RankDocsSortBuilder.NAME, RankDocsSortBuilder::new)); } private static void registerFromPlugin(List plugins, Function> producer, Consumer consumer) { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 355267a43a8f4..909b6a7882a34 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -2228,15 +2228,15 @@ public ActionRequestValidationException validate( if (sorts() != null) { specified.add(SORT_FIELD.getPreferredName()); } - if (rescores() != null) { - specified.add(RESCORE_FIELD.getPreferredName()); - } if (minScore() != null) { specified.add(MIN_SCORE_FIELD.getPreferredName()); } if (rankBuilder() != null) { specified.add(RANK_FIELD.getPreferredName()); } + if (rescores() != null) { + specified.add(RESCORE_FIELD.getPreferredName()); + } if (specified.isEmpty() == false) { validationException = addValidationError( "cannot specify [" + RETRIEVER.getPreferredName() + "] and " + specified, diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java b/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java index 50b3ddc0f370a..02c03223738b5 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java +++ b/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java @@ -8,29 +8,37 @@ package org.elasticsearch.search.rank; +import org.apache.lucene.search.Explanation; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; /** * {@code RankDoc} is the base class for all ranked results. - * Subclasses should extend this with additional information - * required for their global ranking method. + * Subclasses should extend this with additional information required for their global ranking method. 
 */
-public abstract class RankDoc extends ScoreDoc implements NamedWriteable {
+public class RankDoc extends ScoreDoc implements NamedWriteable, ToXContentFragment {
+
+    public static final String NAME = "rank_doc";
 
     public static final int NO_RANK = -1;
 
     /**
-     * If this document has been ranked, this is its final
-     * rrf ranking from all the result sets.
+     * If this document has been ranked, this is its final rrf ranking from all the result sets.
      */
     public int rank = NO_RANK;
 
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
     public record RankKey(int doc, int shardIndex) {}
 
     public RankDoc(int doc, float score, int shardIndex) {
@@ -51,7 +59,26 @@ public final void writeTo(StreamOutput out) throws IOException {
         doWriteTo(out);
     }
 
-    protected abstract void doWriteTo(StreamOutput out) throws IOException;
+    protected void doWriteTo(StreamOutput out) throws IOException {}
+
+    /**
+     * Explain the ranking of this document.
+     */
+    public Explanation explain() {
+        return Explanation.match(rank, "doc [" + doc + "] with an original score of [" + score + "] is at rank [" + rank + "].");
+    }
+
+    @Override
+    public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.field("_rank", rank);
+        builder.field("_doc", doc);
+        builder.field("_shard", shardIndex);
+        builder.field("_score", score);
+        doToXContent(builder, params);
+        return builder;
+    }
+
+    protected void doToXContent(XContentBuilder builder, Params params) throws IOException {}
 
     @Override
     public final boolean equals(Object o) {
@@ -61,17 +88,21 @@ public final boolean equals(Object o) {
         return doc == rd.doc && score == rd.score && shardIndex == rd.shardIndex && rank == rd.rank && doEquals(rd);
     }
 
-    protected abstract boolean doEquals(RankDoc rd);
+    protected boolean doEquals(RankDoc rd) {
+        return true;
+    }
 
     @Override
     public final int hashCode() {
         return Objects.hash(doc, score, shardIndex, doHashCode());
     }
 
-    protected abstract int doHashCode();
+    protected int doHashCode() {
+        return 0;
+    }
 
     @Override
     public String toString() {
-        return "RankDoc{" + "score=" + score + ", doc=" + doc + ", shardIndex=" + shardIndex + '}';
+        return "RankDoc{" + "_rank=" + rank + ", _doc=" + doc + ", _shard=" + shardIndex + ", _score=" + score + '}';
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java b/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java
index d8b4ec10410f1..8b0cc33844f82 100644
--- a/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java
+++ b/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureDoc.java
@@ -8,9 +8,11 @@
 
 package org.elasticsearch.search.rank.feature;
 
+import org.apache.lucene.search.Explanation;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.rank.RankDoc;
+import org.elasticsearch.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.Objects;
@@ -22,7 +24,7 @@ public class RankFeatureDoc extends RankDoc {
 
     public static final String NAME = "rank_feature_doc";
 
-    // todo: update to support more than 1 fields; and not restrict to string data
+    // TODO: update to support more than one field, and not restrict to string data
     public String featureData;
 
     public RankFeatureDoc(int doc, float score, int shardIndex) {
@@ -34,6 +36,11 @@ public RankFeatureDoc(StreamInput in) throws IOException {
         featureData = in.readOptionalString();
     }
 
+    @Override
+    public
Explanation explain() { + throw new UnsupportedOperationException("explain is not supported for {" + getClass() + "}"); + } + public void featureData(String featureData) { this.featureData = featureData; } @@ -58,4 +65,9 @@ protected int doHashCode() { public String getWriteableName() { return NAME; } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("featureData", featureData); + } } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java index b369324b3ee52..f2a2a606c0e01 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java @@ -10,7 +10,11 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.search.vectors.QueryVectorBuilder; import org.elasticsearch.search.vectors.VectorData; @@ -120,6 +124,16 @@ public String getName() { return NAME; } + @Override + public QueryBuilder topDocsQuery() { + assert rankDocs != null : "{rankDocs} should have been materialized at this point"; + + BoolQueryBuilder knnTopResultsQuery = new BoolQueryBuilder().filter(new RankDocsQueryBuilder(rankDocs)) + .should(new ExactKnnQueryBuilder(VectorData.fromFloats(queryVector), field, similarity)); + preFilterQueryBuilders.forEach(knnTopResultsQuery::filter); + return knnTopResultsQuery; + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { KnnSearchBuilder knnSearchBuilder = new KnnSearchBuilder( diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java new file mode 100644 index 0000000000000..d1f6a41dc789f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.search.retriever;
+
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.DisMaxQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryRewriteContext;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.rank.RankDoc;
+import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder;
+import org.elasticsearch.search.retriever.rankdoc.RankDocsSortBuilder;
+import org.elasticsearch.search.sort.ScoreSortBuilder;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.function.Supplier;
+
+/**
+ * A {@link RetrieverBuilder} that is used to retrieve documents based on the rank of the documents.
+ */
+public class RankDocsRetrieverBuilder extends RetrieverBuilder {
+
+    public static final String NAME = "rank_docs_retriever";
+    private final int rankWindowSize;
+    final List<RetrieverBuilder> sources;
+    final Supplier<RankDoc[]> rankDocs;
+
+    public RankDocsRetrieverBuilder(
+        int rankWindowSize,
+        List<RetrieverBuilder> sources,
+        Supplier<RankDoc[]> rankDocs,
+        List<QueryBuilder> preFilterQueryBuilders
+    ) {
+        this.rankWindowSize = rankWindowSize;
+        this.rankDocs = rankDocs;
+        this.sources = sources;
+        this.preFilterQueryBuilders = preFilterQueryBuilders;
+    }
+
+    @Override
+    public String getName() {
+        return NAME;
+    }
+
+    private boolean sourceShouldRewrite(QueryRewriteContext ctx) throws IOException {
+        for (var source : sources) {
+            if (source.isCompound()) {
+                return true;
+            }
+            var newSource = source.rewrite(ctx);
+            if (newSource != source) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException {
+        assert false == sourceShouldRewrite(ctx) : "retriever sources should be rewritten first";
+        var rewrittenFilters = rewritePreFilters(ctx);
+        if (rewrittenFilters != preFilterQueryBuilders) {
+            return new RankDocsRetrieverBuilder(rankWindowSize, sources, rankDocs, rewrittenFilters);
+        }
+        return this;
+    }
+
+    @Override
+    public QueryBuilder topDocsQuery() {
+        // this is used to fetch all documents from the parent retrievers (i.e. sources)
+        // so that we can use all the matched documents to compute aggregations, nested hits, etc.
+        DisMaxQueryBuilder disMax = new DisMaxQueryBuilder().tieBreaker(0f);
+        for (var retriever : sources) {
+            var query = retriever.topDocsQuery();
+            if (query != null) {
+                if (retriever.retrieverName() != null) {
+                    query.queryName(retriever.retrieverName());
+                }
+                disMax.add(query);
+            }
+        }
+        // ignore prefilters of this level, they are already propagated to children
+        return disMax;
+    }
+
+    @Override
+    public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) {
+        // here we force a custom sort based on the rank of the documents
+        // TODO: should we adjust to account for other field sort options just for the top ranked docs?
+        if (searchSourceBuilder.rescores() == null || searchSourceBuilder.rescores().isEmpty()) {
+            searchSourceBuilder.sort(Arrays.asList(new RankDocsSortBuilder(rankDocs.get()), new ScoreSortBuilder()));
+        }
+        if (searchSourceBuilder.explain() != null && searchSourceBuilder.explain()) {
+            searchSourceBuilder.trackScores(true);
+        }
+        BoolQueryBuilder boolQuery = new BoolQueryBuilder();
+        RankDocsQueryBuilder rankQuery = new RankDocsQueryBuilder(rankDocs.get());
+        // if we have aggregations we need to compute them based on all doc matches, not just the top hits
+        // so we just post-filter the top hits based on the rank queries we have
+        if (searchSourceBuilder.aggregations() != null) {
+            boolQuery.should(rankQuery);
+            // compute a disjunction of all the query sources that were executed to compute the top rank docs
+            QueryBuilder disjunctionOfSources = topDocsQuery();
+            if (disjunctionOfSources != null) {
+                boolQuery.should(disjunctionOfSources);
+            }
+            // post filter the results so that the top docs are still the same
+            searchSourceBuilder.postFilter(rankQuery);
+        } else {
+            boolQuery.must(rankQuery);
+        }
+        // add any prefilters present in the retriever
+        for (var preFilterQueryBuilder : preFilterQueryBuilders) {
+            boolQuery.filter(preFilterQueryBuilder);
+        }
+        searchSourceBuilder.query(boolQuery);
+    }
+
+    @Override
+    protected boolean doEquals(Object o) {
+        RankDocsRetrieverBuilder other = (RankDocsRetrieverBuilder) o;
+        return Arrays.equals(rankDocs.get(), other.rankDocs.get())
+            && sources.equals(other.sources)
+            && rankWindowSize == other.rankWindowSize;
+    }
+
+    @Override
+    protected int doHashCode() {
+        return Objects.hash(super.hashCode(), Arrays.hashCode(rankDocs.get()), sources, rankWindowSize);
+    }
+
+    @Override
+    protected void doToXContent(XContentBuilder builder, Params params) throws IOException {
+        throw new UnsupportedOperationException("toXContent() is not supported for " + this.getClass());
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java
index 3a9979030683a..0857ef21adaaf 100644
--- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.index.query.QueryRewriteContext;
 import org.elasticsearch.index.query.Rewriteable;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.rank.RankDoc;
 import org.elasticsearch.xcontent.AbstractObjectParser;
 import org.elasticsearch.xcontent.FilterXContentParserWrapper;
 import org.elasticsearch.xcontent.NamedObjectNotFoundException;
@@ -191,6 +192,34 @@ public boolean isCompound() {
         return false;
     }
 
+    protected RankDoc[] rankDocs = null;
+
+    public RetrieverBuilder() {}
+
+    protected final List<QueryBuilder> rewritePreFilters(QueryRewriteContext ctx) throws IOException {
+        List<QueryBuilder> newFilters = new ArrayList<>(preFilterQueryBuilders.size());
+        boolean changed = false;
+        for (var filter : preFilterQueryBuilders) {
+            var newFilter = filter.rewrite(ctx);
+            changed |= filter != newFilter;
+            newFilters.add(newFilter);
+        }
+        if (changed) {
+            return newFilters;
+        }
+        return preFilterQueryBuilders;
+    }
+
+    /**
+     * This function is called by compound {@link RetrieverBuilder} to return the original query that
+     * was used by this retriever to compute its top documents.
+ */ + public abstract QueryBuilder topDocsQuery(); + + public void setRankDocs(RankDoc[] rankDocs) { + this.rankDocs = rankDocs; + } + /** * Gets the filters for this retriever. */ @@ -254,5 +283,9 @@ public String toString() { return Strings.toString(this, true, true); } + public String retrieverName() { + return retrieverName; + } + // ---- END FOR TESTING ---- } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java index 4694780770617..682d456295ba9 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java @@ -106,6 +106,17 @@ public static StandardRetrieverBuilder fromXContent(XContentParser parser, Retri Float minScore; CollapseBuilder collapseBuilder; + @Override + public QueryBuilder topDocsQuery() { + // TODO: for compound retrievers this will have to be reworked as queries like knn could be executed twice + if (preFilterQueryBuilders.isEmpty()) { + return queryBuilder; + } + var ret = new BoolQueryBuilder().filter(queryBuilder); + preFilterQueryBuilders.stream().forEach(ret::filter); + return ret; + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { if (preFilterQueryBuilders.isEmpty() == false) { diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java new file mode 100644 index 0000000000000..77da1cc80bc97 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java @@ -0,0 +1,199 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.elasticsearch.search.rank.RankDoc; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +/** + * A {@code RankDocsQuery} returns the top k documents in the order specified by the global doc IDs. + * This is used by retrievers that compute a score for a large document set, and need access to just the top results, + * after performing any reranking or filtering. + */ +public class RankDocsQuery extends Query { + + private final RankDoc[] docs; + private final int[] segmentStarts; + private final Object contextIdentity; + + /** + * Creates a {@code RankDocsQuery} based on the provided docs. 
+ *
+ * @param docs the global doc IDs of documents that match, in ascending order
+ * @param segmentStarts the indexes in docs and scores corresponding to the first matching
+ *     document in each segment. If a segment has no matching documents, it should be assigned
+ *     the index of the next segment that does. There should be a final entry that is always
+ *     docs.length.
+ * @param contextIdentity an object identifying the reader context that was used to build this
+ *     query
+ */
+    RankDocsQuery(RankDoc[] docs, int[] segmentStarts, Object contextIdentity) {
+        this.docs = docs;
+        this.segmentStarts = segmentStarts;
+        this.contextIdentity = contextIdentity;
+    }
+
+    @Override
+    public Query rewrite(IndexSearcher searcher) throws IOException {
+        if (docs.length == 0) {
+            return new MatchNoDocsQuery();
+        }
+        return this;
+    }
+
+    RankDoc[] rankDocs() {
+        return docs;
+    }
+
+    @Override
+    public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
+        if (searcher.getIndexReader().getContext().id() != contextIdentity) {
+            throw new IllegalStateException("This RankDocsDocQuery was created by a different reader");
+        }
+        return new Weight(this) {
+
+            @Override
+            public int count(LeafReaderContext context) {
+                return segmentStarts[context.ord + 1] - segmentStarts[context.ord];
+            }
+
+            @Override
+            public Explanation explain(LeafReaderContext context, int doc) {
+                int found = Arrays.binarySearch(docs, doc + context.docBase, (a, b) -> Integer.compare(((RankDoc) a).doc, (int) b));
+                if (found < 0) {
+                    return Explanation.noMatch("doc not found in top " + docs.length + " rank docs");
+                }
+                return docs[found].explain();
+            }
+
+            @Override
+            public Scorer scorer(LeafReaderContext context) {
+                // Segment starts indicate how many docs are in the segment,
+                // upper equalling lower indicates no documents for this segment
+                if (segmentStarts[context.ord] == segmentStarts[context.ord + 1]) {
+                    return null;
+                }
+                return new Scorer(this) {
+                    final int lower = segmentStarts[context.ord];
+                    final int upper = segmentStarts[context.ord + 1];
+                    int upTo = -1;
+                    float score;
+
+                    @Override
+                    public DocIdSetIterator iterator() {
+                        return new DocIdSetIterator() {
+                            @Override
+                            public int docID() {
+                                return currentDocId();
+                            }
+
+                            @Override
+                            public int nextDoc() {
+                                if (upTo == -1) {
+                                    upTo = lower;
+                                } else {
+                                    ++upTo;
+                                }
+                                return currentDocId();
+                            }
+
+                            @Override
+                            public int advance(int target) throws IOException {
+                                return slowAdvance(target);
+                            }
+
+                            @Override
+                            public long cost() {
+                                return upper - lower;
+                            }
+                        };
+                    }
+
+                    @Override
+                    public float getMaxScore(int docId) {
+                        if (docId != NO_MORE_DOCS) {
+                            docId += context.docBase;
+                        }
+                        float maxScore = 0;
+                        for (int idx = Math.max(lower, upTo); idx < upper && docs[idx].doc <= docId; idx++) {
+                            maxScore = Math.max(maxScore, docs[idx].score);
+                        }
+                        return maxScore;
+                    }
+
+                    @Override
+                    public float score() {
+                        return docs[upTo].score;
+                    }
+
+                    @Override
+                    public int docID() {
+                        return currentDocId();
+                    }
+
+                    private int currentDocId() {
+                        if (upTo == -1) {
+                            return -1;
+                        }
+                        if (upTo >= upper) {
+                            return NO_MORE_DOCS;
+                        }
+                        return docs[upTo].doc - context.docBase;
+                    }
+
+                };
+            }
+
+            @Override
+            public boolean isCacheable(LeafReaderContext ctx) {
+                return true;
+            }
+        };
+    }
+
+    @Override
+    public String toString(String field) {
+        return this.getClass().getSimpleName() + "{rank_docs:" + Arrays.toString(docs) + "}";
+    }
+
+    @Override
+    public void visit(QueryVisitor visitor) {
+        visitor.visitLeaf(this);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (sameClassAs(obj) == false) {
+            return false;
+        }
+        return Arrays.equals(docs, ((RankDocsQuery) obj).docs)
+            && Arrays.equals(segmentStarts, ((RankDocsQuery) obj).segmentStarts)
+            && contextIdentity == ((RankDocsQuery) obj).contextIdentity;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(classHash(), Arrays.hashCode(docs), Arrays.hashCode(segmentStarts), contextIdentity);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java
new file mode 100644
index 0000000000000..ff2085bc8903f
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.retriever.rankdoc;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.query.AbstractQueryBuilder;
+import org.elasticsearch.index.query.SearchExecutionContext;
+import org.elasticsearch.search.rank.RankDoc;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+
+public class RankDocsQueryBuilder extends AbstractQueryBuilder<RankDocsQueryBuilder> {
+
+    public static final String NAME = "rank_docs_query";
+
+    private final RankDoc[] rankDocs;
+
+    public RankDocsQueryBuilder(RankDoc[] rankDocs) {
+        this.rankDocs = rankDocs;
+    }
+
+    public RankDocsQueryBuilder(StreamInput in) throws IOException {
+        super(in);
+        this.rankDocs = in.readArray(c -> c.readNamedWriteable(RankDoc.class), RankDoc[]::new);
+    }
+
+    RankDoc[] rankDocs() {
+        return rankDocs;
+    }
+
+    @Override
+    protected void doWriteTo(StreamOutput out) throws IOException {
+        out.writeArray(StreamOutput::writeNamedWriteable, rankDocs);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    protected Query doToQuery(SearchExecutionContext context) throws IOException {
+        RankDoc[] shardRankDocs = Arrays.stream(rankDocs)
+            .filter(r -> r.shardIndex == context.getShardRequestIndex())
+            .sorted(Comparator.comparingInt(r -> r.doc))
+            .toArray(RankDoc[]::new);
+        IndexReader reader = context.getIndexReader();
+        int[] segmentStarts = findSegmentStarts(reader, shardRankDocs);
+        return new RankDocsQuery(shardRankDocs, segmentStarts, reader.getContext().id());
+    }
+
+    private static int[] findSegmentStarts(IndexReader reader, RankDoc[] docs) {
+        // e.g. leaves with docBase {0, 100, 200} and rank docs at global ids {5, 105, 107} yield starts {0, 1, 3, 3}
+        int[] starts = new int[reader.leaves().size() + 1];
+        starts[starts.length - 1] = docs.length;
+        if (starts.length == 2) {
+            return starts;
+        }
+        int resultIndex = 0;
+        for (int i = 1; i < starts.length - 1; i++) {
+            int upper = reader.leaves().get(i).docBase;
+            resultIndex = Arrays.binarySearch(docs, resultIndex, docs.length, upper, (a, b) -> Integer.compare(((RankDoc) a).doc, (int) b));
+            if (resultIndex < 0) {
+                resultIndex = -1 - resultIndex;
+            }
+
starts[i] = resultIndex; + } + return starts; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.startArray("rank_docs"); + for (RankDoc doc : rankDocs) { + builder.startObject(); + doc.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + } + + @Override + protected boolean doEquals(RankDocsQueryBuilder other) { + return Arrays.equals(rankDocs, other.rankDocs); + } + + @Override + protected int doHashCode() { + return Arrays.hashCode(rankDocs); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.RANK_DOCS_RETRIEVER; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilder.java new file mode 100644 index 0000000000000..0122e6ee9ea12 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilder.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.sort.BucketedSort; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortFieldAndFormat; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Builds a {@code RankDocsSortField} that sorts documents by their rank as computed through the {@code RankDocsRetrieverBuilder}. 
+ */ +public class RankDocsSortBuilder extends SortBuilder { + public static final String NAME = "rank_docs_sort"; + + private RankDoc[] rankDocs; + + public RankDocsSortBuilder(RankDoc[] rankDocs) { + this.rankDocs = rankDocs; + } + + public RankDocsSortBuilder(StreamInput in) throws IOException { + this.rankDocs = in.readArray(c -> c.readNamedWriteable(RankDoc.class), RankDoc[]::new); + } + + public RankDocsSortBuilder(RankDocsSortBuilder original) { + this.rankDocs = original.rankDocs; + } + + public RankDocsSortBuilder rankDocs(RankDoc[] rankDocs) { + this.rankDocs = rankDocs; + return this; + } + + public RankDoc[] rankDocs() { + return this.rankDocs; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeArray(StreamOutput::writeNamedWriteable, rankDocs); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public SortBuilder rewrite(QueryRewriteContext ctx) throws IOException { + return this; + } + + @Override + protected SortFieldAndFormat build(SearchExecutionContext context) throws IOException { + RankDoc[] shardRankDocs = Arrays.stream(rankDocs) + .filter(r -> r.shardIndex == context.getShardRequestIndex()) + .toArray(RankDoc[]::new); + return new SortFieldAndFormat(new RankDocsSortField(shardRankDocs), DocValueFormat.RAW); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.RANK_DOCS_RETRIEVER; + } + + @Override + public BucketedSort buildBucketedSort(SearchExecutionContext context, BigArrays bigArrays, int bucketSize, BucketedSort.ExtraData extra) + throws IOException { + throw new UnsupportedOperationException("buildBucketedSort() is not supported for " + this.getClass()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + throw new UnsupportedOperationException("toXContent() is not supported for " + this.getClass()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + RankDocsSortBuilder that = (RankDocsSortBuilder) obj; + return Arrays.equals(rankDocs, that.rankDocs) && this.order.equals(that.order); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(this.rankDocs), this.order); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortField.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortField.java new file mode 100644 index 0000000000000..3cd29d352028b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortField.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.FieldComparatorSource; +import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.comparators.NumericComparator; +import org.apache.lucene.util.hnsw.IntToIntFunction; +import org.elasticsearch.search.rank.RankDoc; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * A {@link SortField} that sorts documents by their rank as computed through the {@code RankDocsRetrieverBuilder}. + * This is used when we want to score and rank the documents irrespective of their original scores, + * but based on the provided rank they were assigned, e.g. through an RRF retriever. + **/ +public class RankDocsSortField extends SortField { + + public static final String NAME = "_rank"; + + public RankDocsSortField(RankDoc[] rankDocs) { + super(NAME, new FieldComparatorSource() { + @Override + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { + return new RankDocsComparator(numHits, rankDocs); + } + }); + } + + private static class RankDocsComparator extends NumericComparator { + private final int[] values; + private final Map rankDocMap; + private int topValue; + private int bottom; + + private RankDocsComparator(int numHits, RankDoc[] rankDocs) { + super(NAME, Integer.MAX_VALUE, false, Pruning.NONE, Integer.BYTES); + this.values = new int[numHits]; + this.rankDocMap = Arrays.stream(rankDocs).collect(Collectors.toMap(k -> k.doc, v -> v.rank)); + } + + @Override + public int compare(int slot1, int slot2) { + return Integer.compare(values[slot1], values[slot2]); + } + + @Override + public Integer value(int slot) { + return Integer.valueOf(values[slot]); + } + + @Override + public void setTopValue(Integer value) { + topValue = value; + } + + @Override + public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { + IntToIntFunction docToRank = doc -> rankDocMap.getOrDefault(context.docBase + doc, Integer.MAX_VALUE); + return new LeafFieldComparator() { + @Override + public void setBottom(int slot) throws IOException { + bottom = values[slot]; + } + + @Override + public int compareBottom(int doc) { + return Integer.compare(bottom, docToRank.apply(doc)); + } + + @Override + public int compareTop(int doc) { + return Integer.compare(topValue, docToRank.apply(doc)); + } + + @Override + public void copy(int slot, int doc) { + values[slot] = docToRank.apply(doc); + } + + @Override + public void setScorer(Scorable scorer) {} + }; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index ed02328d388b6..585e7c775da35 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -58,7 +58,6 @@ import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.rank.RankShardResult; -import org.elasticsearch.search.rank.TestRankDoc; import 
org.elasticsearch.search.rank.TestRankShardResult; import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; import org.elasticsearch.search.suggest.SortBy; @@ -463,10 +462,10 @@ private static AtomicArray generateQueryResults( topDocs = Lucene.EMPTY_TOP_DOCS; } else if (rank) { int nDocs = randomIntBetween(0, searchHitsSize); - TestRankDoc[] rankDocs = new TestRankDoc[nDocs]; + RankDoc[] rankDocs = new RankDoc[nDocs]; for (int i = 0; i < nDocs; i++) { float score = useConstantScore ? 1.0F : Math.abs(randomFloat()); - rankDocs[i] = new TestRankDoc(i, score, shardIndex); + rankDocs[i] = new RankDoc(i, score, shardIndex); maxScore = Math.max(score, maxScore); } querySearchResult.setRankShardResult(new TestRankShardResult(rankDocs)); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 84a4eab897ba8..2b1b95d8595f0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.AbstractSearchTestCase; @@ -289,6 +290,11 @@ protected int doHashCode() { public boolean isCompound() { return true; } + + @Override + public QueryBuilder topDocsQuery() { + return null; + } })); searchRequest.allowPartialSearchResults(true); searchRequest.scroll((Scroll) null); @@ -303,6 +309,11 @@ public boolean isCompound() { { // allow_partial_results and non-compound retriever SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder().retriever(new RetrieverBuilder() { + @Override + public QueryBuilder topDocsQuery() { + return null; + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { // no-op @@ -361,6 +372,11 @@ protected int doHashCode() { public boolean isCompound() { return true; } + + @Override + public QueryBuilder topDocsQuery() { + return null; + } })); searchRequest.scroll((Scroll) null); ActionRequestValidationException validationErrors = searchRequest.validate(); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 7ddcc88facb2a..bdddea58b713f 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -111,9 +111,9 @@ import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.rank.RankShardResult; import org.elasticsearch.search.rank.TestRankBuilder; -import org.elasticsearch.search.rank.TestRankDoc; import org.elasticsearch.search.rank.TestRankShardResult; import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; @@ -504,9 +504,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, 
so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; @@ -553,7 +553,7 @@ public RankShardResult buildRankFeatureShardResult(SearchHits hits, int shardId) queryResult = (QuerySearchResult) queryPhaseResults.get(); // these are the matched docs from the query phase - final TestRankDoc[] queryRankDocs = ((TestRankShardResult) queryResult.getRankShardResult()).testRankDocs; + final RankDoc[] queryRankDocs = ((TestRankShardResult) queryResult.getRankShardResult()).testRankDocs; // assume that we have cut down to these from the coordinator node as the top-docs to run the rank feature phase upon List topRankWindowSizeDocs = randomNonEmptySubsetOf(Arrays.stream(queryRankDocs).map(x -> x.doc).toList()); @@ -709,18 +709,18 @@ public ScoreDoc[] rankQueryPhaseResults( List querySearchResults, SearchPhaseController.TopDocsStats topDocStats ) { - List rankDocs = new ArrayList<>(); + List rankDocs = new ArrayList<>(); for (int i = 0; i < querySearchResults.size(); i++) { QuerySearchResult querySearchResult = querySearchResults.get(i); TestRankShardResult shardResult = (TestRankShardResult) querySearchResult .getRankShardResult(); - for (TestRankDoc trd : shardResult.testRankDocs) { + for (RankDoc trd : shardResult.testRankDocs) { trd.shardIndex = i; rankDocs.add(trd); } } - rankDocs.sort(Comparator.comparing((TestRankDoc doc) -> doc.score).reversed()); - TestRankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(TestRankDoc[]::new); + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); topDocStats.fetchHits = topResults.length; return topResults; } @@ -741,9 +741,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; @@ -868,9 +868,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; @@ -969,18 +969,18 @@ public ScoreDoc[] rankQueryPhaseResults( List querySearchResults, SearchPhaseController.TopDocsStats topDocStats ) { - List rankDocs = new ArrayList<>(); + List rankDocs = new ArrayList<>(); for (int i = 0; i < querySearchResults.size(); i++) { QuerySearchResult querySearchResult = querySearchResults.get(i); TestRankShardResult shardResult = (TestRankShardResult) querySearchResult .getRankShardResult(); - for (TestRankDoc trd : shardResult.testRankDocs) { + for (RankDoc trd : shardResult.testRankDocs) { trd.shardIndex = i; rankDocs.add(trd); } } - rankDocs.sort(Comparator.comparing((TestRankDoc doc) -> doc.score).reversed()); - TestRankDoc[] topResults = 
rankDocs.stream().limit(rankWindowSize).toArray(TestRankDoc[]::new); + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); topDocStats.fetchHits = topResults.length; return topResults; } @@ -1001,9 +1001,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; @@ -1097,18 +1097,18 @@ public ScoreDoc[] rankQueryPhaseResults( List querySearchResults, SearchPhaseController.TopDocsStats topDocStats ) { - List rankDocs = new ArrayList<>(); + List rankDocs = new ArrayList<>(); for (int i = 0; i < querySearchResults.size(); i++) { QuerySearchResult querySearchResult = querySearchResults.get(i); TestRankShardResult shardResult = (TestRankShardResult) querySearchResult .getRankShardResult(); - for (TestRankDoc trd : shardResult.testRankDocs) { + for (RankDoc trd : shardResult.testRankDocs) { trd.shardIndex = i; rankDocs.add(trd); } } - rankDocs.sort(Comparator.comparing((TestRankDoc doc) -> doc.score).reversed()); - TestRankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(TestRankDoc[]::new); + rankDocs.sort(Comparator.comparing((RankDoc doc) -> doc.score).reversed()); + RankDoc[] topResults = rankDocs.stream().limit(rankWindowSize).toArray(RankDoc[]::new); topDocStats.fetchHits = topResults.length; return topResults; } @@ -1129,9 +1129,9 @@ public RankShardResult combineQueryPhaseResults(List rankResults) { // we know we have just 1 query, so return all the docs from it return new TestRankShardResult( Arrays.stream(rankResults.get(0).scoreDocs) - .map(x -> new TestRankDoc(x.doc, x.score, x.shardIndex)) + .map(x -> new RankDoc(x.doc, x.score, x.shardIndex)) .limit(rankWindowSize()) - .toArray(TestRankDoc[]::new) + .toArray(RankDoc[]::new) ); } }; diff --git a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java index b1db177f6ccdd..cb0b69e5ef2ac 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java @@ -27,9 +27,9 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.rank.RankShardResult; import org.elasticsearch.search.rank.TestRankBuilder; -import org.elasticsearch.search.rank.TestRankDoc; import org.elasticsearch.search.rank.TestRankShardResult; import org.elasticsearch.search.suggest.SuggestTests; import org.elasticsearch.test.ESTestCase; @@ -80,9 +80,9 @@ private static QuerySearchResult createTestInstance() throws Exception { result.from(randomInt()); if (randomBoolean()) { int queryCount = randomIntBetween(2, 4); - TestRankDoc[] docs = new TestRankDoc[randomIntBetween(5, 20)]; + RankDoc[] docs = new RankDoc[randomIntBetween(5, 20)]; for (int di = 0; di < docs.length; ++di) { - docs[di] = new TestRankDoc(di, -1, queryCount); + docs[di] = new RankDoc(di, -1, queryCount); } 
result.setRankShardResult(new TestRankShardResult(docs)); } diff --git a/server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java b/server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java new file mode 100644 index 0000000000000..5e7ac2957c250 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/rank/RankDocTests.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.rank; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class RankDocTests extends AbstractWireSerializingTestCase { + + static RankDoc createTestRankDoc() { + RankDoc rankDoc = new RankDoc(randomNonNegativeInt(), randomFloat(), randomIntBetween(0, 1)); + rankDoc.rank = randomNonNegativeInt(); + return rankDoc; + } + + @Override + protected Writeable.Reader instanceReader() { + return RankDoc::new; + } + + @Override + protected RankDoc createTestInstance() { + return createTestRankDoc(); + } + + @Override + protected RankDoc mutateInstance(RankDoc instance) throws IOException { + RankDoc mutated = new RankDoc(instance.doc, instance.score, instance.shardIndex); + mutated.rank = instance.rank; + if (frequently()) { + mutated.doc = randomNonNegativeInt(); + } + if (frequently()) { + mutated.score = randomFloat(); + } + if (frequently()) { + mutated.shardIndex = randomNonNegativeInt(); + } + if (frequently()) { + mutated.rank = randomNonNegativeInt(); + } + return mutated; + } + + public void testExplain() { + RankDoc instance = createTestRankDoc(); + assertEquals(instance.explain().toString(), instance.explain().toString()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java index de35d765a1551..afa6ff89a79e9 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -18,6 +19,9 @@ import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.usage.SearchUsage; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -102,6 +106,34 @@ public void testRewrite() throws IOException { } } + public void testIsCompound() { + KnnRetrieverBuilder knnRetriever = createRandomKnnRetrieverBuilder(); + assertFalse(knnRetriever.isCompound()); 
+ } + + public void testTopDocsQuery() { + KnnRetrieverBuilder knnRetriever = createRandomKnnRetrieverBuilder(); + knnRetriever.rankDocs = new RankDoc[] { + new RankDoc(0, randomFloat(), 0), + new RankDoc(10, randomFloat(), 0), + new RankDoc(20, randomFloat(), 1), + new RankDoc(25, randomFloat(), 1), }; + final int preFilters = knnRetriever.preFilterQueryBuilders.size(); + QueryBuilder topDocsQuery = knnRetriever.topDocsQuery(); + assertNotNull(topDocsQuery); + assertThat(topDocsQuery, instanceOf(BoolQueryBuilder.class)); + assertThat(((BoolQueryBuilder) topDocsQuery).filter().size(), equalTo(1 + preFilters)); + assertThat(((BoolQueryBuilder) topDocsQuery).filter().get(0), instanceOf(RankDocsQueryBuilder.class)); + for (int i = 0; i < preFilters; i++) { + assertThat( + ((BoolQueryBuilder) topDocsQuery).filter().get(i + 1), + instanceOf(knnRetriever.preFilterQueryBuilders.get(i).getClass()) + ); + } + assertThat(((BoolQueryBuilder) topDocsQuery).should().size(), equalTo(1)); + assertThat(((BoolQueryBuilder) topDocsQuery).should().get(0), instanceOf(ExactKnnQueryBuilder.class)); + } + @Override protected boolean supportsUnknownFields() { return false; diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java new file mode 100644 index 0000000000000..59f5ddf0d87ca --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.DisMaxQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; +import org.elasticsearch.search.retriever.rankdoc.RankDocsSortBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.search.vectors.KnnSearchBuilderTests.randomVector; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class RankDocsRetrieverBuilderTests extends ESTestCase { + + private Supplier rankDocsSupplier() { + final int rankDocsCount = randomIntBetween(0, 10); + final int shardIndex = 0; + RankDoc[] rankDocs = new RankDoc[rankDocsCount]; + int docId = 0; + for (int i = 0; i < rankDocsCount; i++) { + RankDoc testRankDoc = new RankDoc(docId, randomFloat(), shardIndex); + docId += randomInt(100); + rankDocs[i] = testRankDoc; + } + return () -> rankDocs; + } + + private List innerRetrievers() { + List retrievers = new ArrayList<>(); + int numRetrievers = randomIntBetween(1, 10); + for (int i = 0; i < numRetrievers; i++) { + if (randomBoolean()) { + StandardRetrieverBuilder standardRetrieverBuilder = new StandardRetrieverBuilder(); + standardRetrieverBuilder.queryBuilder = RandomQueryBuilder.createQuery(random()); + if (randomBoolean()) { + standardRetrieverBuilder.preFilterQueryBuilders = preFilters(); + } + retrievers.add(standardRetrieverBuilder); + } else { + KnnRetrieverBuilder knnRetrieverBuilder = new KnnRetrieverBuilder( + randomAlphaOfLength(10), + randomVector(randomInt(10)), + null, + randomInt(10), + randomIntBetween(10, 100), + randomFloat() + ); + if (randomBoolean()) { + knnRetrieverBuilder.preFilterQueryBuilders = preFilters(); + } + knnRetrieverBuilder.rankDocs = rankDocsSupplier().get(); + retrievers.add(knnRetrieverBuilder); + } + } + return retrievers; + } + + private List preFilters() { + List preFilters = new ArrayList<>(); + int numPreFilters = randomInt(10); + for (int i = 0; i < numPreFilters; i++) { + preFilters.add(RandomQueryBuilder.createQuery(random())); + } + return preFilters; + } + + private RankDocsRetrieverBuilder createRandomRankDocsRetrieverBuilder() { + return new RankDocsRetrieverBuilder(randomInt(100), innerRetrievers(), rankDocsSupplier(), preFilters()); + } + + public void testExtractToSearchSourceBuilder() { + RankDocsRetrieverBuilder retriever = createRandomRankDocsRetrieverBuilder(); + SearchSourceBuilder source = new SearchSourceBuilder(); + if (randomBoolean()) { + source.aggregation(new TermsAggregationBuilder("name").field("field")); 
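+ // an aggregation forces the rank-docs query into a should clause plus a post_filter (asserted below)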
+ } + retriever.extractToSearchSourceBuilder(source, randomBoolean()); + assertThat(source.sorts().size(), equalTo(2)); + assertThat(source.sorts().get(0), instanceOf(RankDocsSortBuilder.class)); + assertThat(source.sorts().get(1), instanceOf(ScoreSortBuilder.class)); + assertThat(source.query(), instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder bq = (BoolQueryBuilder) source.query(); + if (source.aggregations() != null) { + assertThat(bq.must().size(), equalTo(0)); + assertThat(bq.should().size(), greaterThanOrEqualTo(1)); + assertThat(bq.should().get(0), instanceOf(RankDocsQueryBuilder.class)); + assertNotNull(source.postFilter()); + assertThat(source.postFilter(), instanceOf(RankDocsQueryBuilder.class)); + } else { + assertThat(bq.must().size(), equalTo(1)); + assertThat(bq.must().get(0), instanceOf(RankDocsQueryBuilder.class)); + assertNull(source.postFilter()); + } + assertThat(bq.filter().size(), equalTo(retriever.preFilterQueryBuilders.size())); + } + + public void testTopDocsQuery() { + RankDocsRetrieverBuilder retriever = createRandomRankDocsRetrieverBuilder(); + QueryBuilder topDocs = retriever.topDocsQuery(); + assertNotNull(topDocs); + assertThat(topDocs, instanceOf(DisMaxQueryBuilder.class)); + assertThat(((DisMaxQueryBuilder) topDocs).innerQueries(), hasSize(retriever.sources.size())); + } + + public void testRewrite() throws IOException { + RankDocsRetrieverBuilder retriever = createRandomRankDocsRetrieverBuilder(); + boolean compoundAdded = false; + if (randomBoolean()) { + compoundAdded = true; + retriever.sources.add(new TestRetrieverBuilder("compound_retriever") { + @Override + public boolean isCompound() { + return true; + } + }); + } + SearchSourceBuilder source = new SearchSourceBuilder().retriever(retriever); + QueryRewriteContext queryRewriteContext = mock(QueryRewriteContext.class); + if (compoundAdded) { + expectThrows(AssertionError.class, () -> Rewriteable.rewrite(source, queryRewriteContext)); + } else { + SearchSourceBuilder rewrittenSource = Rewriteable.rewrite(source, queryRewriteContext); + assertNull(rewrittenSource.retriever()); + assertTrue(rewrittenSource.knnSearch().isEmpty()); + assertThat( + rewrittenSource.query(), + anyOf(instanceOf(BoolQueryBuilder.class), instanceOf(MatchAllQueryBuilder.class), instanceOf(MatchNoneQueryBuilder.class)) + ); + if (rewrittenSource.query() instanceof BoolQueryBuilder) { + BoolQueryBuilder bq = (BoolQueryBuilder) rewrittenSource.query(); + assertThat(bq.filter().size(), equalTo(retriever.preFilterQueryBuilders.size())); + // we don't have any aggregations so the RankDocs query is set as a must clause + assertThat(bq.must().size(), equalTo(1)); + assertThat(bq.must().get(0), instanceOf(RankDocsQueryBuilder.class)); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java index cd0d8f8d50c1e..166b07e23446c 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java @@ -166,6 +166,37 @@ public void testRewrite() throws IOException { } } + public void testIsCompound() { + StandardRetrieverBuilder standardRetriever = createTestInstance(); + assertFalse(standardRetriever.isCompound()); + } + + public void testTopDocsQuery() throws IOException { + StandardRetrieverBuilder standardRetriever = 
createTestInstance(); + final int preFilters = standardRetriever.preFilterQueryBuilders.size(); + if (standardRetriever.queryBuilder == null) { + if (preFilters > 0) { + expectThrows(IllegalArgumentException.class, standardRetriever::topDocsQuery); + } + } else { + QueryBuilder topDocsQuery = standardRetriever.topDocsQuery(); + assertNotNull(topDocsQuery); + if (preFilters > 0) { + assertThat(topDocsQuery, instanceOf(BoolQueryBuilder.class)); + assertThat(((BoolQueryBuilder) topDocsQuery).filter().size(), equalTo(1 + preFilters)); + assertThat(((BoolQueryBuilder) topDocsQuery).filter().get(0), instanceOf(standardRetriever.queryBuilder.getClass())); + for (int i = 0; i < preFilters; i++) { + assertThat( + ((BoolQueryBuilder) topDocsQuery).filter().get(i + 1), + instanceOf(standardRetriever.preFilterQueryBuilders.get(i).getClass()) + ); + } + } else { + assertThat(topDocsQuery, instanceOf(standardRetriever.queryBuilder.getClass())); + } + } + } + private static void assertEqualQueryOrMatchAllNone(QueryBuilder actual, QueryBuilder expected) { assertThat(actual, anyOf(instanceOf(MatchAllQueryBuilder.class), instanceOf(MatchNoneQueryBuilder.class), equalTo(expected))); } diff --git a/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java new file mode 100644 index 0000000000000..5ff0ec2017e96 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.Arrays; + +public class RankDocsQueryBuilderTests extends AbstractQueryTestCase { + + private RankDoc[] generateRandomRankDocs() { + int totalDocs = randomIntBetween(0, 10); + RankDoc[] rankDocs = new RankDoc[totalDocs]; + int currentDoc = 0; + for (int i = 0; i < totalDocs; i++) { + RankDoc rankDoc = new RankDoc(currentDoc, randomFloat(), randomIntBetween(0, 2)); + rankDocs[i] = rankDoc; + currentDoc += randomIntBetween(0, 100); + } + return rankDocs; + } + + @Override + protected RankDocsQueryBuilder doCreateTestQueryBuilder() { + RankDoc[] rankDocs = generateRandomRankDocs(); + return new RankDocsQueryBuilder(rankDocs); + } + + @Override + protected void doAssertLuceneQuery(RankDocsQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException { + assertTrue(query instanceof RankDocsQuery); + RankDocsQuery rankDocsQuery = (RankDocsQuery) query; + assertArrayEquals(queryBuilder.rankDocs(), rankDocsQuery.rankDocs()); + } + + /** + * Overridden to ensure that {@link SearchExecutionContext} has a non-null {@link IndexReader} + */ + @Override + public void testToQuery() throws IOException { + try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + iw.addDocument(new Document()); + try (IndexReader reader = iw.getReader()) { + SearchExecutionContext context = createSearchExecutionContext(newSearcher(reader)); + RankDocsQueryBuilder queryBuilder = createTestQueryBuilder(); + Query query = queryBuilder.doToQuery(context); + + assertTrue(query instanceof RankDocsQuery); + RankDocsQuery rankDocsQuery = (RankDocsQuery) query; + + int shardIndex = context.getShardRequestIndex(); + int expectedDocs = (int) Arrays.stream(queryBuilder.rankDocs()).filter(x -> x.shardIndex == shardIndex).count(); + assertEquals(expectedDocs, rankDocsQuery.rankDocs().length); + } + } + } + + /** + * Overridden to ensure that {@link SearchExecutionContext} has a non-null {@link IndexReader} + */ + @Override + public void testCacheability() throws IOException { + try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + iw.addDocument(new Document()); + try (IndexReader reader = iw.getReader()) { + SearchExecutionContext context = createSearchExecutionContext(newSearcher(reader)); + RankDocsQueryBuilder queryBuilder = createTestQueryBuilder(); + QueryBuilder rewriteQuery = rewriteQuery(queryBuilder, new SearchExecutionContext(context)); + assertNotNull(rewriteQuery.toQuery(context)); + assertTrue("query should be cacheable: " + queryBuilder.toString(), context.isCacheable()); + } + } + } + + /** + * Overridden to ensure that {@link SearchExecutionContext} has a non-null {@link IndexReader} + */ + @Override + public void testMustRewrite() throws IOException { + try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + iw.addDocument(new Document()); + try (IndexReader reader = 
iw.getReader()) { + SearchExecutionContext context = createSearchExecutionContext(newSearcher(reader)); + context.setAllowUnmappedFields(true); + RankDocsQueryBuilder queryBuilder = createTestQueryBuilder(); + queryBuilder.toQuery(context); + } + } + } + + @Override + public void testFromXContent() throws IOException { + // no-op since RankDocsQueryBuilder is an internal only API + } + + @Override + public void testUnknownField() throws IOException { + // no-op since RankDocsQueryBuilder is agnostic to unknown fields and an internal only API + } + + @Override + public void testValidOutput() throws IOException { + // no-op since RankDocsQueryBuilder is an internal only API + } +} diff --git a/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilderTests.java new file mode 100644 index 0000000000000..ba7728c5dc622 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsSortBuilderTests.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.retriever.rankdoc; + +import org.apache.lucene.search.SortField; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.sort.AbstractSortTestCase; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class RankDocsSortBuilderTests extends AbstractSortTestCase { + + @Override + protected RankDocsSortBuilder createTestItem() { + return randomRankDocsSortBuilder(); + } + + private RankDocsSortBuilder randomRankDocsSortBuilder() { + RankDoc[] rankDocs = randomRankDocs(randomInt(100)); + return new RankDocsSortBuilder(rankDocs); + } + + private RankDoc[] randomRankDocs(int totalDocs) { + RankDoc[] rankDocs = new RankDoc[totalDocs]; + for (int i = 0; i < totalDocs; i++) { + rankDocs[i] = new RankDoc(randomNonNegativeInt(), randomFloat(), randomIntBetween(0, 1)); + rankDocs[i].rank = i + 1; + } + return rankDocs; + } + + @Override + protected RankDocsSortBuilder mutate(RankDocsSortBuilder original) throws IOException { + RankDocsSortBuilder mutated = new RankDocsSortBuilder(original); + mutated.rankDocs(randomRankDocs(original.rankDocs().length + randomInt(100))); + return mutated; + } + + @Override + public void testFromXContent() throws IOException { + // no-op + } + + @Override + protected RankDocsSortBuilder fromXContent(XContentParser parser, String fieldName) throws IOException { + throw new UnsupportedOperationException( + "{" + RankDocsSortBuilder.class.getSimpleName() + "} does not support parsing from XContent" + ); + } + + @Override + protected void sortFieldAssertions(RankDocsSortBuilder builder, SortField sortField, DocValueFormat format) throws IOException { + assertThat(builder.order(), equalTo(SortOrder.ASC)); + assertThat(sortField, instanceOf(RankDocsSortField.class)); + assertThat(sortField.getField(), equalTo(RankDocsSortField.NAME)); + assertThat(sortField.getType(),
equalTo(SortField.Type.CUSTOM)); + assertThat(sortField.getReverse(), equalTo(false)); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankDoc.java b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankDoc.java deleted file mode 100644 index f2f3cb82d203f..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankDoc.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.search.rank; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -public class TestRankDoc extends RankDoc { - - public TestRankDoc(int doc, float score, int shardIndex) { - super(doc, score, shardIndex); - } - - public TestRankDoc(StreamInput in) throws IOException { - super(in); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - // do nothing - } - - @Override - public boolean doEquals(RankDoc rd) { - return true; - } - - @Override - public int doHashCode() { - return 0; - } - - @Override - public String getWriteableName() { - return "test_rank_doc"; - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java index ab66d021497d5..6c1faaf8d2abf 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java +++ b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java @@ -17,14 +17,14 @@ public class TestRankShardResult implements RankShardResult { - public final TestRankDoc[] testRankDocs; + public final RankDoc[] testRankDocs; - public TestRankShardResult(TestRankDoc[] testRankDocs) { + public TestRankShardResult(RankDoc[] testRankDocs) { this.testRankDocs = testRankDocs; } public TestRankShardResult(StreamInput in) throws IOException { - testRankDocs = in.readArray(TestRankDoc::new, TestRankDoc[]::new); + testRankDocs = in.readArray(RankDoc::new, RankDoc[]::new); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java index 40cc1890f69ed..fcc01adf1815a 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestRetrieverBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.retriever; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; @@ -65,6 +66,11 @@ public TestRetrieverBuilder(String value) { this.value = value; } + @Override + public QueryBuilder topDocsQuery() { + throw new UnsupportedOperationException("only used for parsing tests"); + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { throw new 
UnsupportedOperationException("only used for parsing tests"); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java index ab8c85cac00e3..eb36c445506a7 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; @@ -25,7 +26,7 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; /** - * A {@code RetrieverBuilder} for parsing and constructing a text similarity reranker retriever. + * A {@code RetrieverBuilder} for randomly scoring a set of documents using the {@code RandomRankBuilder} */ public class RandomRankRetrieverBuilder extends RetrieverBuilder { @@ -74,6 +75,11 @@ public RandomRankRetrieverBuilder(RetrieverBuilder retrieverBuilder, String fiel this.seed = seed; } + @Override + public QueryBuilder topDocsQuery() { + return retrieverBuilder.topDocsQuery(); + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index e1d27e96cc5ff..927c708268a49 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -9,6 +9,8 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; @@ -20,6 +22,7 @@ import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; +import java.util.List; import java.util.Objects; import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; @@ -99,11 +102,61 @@ public TextSimilarityRankRetrieverBuilder( this.minScore = minScore; } + public TextSimilarityRankRetrieverBuilder( + RetrieverBuilder retrieverBuilder, + String inferenceId, + String inferenceText, + String field, + int rankWindowSize, + Float minScore, + String retrieverName, + List preFilterQueryBuilders + ) { + this.retrieverBuilder = retrieverBuilder; + this.inferenceId = inferenceId; + this.inferenceText = inferenceText; + this.field = field; + this.rankWindowSize = rankWindowSize; + this.minScore = minScore; + this.retrieverName = retrieverName; + 
this.preFilterQueryBuilders = preFilterQueryBuilders; + } + + @Override + public QueryBuilder topDocsQuery() { + // the original matching set of the TextSimilarityRank retriever is specified by its nested retriever + return retrieverBuilder.topDocsQuery(); + } + + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + // rewrite prefilters + boolean hasChanged = false; + var newPreFilters = rewritePreFilters(ctx); + hasChanged |= newPreFilters != preFilterQueryBuilders; + + // rewrite nested retriever + RetrieverBuilder newRetriever = retrieverBuilder.rewrite(ctx); + hasChanged |= newRetriever != retrieverBuilder; + if (hasChanged) { + return new TextSimilarityRankRetrieverBuilder( + newRetriever, + inferenceId, + inferenceText, + field, + rankWindowSize, + minScore, + this.retrieverName, + newPreFilters + ); + } + return this; + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed); - // Combining with other rank builder (such as RRF) is not supported yet if (searchSourceBuilder.rankBuilder() != null) { throw new IllegalArgumentException("text similarity rank builder cannot be combined with other rank builders"); @@ -114,6 +167,13 @@ public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder ); } + /** + * Determines if this retriever contains sub-retrievers that need to be executed prior to search. + */ + @Override + public boolean isCompound() { + return retrieverBuilder.isCompound(); + } + @Override public String getName() { return TextSimilarityRankBuilder.NAME; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java index 51f240be6fbeb..332e567cae796 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilderTests.java @@ -8,10 +8,22 @@ package org.elasticsearch.xpack.inference.rank.textsimilarity; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.builder.SubSearchSourceBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.retriever.TestRetrieverBuilder; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.usage.SearchUsage; import org.elasticsearch.xcontent.NamedXContentRegistry; import
org.elasticsearch.xcontent.ParseField; @@ -23,6 +35,10 @@ import java.util.List; import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; public class TextSimilarityRankRetrieverBuilderTests extends AbstractXContentTestCase { @@ -32,8 +48,17 @@ public class TextSimilarityRankRetrieverBuilderTests extends AbstractXContentTes * for x-content testing. */ public static TextSimilarityRankRetrieverBuilder createRandomTextSimilarityRankRetrieverBuilder() { + return createRandomTextSimilarityRankRetrieverBuilder(TestRetrieverBuilder.createRandomTestRetrieverBuilder()); + } + + /** + * Creates a random {@link TextSimilarityRankRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. + */ + public static TextSimilarityRankRetrieverBuilder createRandomTextSimilarityRankRetrieverBuilder(RetrieverBuilder innerRetriever) { return new TextSimilarityRankRetrieverBuilder( - TestRetrieverBuilder.createRandomTestRetrieverBuilder(), + innerRetriever, randomAlphaOfLength(10), randomAlphaOfLength(20), randomAlphaOfLength(50), @@ -104,4 +129,122 @@ public void testParserDefaults() throws IOException { } } + public void testRewriteInnerRetriever() throws IOException { + final boolean[] rewritten = { false }; + List preFilterQueryBuilders = new ArrayList<>(); + if (randomBoolean()) { + for (int i = 0; i < randomIntBetween(1, 5); i++) { + preFilterQueryBuilders.add(RandomQueryBuilder.createQuery(random())); + } + } + RetrieverBuilder innerRetriever = new TestRetrieverBuilder("top-level-retriever") { + @Override + public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (randomBoolean()) { + return this; + } + rewritten[0] = true; + return new TestRetrieverBuilder("nested-rewritten-retriever") { + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + if (preFilterQueryBuilders.isEmpty() == false) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + + for (QueryBuilder preFilterQueryBuilder : preFilterQueryBuilders) { + boolQueryBuilder.filter(preFilterQueryBuilder); + } + boolQueryBuilder.must(new RangeQueryBuilder("some_field")); + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(boolQueryBuilder)); + } else { + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(new RangeQueryBuilder("some_field"))); + } + } + }; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + if (preFilterQueryBuilders.isEmpty() == false) { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + + for (QueryBuilder preFilterQueryBuilder : preFilterQueryBuilders) { + boolQueryBuilder.filter(preFilterQueryBuilder); + } + boolQueryBuilder.must(new TermQueryBuilder("field", "value")); + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(boolQueryBuilder)); + } else { + searchSourceBuilder.subSearches().add(new SubSearchSourceBuilder(new TermQueryBuilder("field", "value"))); + } + } + }; + TextSimilarityRankRetrieverBuilder textSimilarityRankRetrieverBuilder = createRandomTextSimilarityRankRetrieverBuilder( + innerRetriever + ); + textSimilarityRankRetrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); + 
SearchSourceBuilder source = new SearchSourceBuilder().retriever(textSimilarityRankRetrieverBuilder); + QueryRewriteContext queryRewriteContext = mock(QueryRewriteContext.class); + source = Rewriteable.rewrite(source, queryRewriteContext); + assertNull(source.retriever()); + if (false == preFilterQueryBuilders.isEmpty()) { + if (source.query() instanceof MatchAllQueryBuilder == false && source.query() instanceof MatchNoneQueryBuilder == false) { + assertThat(source.query(), instanceOf(BoolQueryBuilder.class)); + BoolQueryBuilder bq = (BoolQueryBuilder) source.query(); + assertFalse(bq.must().isEmpty()); + assertThat(bq.must().size(), equalTo(1)); + if (rewritten[0]) { + assertThat(bq.must().get(0), instanceOf(RangeQueryBuilder.class)); + } else { + assertThat(bq.must().get(0), instanceOf(TermQueryBuilder.class)); + } + for (int j = 0; j < bq.filter().size(); j++) { + assertEqualQueryOrMatchAllNone(bq.filter().get(j), preFilterQueryBuilders.get(j)); + } + } + } else { + if (rewritten[0]) { + assertThat(source.query(), instanceOf(RangeQueryBuilder.class)); + } else { + assertThat(source.query(), instanceOf(TermQueryBuilder.class)); + } + } + } + + public void testIsCompound() { + RetrieverBuilder compoundInnerRetriever = new TestRetrieverBuilder(ESTestCase.randomAlphaOfLengthBetween(5, 10)) { + @Override + public boolean isCompound() { + return true; + } + }; + RetrieverBuilder nonCompoundInnerRetriever = new TestRetrieverBuilder(ESTestCase.randomAlphaOfLengthBetween(5, 10)) { + @Override + public boolean isCompound() { + return false; + } + }; + TextSimilarityRankRetrieverBuilder compoundTextSimilarityRankRetrieverBuilder = createRandomTextSimilarityRankRetrieverBuilder( + compoundInnerRetriever + ); + assertTrue(compoundTextSimilarityRankRetrieverBuilder.isCompound()); + TextSimilarityRankRetrieverBuilder nonCompoundTextSimilarityRankRetrieverBuilder = createRandomTextSimilarityRankRetrieverBuilder( + nonCompoundInnerRetriever + ); + assertFalse(nonCompoundTextSimilarityRankRetrieverBuilder.isCompound()); + } + + public void testTopDocsQuery() { + RetrieverBuilder innerRetriever = new TestRetrieverBuilder(ESTestCase.randomAlphaOfLengthBetween(5, 10)) { + @Override + public QueryBuilder topDocsQuery() { + return new TermQueryBuilder("field", "value"); + } + }; + TextSimilarityRankRetrieverBuilder retriever = createRandomTextSimilarityRankRetrieverBuilder(innerRetriever); + assertThat(retriever.topDocsQuery(), instanceOf(TermQueryBuilder.class)); + } + + private static void assertEqualQueryOrMatchAllNone(QueryBuilder actual, QueryBuilder expected) { + assertThat(actual, anyOf(instanceOf(MatchAllQueryBuilder.class), instanceOf(MatchNoneQueryBuilder.class), equalTo(expected))); + } + } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml index 530be2341c9c8..e2c1417057578 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/70_text_similarity_rank_retriever.yml @@ -60,6 +60,7 @@ setup: text: "Sun Moon Lake is a lake in Nantou County, Taiwan. It is the largest lake in Taiwan." 
topic: [ "geography" ] refresh: true + --- "Simple text similarity rank retriever": diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java index 8f078c0c4d116..4dbc9a6a54dcf 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.rank.rrf; +import org.apache.lucene.search.Explanation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Arrays; @@ -54,6 +56,32 @@ public RRFRankDoc(StreamInput in) throws IOException { scores = in.readFloatArray(); } + @Override + public Explanation explain() { + // ideally we'd need access to the rank constant to provide score info for this one + int queries = positions.length; + Explanation[] details = new Explanation[queries]; + for (int i = 0; i < queries; i++) { + final String queryIndex = "at index [" + i + "]"; + if (positions[i] == RRFRankDoc.NO_RANK) { + final String description = "rrf score: [0], result not found in query " + queryIndex; + details[i] = Explanation.noMatch(description); + } else { + final int rank = positions[i] + 1; + details[i] = Explanation.match(rank, "rank [" + rank + "] in query " + queryIndex); + } + } + return Explanation.match( + score, + "rrf score: [" + + score + + "] computed for initial ranks " + + Arrays.toString(Arrays.stream(positions).map(x -> x + 1).toArray()) + + " as sum of [1 / (rank + rankConstant)] for each query", + details + ); + } + @Override public void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(rank); @@ -96,4 +124,10 @@ public String toString() { public String getWriteableName() { return NAME; } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("positions", positions); + builder.field("scores", scores); + } } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index e5a7983107278..0d6208e474eea 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; @@ -74,6 +75,11 @@ public static RRFRetrieverBuilder fromXContent(XContentParser parser, RetrieverP int rankWindowSize = RRFRankBuilder.DEFAULT_RANK_WINDOW_SIZE; int rankConstant = RRFRankBuilder.DEFAULT_RANK_CONSTANT; + @Override + public QueryBuilder topDocsQuery() { + throw new IllegalStateException("{" + getName() + "} cannot be nested"); + } + @Override public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { if (compoundUsed) { diff --git
a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankDocTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankDocTests.java index 8c0eafe3ab022..0b8ee30fe0680 100644 --- a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankDocTests.java +++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankDocTests.java @@ -71,4 +71,10 @@ protected RRFRankDoc mutateInstance(RRFRankDoc instance) throws IOException { } return mutated; } + + public void testExplain() { + // explain() should be deterministic and must not throw: two invocations must render identically + RRFRankDoc instance = createTestRRFRankDoc(); + assertEquals(instance.explain().toString(), instance.explain().toString()); + } } From 7563a724f05fab579d79144b2b2ac570bf35bcb0 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis <pmpailis@gmail.com> Date: Mon, 26 Aug 2024 16:15:31 +0300 Subject: [PATCH 074/352] Updating retriever documentation to better explain how filters are applied (#112201) --- docs/reference/rest-api/common-parms.asciidoc | 16 +++++++--- docs/reference/search/retriever.asciidoc | 23 ++++++++++--- docs/reference/search/rrf.asciidoc | 6 ++-- .../retrievers-overview.asciidoc | 32 +++++++++---------- .../action/search/RankFeaturePhase.java | 6 ++-- .../QueryPhaseRankCoordinatorContext.java | 2 +- .../context/QueryPhaseRankShardContext.java | 2 +- 7 files changed, 53 insertions(+), 34 deletions(-) diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index e5ab10b7d71ba..fabd495cdc525 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -1327,13 +1327,21 @@ that lower ranked documents have more influence. This value must be greater than or equal to `1`. Defaults to `60`. end::rrf-rank-constant[] -tag::rrf-window-size[] -`window_size`:: +tag::rrf-rank-window-size[] +`rank_window_size`:: (Optional, integer) + This value determines the size of the individual result sets per query. A higher value will improve result relevance at the cost of performance. The final ranked result set is pruned down to the search request's <>. -`window_size` must be greater than or equal to `size` and greater than or equal to `1`. +`rank_window_size` must be greater than or equal to `size` and greater than or equal to `1`. Defaults to the `size` parameter. -end::rrf-window-size[] +end::rrf-rank-window-size[] + +tag::rrf-filter[] +`filter`:: +(Optional, <>) ++ +Applies the specified <> to all of the specified sub-retrievers, +according to each retriever's specifications. +end::rrf-filter[] diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index b52b296220029..58cc8ce9ef459 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -198,7 +198,7 @@ GET my-embeddings/_search An <> retriever returns top documents based on the RRF formula, equally weighting two or more child retrievers. -Reciprocal rank fusion (RRF) is a method for combining multiple result +Reciprocal rank fusion (RRF) is a method for combining multiple result sets with different relevance indicators into a single result set.
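As a rough, hedged illustration of what that fusion computes (a minimal sketch of the `sum of [1 / (rank + rankConstant)]` formula quoted in the explain output above; `RrfSketch` and `fuse` are hypothetical names, not the Elasticsearch implementation):

[source,java]
----
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Minimal reciprocal rank fusion sketch: a document's fused score is the sum,
// over every result set it appears in, of 1 / (rankConstant + rank), rank starting at 1.
class RrfSketch {
    static Map<String, Double> fuse(List<List<String>> rankedResultSets, int rankConstant) {
        Map<String, Double> scores = new HashMap<>();
        for (List<String> resultSet : rankedResultSets) {
            for (int i = 0; i < resultSet.size(); i++) {
                // i is 0-based, so the document's rank is i + 1
                scores.merge(resultSet.get(i), 1.0 / (rankConstant + i + 1), Double::sum);
            }
        }
        return scores; // sort entries by descending score to obtain the fused ranking
    }
}
----

Documents that rank well in several result sets accumulate several reciprocal terms, which is why agreement between child retrievers dominates the fused order.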
===== Parameters @@ -207,7 +209,9 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-constant] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-window-size] +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-window-size] + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-filter] ===== Restrictions @@ -225,7 +227,7 @@ A simple hybrid search example (lexical search + dense vector search) combining ---- GET /restaurants/_search { - "retriever": { + "retriever": { <1> "retrievers": [ <2> { @@ -340,6 +342,10 @@ Currently you can: ** Refer to the <> on this page for a step-by-step guide. ===== Parameters +`retriever`:: +(Required, <>) ++ +The child retriever that generates the initial set of top documents to be re-ranked. `field`:: (Required, `string`) @@ -366,6 +372,13 @@ The number of top documents to consider in the re-ranking process. Defaults to ` + Sets a minimum threshold score for including documents in the re-ranked results. Documents with similarity scores below this threshold will be excluded. Note that score calculations vary depending on the model used. +`filter`:: +(Optional, <>) ++ +Applies the specified <> to the child <>. +If the child retriever already specifies any filters, then this top-level filter is applied in conjunction +with the filter defined in the child retriever. + ===== Restrictions A text similarity re-ranker retriever is a compound retriever. Child retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. @@ -441,13 +454,13 @@ eland_import_hub_model \ + [source,js] ---- -PUT _inference/rerank/my-msmarco-minilm-model +PUT _inference/rerank/my-msmarco-minilm-model { "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1, - "model_id": "cross-encoder__ms-marco-minilm-l-6-v2" + "model_id": "cross-encoder__ms-marco-minilm-l-6-v2" } } ---- diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index fb474fe6bf4e6..2525dfff23b94 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -1,9 +1,7 @@ [[rrf]] === Reciprocal rank fusion -preview::["This functionality is in technical preview and may be changed or removed in a future release. -The syntax will likely change before GA. -Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] +preview::["This functionality is in technical preview and may be changed or removed in a future release. The syntax will likely change before GA. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf[Reciprocal rank fusion (RRF)] is a method for combining multiple result sets with different relevance indicators into a single result set.
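A hedged worked example of the effect of `rank_constant` (using its default of `60` and hypothetical ranks): a document ranked 1st in one result set and 3rd in another receives a fused score of 1/(60+1) + 1/(60+3) ≈ 0.01639 + 0.01587 ≈ 0.03226, while a document ranked 1st in only a single result set scores 1/(60+1) ≈ 0.01639. Agreement across result sets therefore outweighs a single high rank, and larger values of `rank_constant` flatten the differences between ranks.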
@@ -43,7 +41,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-retrievers] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-constant] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-window-size] +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=rrf-rank-window-size] An example request using RRF: diff --git a/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc index 99659ae76e092..c0fe7471946f3 100644 --- a/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-reranking/retrievers-overview.asciidoc @@ -13,23 +13,23 @@ For implementation details, including notable restrictions, check out the [discrete] [[retrievers-overview-types]] -==== Retriever types +==== Retriever types Retrievers come in various types, each tailored for different search operations. The following retrievers are currently available: -* <>. Returns top documents from a -traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. -Mimics a traditional query but in the context of a retriever framework. This -ensures backward compatibility as existing `_search` requests remain supported. -That way you can transition to the new abstraction at your own pace without +* <>. Returns top documents from a +traditional https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl.html[query]. +Mimics a traditional query but in the context of a retriever framework. This +ensures backward compatibility as existing `_search` requests remain supported. +That way you can transition to the new abstraction at your own pace without mixing syntaxes. -* <>. Returns top documents from a <>, +* <>. Returns top documents from a <>, in the context of a retriever framework. * <>. Combines and ranks multiple first-stage retrievers using -the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple result sets +the reciprocal rank fusion (RRF) algorithm. Allows you to combine multiple result sets with different relevance indicators into a single result set. -An RRF retriever is a *compound retriever*, where its `filter` element is +An RRF retriever is a *compound retriever*, where its `filter` element is propagated to its sub retrievers. + Sub retrievers may not use elements that are restricted by having a compound retriever as part of the retriever tree. @@ -38,7 +38,7 @@ See the <> for detaile Requires first creating a `rerank` task using the <>. [discrete] -==== What makes retrievers useful? +==== What makes retrievers useful? Here's an overview of what makes retrievers useful and how they differ from regular queries. @@ -140,7 +140,7 @@ GET example-index/_search ], "rank":{ "rrf":{ - "window_size":50, + "rank_window_size":50, "rank_constant":20 } } @@ -155,14 +155,14 @@ GET example-index/_search Here are some important terms: -* *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to +* *Retrieval Pipeline*. Defines the entire retrieval and ranking logic to produce top hits. * *Retriever Tree*. A hierarchical structure that defines how retrievers interact. * *First-stage Retriever*. Returns an initial set of candidate documents. -* *Compound Retriever*. Builds on one or more retrievers, +* *Compound Retriever*. Builds on one or more retrievers, enhancing document retrieval and ranking logic. 
-* *Combiners*. Compound retrievers that merge top hits -from multiple sub-retrievers. +* *Combiners*. Compound retrievers that merge top hits +from multiple sub-retrievers. * *Rerankers*. Special compound retrievers that reorder hits and may adjust the number of hits, with distinctions between first-stage and second-stage rerankers. [discrete] @@ -180,4 +180,4 @@ Refer to the {kibana-ref}/playground.html[Playground documentation] for more inf [[retrievers-overview-api-reference]] ==== API reference -For implementation details, including notable restrictions, check out the <> in the Search API docs. \ No newline at end of file +For implementation details, including notable restrictions, check out the <> in the Search API docs. diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index 5b42afcb86928..0f7cbd65a63c2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -28,8 +28,8 @@ /** * This search phase is responsible for executing any re-ranking needed for the given search request, iff that is applicable. - * It starts by retrieving {@code num_shards * window_size} results from the query phase and reduces them to a global list of - * the top {@code window_size} results. It then reaches out to the shards to extract the needed feature data, + * It starts by retrieving {@code num_shards * rank_window_size} results from the query phase and reduces them to a global list of + * the top {@code rank_window_size} results. It then reaches out to the shards to extract the needed feature data, * and finally passes all this information to the appropriate {@code RankFeatureRankCoordinatorContext} which is responsible for reranking * the results. If no rank query is specified, it proceeds directly to the next phase (FetchSearchPhase) by first reducing the results. */ @@ -88,7 +88,7 @@ public void onFailure(Exception e) { void innerRun() throws Exception { // if the RankBuilder specifies a QueryPhaseCoordinatorContext, it will be called as part of the reduce call - // to operate on the first `window_size * num_shards` results and merge them appropriately. + // to operate on the first `rank_window_size * num_shards` results and merge them appropriately. SearchPhaseController.ReducedQueryPhase reducedQueryPhase = queryPhaseResults.reduce(); RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext = coordinatorContext(context.getRequest().source()); if (rankFeaturePhaseRankCoordinatorContext != null) { diff --git a/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankCoordinatorContext.java b/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankCoordinatorContext.java index 1be8544758a8f..4d1c9c6785453 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankCoordinatorContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankCoordinatorContext.java @@ -17,7 +17,7 @@ /** * {@link QueryPhaseRankCoordinatorContext} is running on the coordinator node and is * responsible for combining the query phase results from the shards and rank them accordingly. - * The output is a `window_size` ranked list of ordered results from all shards. + * The output is a `rank_window_size` ranked list of ordered results from all shards. 
* Note: Currently this can use only sort by score; sort by field is not supported. */ public abstract class QueryPhaseRankCoordinatorContext { diff --git a/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankShardContext.java b/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankShardContext.java index f562977afb857..fa413485797e8 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankShardContext.java +++ b/server/src/main/java/org/elasticsearch/search/rank/context/QueryPhaseRankShardContext.java @@ -15,7 +15,7 @@ import java.util.List; /** - * {@link QueryPhaseRankShardContext} is used to generate the top {@code window_size} + * {@link QueryPhaseRankShardContext} is used to generate the top {@code rank_window_size} * results on each shard. It specifies the queries to run during {@code QueryPhase} and is responsible for combining all query scores and * order all results through the {@link QueryPhaseRankShardContext#combineQueryPhaseResults} method. */ From 0d371978e888cf7c7186f6f04223f6cdae88d4fa Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Mon, 26 Aug 2024 09:53:26 -0400 Subject: [PATCH 075/352] Search coordinator uses event.ingested in cluster state to do rewrites (#111523) * Search coordinator uses event.ingested in cluster state to do rewrites Min/max range for the event.ingested timestamp field (part of Elastic Common Schema) was added to IndexMetadata in cluster state for searchable snapshots in #106252. This commit modifies the search coordinator to rewrite searches to MatchNone if the query searches a range of event.ingested that, from the min/max range in cluster state, is known to not overlap. This is the same behavior we currently have for the @timestamp field. 
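The core of this rewrite is a disjointness test between the range requested by the query and the min/max range recorded for the field in cluster state. A minimal sketch of that idea, under assumed illustrative names (the real logic lives in RangeQueryBuilder#getRelation and CoordinatorRewriteContext in the diff below):

[source,java]
----
// Hedged sketch: the coordinator can skip an index's shards when the query's
// range cannot overlap the [indexMin, indexMax] range recorded in cluster state.
// 'queryFrom', 'queryTo' and this helper are illustrative, not the Elasticsearch API.
static boolean rangesAreDisjoint(long queryFrom, long queryTo, long indexMin, long indexMax) {
    // disjoint iff the query ends before the first ingested value
    // or starts after the last one
    return queryTo < indexMin || queryFrom > indexMax;
}
----

When the test holds, the query is rewritten to match none and the shards are skipped; the commit below wires this up for event.ingested exactly as it already works for @timestamp.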
--- docs/changelog/111523.yaml | 5 + .../TimestampFieldMapperServiceTests.java | 4 +- .../query/CoordinatorRewriteContext.java | 61 ++- .../CoordinatorRewriteContextProvider.java | 27 +- .../index/query/RangeQueryBuilder.java | 18 +- .../indices/DateFieldRangeInfo.java | 28 ++ .../elasticsearch/indices/IndicesService.java | 19 +- .../indices/TimestampFieldMapperService.java | 54 ++- .../CanMatchPreFilterSearchPhaseTests.java | 344 ++++++++++++--- .../test/AbstractBuilderTestCase.java | 11 +- .../index/engine/frozen/FrozenIndexIT.java | 163 ++++++- ...pshotsCanMatchOnCoordinatorIntegTests.java | 409 ++++++++++++++++-- 12 files changed, 962 insertions(+), 181 deletions(-) create mode 100644 docs/changelog/111523.yaml create mode 100644 server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java diff --git a/docs/changelog/111523.yaml b/docs/changelog/111523.yaml new file mode 100644 index 0000000000000..202d16c5a426d --- /dev/null +++ b/docs/changelog/111523.yaml @@ -0,0 +1,5 @@ +pr: 111523 +summary: Search coordinator uses `event.ingested` in cluster state to do rewrites +area: Search +type: enhancement +issues: [] diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java index 97959fa385241..eb35c44d30331 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/TimestampFieldMapperServiceTests.java @@ -61,7 +61,7 @@ public void testGetTimestampFieldTypeForTsdbDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, notNullValue()); } @@ -70,7 +70,7 @@ public void testGetTimestampFieldTypeForDataStream() throws IOException { DocWriteResponse indexResponse = indexDoc(); var indicesService = getInstanceFromNode(IndicesService.class); - var result = indicesService.getTimestampFieldType(indexResponse.getShardId().getIndex()); + var result = indicesService.getTimestampFieldTypeInfo(indexResponse.getShardId().getIndex()); assertThat(result, nullValue()); } diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index ac6512b0839e6..7cb1b04972bfa 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -9,11 +9,13 @@ package org.elasticsearch.index.query; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappingLookup; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.Collections; @@ -23,19 +25,24 @@ * Context object used to 
rewrite {@link QueryBuilder} instances into a simplified version in the coordinator. * Instances of this object rely on information stored in the {@code IndexMetadata} for certain indices. * Right now this context object is able to rewrite range queries that include a known timestamp field - * (i.e. the timestamp field for DataStreams) into a MatchNoneQueryBuilder and skip the shards that - * don't hold queried data. See IndexMetadata#getTimestampRange() for more details + * (i.e. the timestamp field for DataStreams or the 'event.ingested' field in ECS) into a MatchNoneQueryBuilder + * and skip the shards that don't hold queried data. See IndexMetadata for more details. */ public class CoordinatorRewriteContext extends QueryRewriteContext { - private final IndexLongFieldRange indexLongFieldRange; - private final DateFieldMapper.DateFieldType timestampFieldType; + private final DateFieldRangeInfo dateFieldRangeInfo; + /** + * Context for coordinator search rewrites based on time ranges for the @timestamp field and/or 'event.ingested' field. + * @param parserConfig parser configuration for reading queries + * @param client client used by rewrites that require one + * @param nowInMillis supplier of the current time in milliseconds + * @param dateFieldRangeInfo range and field type info for @timestamp and 'event.ingested' + */ public CoordinatorRewriteContext( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, - IndexLongFieldRange indexLongFieldRange, - DateFieldMapper.DateFieldType timestampFieldType + DateFieldRangeInfo dateFieldRangeInfo ) { super( parserConfig, @@ -54,29 +61,37 @@ public CoordinatorRewriteContext( null, null ); - this.indexLongFieldRange = indexLongFieldRange; - this.timestampFieldType = timestampFieldType; - } - - long getMinTimestamp() { - return indexLongFieldRange.getMin(); - } - - long getMaxTimestamp() { - return indexLongFieldRange.getMax(); - } - - boolean hasTimestampData() { - return indexLongFieldRange.isComplete() && indexLongFieldRange != IndexLongFieldRange.EMPTY; + this.dateFieldRangeInfo = dateFieldRangeInfo; } + /** + * @param fieldName Must be one of DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return the MappedFieldType for the field. Returns null if fieldName is not one of the allowed field names. + */ @Nullable public MappedFieldType getFieldType(String fieldName) { - if (fieldName.equals(timestampFieldType.name()) == false) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.timestampFieldType(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.eventIngestedFieldType(); + } else { return null; } + } - return timestampFieldType; + /** + * @param fieldName Must be one of DataStream.TIMESTAMP_FIELD_NAME or IndexMetadata.EVENT_INGESTED_FIELD_NAME + * @return IndexLongFieldRange with min/max ranges for the field. Returns null if fieldName is not one of the allowed field names.
+ */ + @Nullable + public IndexLongFieldRange getFieldRange(String fieldName) { + if (DataStream.TIMESTAMP_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.timestampRange(); + } else if (IndexMetadata.EVENT_INGESTED_FIELD_NAME.equals(fieldName)) { + return dateFieldRangeInfo.eventIngestedRange(); + } else { + return null; + } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java index e44861b4afe8a..ec53dfe5c0d05 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContextProvider.java @@ -14,6 +14,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.util.function.Function; @@ -25,14 +26,14 @@ public class CoordinatorRewriteContextProvider { private final Client client; private final LongSupplier nowInMillis; private final Supplier clusterStateSupplier; - private final Function mappingSupplier; + private final Function mappingSupplier; public CoordinatorRewriteContextProvider( XContentParserConfiguration parserConfig, Client client, LongSupplier nowInMillis, Supplier clusterStateSupplier, - Function mappingSupplier + Function mappingSupplier ) { this.parserConfig = parserConfig; this.client = client; @@ -49,18 +50,30 @@ public CoordinatorRewriteContext getCoordinatorRewriteContext(Index index) { if (indexMetadata == null) { return null; } - DateFieldMapper.DateFieldType dateFieldType = mappingSupplier.apply(index); - if (dateFieldType == null) { + DateFieldRangeInfo dateFieldRangeInfo = mappingSupplier.apply(index); + if (dateFieldRangeInfo == null) { return null; } + DateFieldMapper.DateFieldType timestampFieldType = dateFieldRangeInfo.timestampFieldType(); IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + if (timestampRange.containsAllShardRanges() == false) { - timestampRange = indexMetadata.getTimeSeriesTimestampRange(dateFieldType); - if (timestampRange == null) { + // if @timestamp range is not present or not ready in cluster state, fallback to using time series range (if present) + timestampRange = indexMetadata.getTimeSeriesTimestampRange(timestampFieldType); + // if timestampRange in the time series is null AND the eventIngestedRange is not ready for use, return null (no coord rewrite) + if (timestampRange == null && eventIngestedRange.containsAllShardRanges() == false) { return null; } } - return new CoordinatorRewriteContext(parserConfig, client, nowInMillis, timestampRange, dateFieldType); + // the DateFieldRangeInfo from the mappingSupplier only has field types, but not ranges + // so create a new object with ranges pulled from cluster state + return new CoordinatorRewriteContext( + parserConfig, + client, + nowInMillis, + new DateFieldRangeInfo(timestampFieldType, timestampRange, dateFieldRangeInfo.eventIngestedFieldType(), eventIngestedRange) + ); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 4d2a6d3eaecdb..8b154b3845964 100644 --- 
a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -436,15 +437,22 @@ public String getWriteableName() { protected MappedFieldType.Relation getRelation(final CoordinatorRewriteContext coordinatorRewriteContext) { final MappedFieldType fieldType = coordinatorRewriteContext.getFieldType(fieldName); if (fieldType instanceof final DateFieldMapper.DateFieldType dateFieldType) { - if (coordinatorRewriteContext.hasTimestampData() == false) { + IndexLongFieldRange fieldRange = coordinatorRewriteContext.getFieldRange(fieldName); + if (fieldRange.isComplete() == false || fieldRange == IndexLongFieldRange.EMPTY) { + // if not all shards for this (frozen) index have reported ranges to cluster state, OR if they + // have reported in and the range is empty (no data for that field), then return DISJOINT in order + // to rewrite the query to MatchNone return MappedFieldType.Relation.DISJOINT; } - long minTimestamp = coordinatorRewriteContext.getMinTimestamp(); - long maxTimestamp = coordinatorRewriteContext.getMaxTimestamp(); + if (fieldRange == IndexLongFieldRange.UNKNOWN) { + // do a full search if UNKNOWN for whatever reason (e.g., event.ingested is UNKNOWN in a + // mixed-version cluster that includes nodes from before event.ingested was added to cluster state) + return MappedFieldType.Relation.INTERSECTS; + } DateMathParser dateMathParser = getForceDateParser(); return dateFieldType.isFieldWithinQuery( - minTimestamp, - maxTimestamp, + fieldRange.getMin(), + fieldRange.getMax(), from, to, includeLower, diff --git a/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java new file mode 100644 index 0000000000000..b631806e3ce95 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/DateFieldRangeInfo.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices; + +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.shard.IndexLongFieldRange; + +/** + * Data holder of timestamp fields held in cluster state IndexMetadata.
+ * @param timestampFieldType field type for the @timestamp field + * @param timestampRange min/max range for the @timestamp field (in a specific index) + * @param eventIngestedFieldType field type for the 'event.ingested' field + * @param eventIngestedRange min/max range for the 'event.ingested' field (in a specific index) + */ +public record DateFieldRangeInfo( + DateFieldMapper.DateFieldType timestampFieldType, + IndexLongFieldRange timestampRange, + DateFieldMapper.DateFieldType eventIngestedFieldType, + IndexLongFieldRange eventIngestedRange +) { + +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 82a5c96bb7dc2..decc082d314e6 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -99,7 +99,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; -import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; @@ -1766,7 +1765,13 @@ public DataRewriteContext getDataRewriteContext(LongSupplier nowInMillis) { } public CoordinatorRewriteContextProvider getCoordinatorRewriteContextProvider(LongSupplier nowInMillis) { - return new CoordinatorRewriteContextProvider(parserConfig, client, nowInMillis, clusterService::state, this::getTimestampFieldType); + return new CoordinatorRewriteContextProvider( + parserConfig, + client, + nowInMillis, + clusterService::state, + this::getTimestampFieldTypeInfo + ); } /** @@ -1856,14 +1861,16 @@ public boolean allPendingDanglingIndicesWritten() { } /** - * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: + * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index, + * or {@code null} if: * - the index is not found, * - the field is not found, or - * - the field is not a timestamp field. + * - the mapping is not known yet, or + * - the index does not have a useful timestamp field. */ @Nullable - public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { - return timestampFieldMapperService.getTimestampFieldType(index); + public DateFieldRangeInfo getTimestampFieldTypeInfo(Index index) { + return timestampFieldMapperService.getTimestampFieldTypeInfo(index); } public IndexScopedSettings getIndexScopedSettings() { diff --git a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java index 4caeaef6514e5..b139fca5c2acc 100644 --- a/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java +++ b/server/src/main/java/org/elasticsearch/indices/TimestampFieldMapperService.java @@ -42,8 +42,9 @@ import static org.elasticsearch.core.Strings.format; /** - * Tracks the mapping of the {@code @timestamp} field of immutable indices that expose their timestamp range in their index metadata. - * Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of this one field from the mapping here.
+ * Tracks the mapping of the '@timestamp' and 'event.ingested' fields of immutable indices that expose their timestamp range in their + * index metadata. Coordinating nodes do not have (easy) access to mappings for all indices, so we extract the type of these two fields + * from the mapping here, since timestamp fields can have millis or nanos level resolution. */ public class TimestampFieldMapperService extends AbstractLifecycleComponent implements ClusterStateApplier { @@ -53,10 +54,12 @@ public class TimestampFieldMapperService extends AbstractLifecycleComponent impl private final ExecutorService executor; // single thread to construct mapper services async as needed /** - * The type of the {@code @timestamp} field keyed by index. Futures may be completed with {@code null} to indicate that there is - * no usable {@code @timestamp} field. + * The type of the 'event.ingested' and/or '@timestamp' fields keyed by index. + * Each future resolves to a DateFieldRangeInfo holding the types of both fields ('@timestamp' and 'event.ingested'). + * Futures may be completed with {@code null} to indicate that there is + * no usable timestamp field. */ - private final Map> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap(); + private final Map> fieldTypesByIndex = ConcurrentCollections.newConcurrentMap(); public TimestampFieldMapperService(Settings settings, ThreadPool threadPool, IndicesService indicesService) { this.indicesService = indicesService; @@ -103,7 +106,7 @@ public void applyClusterState(ClusterChangedEvent event) { if (hasUsefulTimestampField(indexMetadata) && fieldTypesByIndex.containsKey(index) == false) { logger.trace("computing timestamp mapping for {}", index); - final PlainActionFuture future = new PlainActionFuture<>(); + final PlainActionFuture future = new PlainActionFuture<>(); fieldTypesByIndex.put(index, future); final IndexService indexService = indicesService.indexService(index); @@ -148,29 +151,45 @@ private static boolean hasUsefulTimestampField(IndexMetadata indexMetadata) { return true; } - final IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); - return timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN; + IndexLongFieldRange timestampRange = indexMetadata.getTimestampRange(); + if (timestampRange.isComplete() && timestampRange != IndexLongFieldRange.UNKNOWN) { + return true; + } + + IndexLongFieldRange eventIngestedRange = indexMetadata.getEventIngestedRange(); + return eventIngestedRange.isComplete() && eventIngestedRange != IndexLongFieldRange.UNKNOWN; } - private static DateFieldMapper.DateFieldType fromMapperService(MapperService mapperService) { - final MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME); - if (mappedFieldType instanceof DateFieldMapper.DateFieldType) { - return (DateFieldMapper.DateFieldType) mappedFieldType; - } else { + private static DateFieldRangeInfo fromMapperService(MapperService mapperService) { + DateFieldMapper.DateFieldType timestampFieldType = null; + DateFieldMapper.DateFieldType eventIngestedFieldType = null; + + MappedFieldType mappedFieldType = mapperService.fieldType(DataStream.TIMESTAMP_FIELD_NAME); + if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) { + timestampFieldType = dateFieldType; + } + mappedFieldType = mapperService.fieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + if (mappedFieldType instanceof DateFieldMapper.DateFieldType dateFieldType) { + eventIngestedFieldType = dateFieldType; + } + if (timestampFieldType == null &&
eventIngestedFieldType == null) { return null; } + // the mapper only fills in the field types, not the actual range values + return new DateFieldRangeInfo(timestampFieldType, null, eventIngestedFieldType, null); } /** - * @return the field type of the {@code @timestamp} field of the given index, or {@code null} if: + * @return DateFieldRangeInfo holding the field types of the {@code @timestamp} and {@code event.ingested} fields of the index, + * or {@code null} if: * - the index is not found, * - the field is not found, - * - the mapping is not known yet, or - * - the field is not a timestamp field. + * - the mapping is not known yet, or + * - the index does not have a useful timestamp field. */ @Nullable - public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { - final PlainActionFuture future = fieldTypesByIndex.get(index); + public DateFieldRangeInfo getTimestampFieldTypeInfo(Index index) { + final PlainActionFuture future = fieldTypesByIndex.get(index); if (future == null || future.isDone() == false) { return null; } @@ -181,5 +200,4 @@ public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { throw new UncategorizedExecutionException("An error occurred fetching timestamp field type for " + index, e); } } - } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 70c4d73f578b3..c450fd8a9c39c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.CanMatchNodeResponse.ResponseOrFailure; @@ -26,8 +27,6 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -38,6 +37,7 @@ import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder; @@ -72,6 +72,7 @@ import static org.elasticsearch.action.search.SearchAsyncActionTests.getShardsIter; import static org.elasticsearch.core.Types.forciblyCast; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; @@ -464,7 +465,17 @@ public void sendCanMatch( } } - public void testCanMatchFilteringOnCoordinatorThatCanBeSkipped() throws Exception { + // test using @timestamp + public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingTimestamp() throws Exception { + doCanMatchFilteringOnCoordinatorThatCanBeSkipped(DataStream.TIMESTAMP_FIELD_NAME); + } + + // test using event.ingested
+ public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedUsingEventIngested() throws Exception { + doCanMatchFilteringOnCoordinatorThatCanBeSkipped(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + } + + public void doCanMatchFilteringOnCoordinatorThatCanBeSkipped(String timestampField) throws Exception { Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); @@ -475,15 +486,10 @@ long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( - dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, - indexMinTimestamp, - indexMaxTimestamp - ); + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timestampField, indexMinTimestamp, indexMaxTimestamp); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timestampField); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -516,12 +522,12 @@ // When all the shards can be skipped we should query at least 1 // in order to get a valid search response. if (regularIndexShardCount == 0) { - assertThat(nonSkippedShards.size(), equalTo(1)); + assertThat(nonSkippedShards.size(), equalTo(1)); // FIXME - sometimes fails here with "expected 1 but was 11" } else { boolean allNonSkippedShardsAreFromRegularIndices = nonSkippedShards.stream() .allMatch(shardIterator -> regularIndices.contains(shardIterator.shardId().getIndex())); - assertThat(allNonSkippedShardsAreFromRegularIndices, equalTo(true)); + assertThat(allNonSkippedShardsAreFromRegularIndices, equalTo(true)); // FIXME - or, in this branch, fails here with "false" } boolean allSkippedShardAreFromDataStream = skippedShards.stream() @@ -535,26 +541,107 @@ ); } - public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { - Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); - Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + public void testCoordinatorCanMatchFilteringThatCanBeSkippedUsingBothTimestamps() throws Exception { + Index dataStreamIndex1 = new Index(".ds-twoTimestamps0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-twoTimestamps0002", UUIDs.base64UUID()); DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); - List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + List regularIndices = randomList(1, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); long indexMinTimestamp = randomLongBetween(0, 5000); long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for
(Index dataStreamIndex : dataStream.getIndices()) { + // use same range for both @timestamp and event.ingested + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( dataStreamIndex, + indexMinTimestamp, + indexMaxTimestamp, indexMinTimestamp, indexMaxTimestamp ); } + /** + * Expected behavior: if either @timestamp or 'event.ingested' filters in the query are "out of range" (do not + * overlap the range in cluster state), then all shards in the datastream should be skipped. + * Only if both the @timestamp and 'event.ingested' filters are "in range" should the data stream shards be searched. + */ + boolean timestampQueryOutOfRange = randomBoolean(); + boolean eventIngestedQueryOutOfRange = randomBoolean(); + int timestampOffset = timestampQueryOutOfRange ? 1 : -500; + int eventIngestedOffset = eventIngestedQueryOutOfRange ? 1 : -500; + + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + tsRangeQueryBuilder.from(indexMaxTimestamp + timestampOffset).to(indexMaxTimestamp + 2); + + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + eventIngestedRangeQueryBuilder.from(indexMaxTimestamp + eventIngestedOffset).to(indexMaxTimestamp + 2); + + BoolQueryBuilder queryBuilder = new BoolQueryBuilder().filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); + + if (randomBoolean()) { + // Add an additional filter that cannot be evaluated in the coordinator but shouldn't + // affect the end result as we're filtering + queryBuilder.filter(new TermQueryBuilder("fake", "value")); + } + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + regularIndices, + contextProviderBuilder.build(), + queryBuilder, + List.of(), + null, + (updatedSearchShardIterators, requests) -> { + List skippedShards = updatedSearchShardIterators.stream().filter(SearchShardIterator::skip).toList(); + List nonSkippedShards = updatedSearchShardIterators.stream() + .filter(searchShardIterator -> searchShardIterator.skip() == false) + .toList(); + + if (timestampQueryOutOfRange || eventIngestedQueryOutOfRange) { + // data stream shards should have been skipped + assertThat(skippedShards.size(), greaterThan(0)); + boolean allSkippedShardAreFromDataStream = skippedShards.stream() + .allMatch(shardIterator -> dataStream.getIndices().contains(shardIterator.shardId().getIndex())); + assertThat(allSkippedShardAreFromDataStream, equalTo(true)); + + boolean allNonSkippedShardsAreFromRegularIndices = nonSkippedShards.stream() + .allMatch(shardIterator -> regularIndices.contains(shardIterator.shardId().getIndex())); + assertThat(allNonSkippedShardsAreFromRegularIndices, equalTo(true)); + + boolean allRequestsWereTriggeredAgainstRegularIndices = requests.stream() + .allMatch(request -> regularIndices.contains(request.shardId().getIndex())); + assertThat(allRequestsWereTriggeredAgainstRegularIndices, equalTo(true)); + + } else { + assertThat(skippedShards.size(), equalTo(0)); + long countNonSkippedShardsFromDatastream = nonSkippedShards.stream() + .filter(iter -> dataStream.getIndices().contains(iter.shardId().getIndex())) + .count(); + assertThat(countNonSkippedShardsFromDatastream, greaterThan(0L)); + } + } + ); + } + + public void testCanMatchFilteringOnCoordinatorParsingFails() throws Exception { + Index dataStreamIndex1 = new
Index(".ds-mydata0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); + + List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + + long indexMinTimestamp = randomLongBetween(0, 5000); + long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + for (Index dataStreamIndex : dataStream.getIndices()) { + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); + } + + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); // Query with a non default date format rangeQueryBuilder.from("2020-1-01").to("2021-1-01"); @@ -585,23 +672,20 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + long indexMinTimestamp = 10; long indexMaxTimestamp = 20; StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index dataStreamIndex : dataStream.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps( - dataStreamIndex, - DataStream.TIMESTAMP_FIELD_NAME, - indexMinTimestamp, - indexMaxTimestamp - ); + contextProviderBuilder.addIndexMinMaxTimestamps(dataStreamIndex, timeField, indexMinTimestamp, indexMaxTimestamp); } BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); // Query inside of the data stream index range if (randomBoolean()) { // Query generation - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField); // We query a range within the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMinTimestamp).to(indexMaxTimestamp); @@ -614,8 +698,7 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep } } else { // We query a range outside of the timestamp range covered by both datastream indices - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestamp + 1) - .to(indexMaxTimestamp + 2); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(timeField).from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); @@ -635,17 +718,86 @@ public void testCanMatchFilteringOnCoordinatorThatCanNotBeSkipped() throws Excep ); } + public void testCanMatchFilteringOnCoordinatorWithTimestampAndEventIngestedThatCanNotBeSkipped() throws Exception { + // Generate indices + Index dataStreamIndex1 = new Index(".ds-mydata0001", UUIDs.base64UUID()); + Index dataStreamIndex2 = new Index(".ds-mydata0002", UUIDs.base64UUID()); + DataStream dataStream = DataStreamTestHelper.newInstance("mydata", List.of(dataStreamIndex1, dataStreamIndex2)); + + List regularIndices = randomList(0, 2, () -> new Index(randomAlphaOfLength(10), UUIDs.base64UUID())); + + long indexMinTimestampForTs = 
10; + long indexMaxTimestampForTs = 20; + long indexMinTimestampForEventIngested = 10; + long indexMaxTimestampForEventIngested = 20; + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + for (Index dataStreamIndex : dataStream.getIndices()) { + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested( + dataStreamIndex, + indexMinTimestampForTs, + indexMaxTimestampForTs, + indexMinTimestampForEventIngested, + indexMaxTimestampForEventIngested + ); + } + + BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); + // Query inside of the data stream index range + if (randomBoolean()) { + // Query generation + // We query a range within both timestamp ranges covered by both datastream indices + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); + tsRangeQueryBuilder.from(indexMinTimestampForTs).to(indexMaxTimestampForTs); + + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME); + eventIngestedRangeQueryBuilder.from(indexMinTimestampForEventIngested).to(indexMaxTimestampForEventIngested); + + queryBuilder.filter(tsRangeQueryBuilder).filter(eventIngestedRangeQueryBuilder); + + if (randomBoolean()) { + // Add an additional filter that cannot be evaluated in the coordinator but shouldn't + // affect the end result as we're filtering + queryBuilder.filter(new TermQueryBuilder("fake", "value")); + } + } else { + // We query a range outside of both ranges covered by both datastream indices + RangeQueryBuilder tsRangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(indexMaxTimestampForTs + 1) + .to(indexMaxTimestampForTs + 2); + RangeQueryBuilder eventIngestedRangeQueryBuilder = new RangeQueryBuilder(IndexMetadata.EVENT_INGESTED_FIELD_NAME).from( + indexMaxTimestampForEventIngested + 1 + ).to(indexMaxTimestampForEventIngested + 2); + + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("fake", "value"); + + // This is always evaluated as true in the coordinator as we cannot determine there if + // the term query clause is false.
+ queryBuilder.should(tsRangeQueryBuilder).should(eventIngestedRangeQueryBuilder).should(termQueryBuilder); + } + + assignShardsAndExecuteCanMatchPhase( + List.of(dataStream), + regularIndices, + contextProviderBuilder.build(), + queryBuilder, + List.of(), + null, + this::assertAllShardsAreQueried + ); + } + public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withDefaultBackgroundFilter() throws Exception { Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); + String timeField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timeField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timeField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timeField, 2000, 2999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timeField).from(2100).to(2200)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms"); assignShardsAndExecuteCanMatchPhase( @@ -661,20 +813,22 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w } public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withBackgroundFilter() throws Exception { + String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + Index index1 = new Index("index1", UUIDs.base64UUID()); Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); Index index4 = new Index("index4", UUIDs.base64UUID()); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); - contextProviderBuilder.addIndexMinMaxTimestamps(index4, DataStream.TIMESTAMP_FIELD_NAME, 3000, 3999); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); + contextProviderBuilder.addIndexMinMaxTimestamps(index4, timestampField, 3000, 3999); - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(3100).to(3200)); + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(3100).to(3200)); AggregationBuilder aggregation = new 
SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(0).to(1999) + new RangeQueryBuilder(timestampField).from(0).to(1999) ); assignShardsAndExecuteCanMatchPhase( @@ -703,14 +857,53 @@ public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_w Index index2 = new Index("index2", UUIDs.base64UUID()); Index index3 = new Index("index3", UUIDs.base64UUID()); + String timestampField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); + contextProviderBuilder.addIndexMinMaxTimestamps(index1, timestampField, 0, 999); + contextProviderBuilder.addIndexMinMaxTimestamps(index2, timestampField, 1000, 1999); + contextProviderBuilder.addIndexMinMaxTimestamps(index3, timestampField, 2000, 2999); + + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(timestampField).from(2100).to(2200)); + AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( + new RangeQueryBuilder(timestampField).from(2000).to(2300) + ); + SuggestBuilder suggest = new SuggestBuilder().setGlobalText("test"); + + assignShardsAndExecuteCanMatchPhase( + List.of(), + List.of(index1, index2, index3), + contextProviderBuilder.build(), + query, + List.of(aggregation), + suggest, + // The query and aggregation and match only index3, but suggest should match everything. + this::assertAllShardsAreQueried + ); + } + + public void testCanMatchFilteringOnCoordinator_withSignificantTermsAggregation_withSuggest_withTwoTimestamps() throws Exception { + Index index1 = new Index("index1", UUIDs.base64UUID()); + Index index2 = new Index("index2", UUIDs.base64UUID()); + Index index3 = new Index("index3", UUIDs.base64UUID()); + StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); - contextProviderBuilder.addIndexMinMaxTimestamps(index1, DataStream.TIMESTAMP_FIELD_NAME, 0, 999); - contextProviderBuilder.addIndexMinMaxTimestamps(index2, DataStream.TIMESTAMP_FIELD_NAME, 1000, 1999); - contextProviderBuilder.addIndexMinMaxTimestamps(index3, DataStream.TIMESTAMP_FIELD_NAME, 2000, 2999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index1, 0, 999, 0, 999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index2, 1000, 1999, 1000, 1999); + contextProviderBuilder.addIndexMinMaxForTimestampAndEventIngested(index3, 2000, 2999, 2000, 2999); + + String fieldInRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + String fieldOutOfRange = DataStream.TIMESTAMP_FIELD_NAME; - QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2100).to(2200)); + if (randomBoolean()) { + fieldInRange = DataStream.TIMESTAMP_FIELD_NAME; + fieldOutOfRange = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + } + + QueryBuilder query = new BoolQueryBuilder().filter(new RangeQueryBuilder(fieldInRange).from(2100).to(2200)) + .filter(new RangeQueryBuilder(fieldOutOfRange).from(8888).to(9999)); AggregationBuilder aggregation = new SignificantTermsAggregationBuilder("significant_terms").backgroundFilter( - new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME).from(2000).to(2300) + new RangeQueryBuilder(fieldInRange).from(2000).to(2300) ); SuggestBuilder suggest = new 
SuggestBuilder().setGlobalText("test"); @@ -744,13 +937,13 @@ public void testCanMatchFilteringOnCoordinatorThatCanBeSkippedTsdb() throws Exce long indexMaxTimestamp = randomLongBetween(indexMinTimestamp, 5000 * 2); StaticCoordinatorRewriteContextProviderBuilder contextProviderBuilder = new StaticCoordinatorRewriteContextProviderBuilder(); for (Index index : dataStream1.getIndices()) { - contextProviderBuilder.addIndexMinMaxTimestamps(index, indexMinTimestamp, indexMaxTimestamp); + contextProviderBuilder.addIndexMinMaxTimestamps(index, DataStream.TIMESTAMP_FIELD_NAME, indexMinTimestamp, indexMaxTimestamp); } for (Index index : dataStream2.getIndices()) { contextProviderBuilder.addIndex(index); } - RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp"); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder(DataStream.TIMESTAMP_FIELD_NAME); // We query a range outside of the timestamp range covered by both datastream indices rangeQueryBuilder.from(indexMaxTimestamp + 1).to(indexMaxTimestamp + 2); @@ -954,9 +1147,9 @@ public void sendCanMatch( canMatchResultsConsumer.accept(updatedSearchShardIterators, requests); } - private static class StaticCoordinatorRewriteContextProviderBuilder { + static class StaticCoordinatorRewriteContextProviderBuilder { private ClusterState clusterState = ClusterState.EMPTY_STATE; - private final Map fields = new HashMap<>(); + private final Map fields = new HashMap<>(); private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTimeStamp, long maxTimestamp) { if (clusterState.metadata().index(index) != null) { @@ -974,35 +1167,64 @@ private void addIndexMinMaxTimestamps(Index index, String fieldName, long minTim IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0) - .timestampRange(timestampRange); + .numberOfReplicas(0); + if (fieldName.equals(DataStream.TIMESTAMP_FIELD_NAME)) { + indexMetadataBuilder.timestampRange(timestampRange); + fields.put(index, new DateFieldRangeInfo(new DateFieldMapper.DateFieldType(fieldName), null, null, null)); + } else if (fieldName.equals(IndexMetadata.EVENT_INGESTED_FIELD_NAME)) { + indexMetadataBuilder.eventIngestedRange(timestampRange, TransportVersion.current()); + fields.put(index, new DateFieldRangeInfo(null, null, new DateFieldMapper.DateFieldType(fieldName), null)); + } Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); - clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - - fields.put(index, new DateFieldMapper.DateFieldType(fieldName)); } - private void addIndexMinMaxTimestamps(Index index, long minTimestamp, long maxTimestamp) { + /** + * Add min/max timestamps to IndexMetadata for the specified index for both @timestamp and 'event.ingested' + */ + private void addIndexMinMaxForTimestampAndEventIngested( + Index index, + long minTimestampForTs, + long maxTimestampForTs, + long minTimestampForEventIngested, + long maxTimestampForEventIngested + ) { if (clusterState.metadata().index(index) != null) { throw new IllegalArgumentException("Min/Max timestamps for " + index + " were already defined"); } - Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "a_field") - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), 
DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(minTimestamp)) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(maxTimestamp)); + IndexLongFieldRange tsTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( + 0, + 1, + ShardLongFieldRange.of(minTimestampForTs, maxTimestampForTs) + ); + IndexLongFieldRange eventIngestedTimestampRange = IndexLongFieldRange.NO_SHARDS.extendWithShardRange( + 0, + 1, + ShardLongFieldRange.of(minTimestampForEventIngested, maxTimestampForEventIngested) + ); + + Settings.Builder indexSettings = settings(IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()); IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(index.getName()) .settings(indexSettings) .numberOfShards(1) - .numberOfReplicas(0); + .numberOfReplicas(0) + .timestampRange(tsTimestampRange) + .eventIngestedRange(eventIngestedTimestampRange, TransportVersion.current()); Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); + fields.put( + index, + new DateFieldRangeInfo( + new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), + null, + new DateFieldMapper.DateFieldType(IndexMetadata.EVENT_INGESTED_FIELD_NAME), + null + ) + ); } private void addIndex(Index index) { @@ -1018,7 +1240,7 @@ private void addIndex(Index index) { Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(indexMetadataBuilder); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); - fields.put(index, new DateFieldMapper.DateFieldType("@timestamp")); + fields.put(index, new DateFieldRangeInfo(new DateFieldMapper.DateFieldType(DataStream.TIMESTAMP_FIELD_NAME), null, null, null)); } public CoordinatorRewriteContextProvider build() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 2a3cc3a248f45..1634572e0b6b7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -59,6 +59,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -623,13 +624,13 @@ QueryRewriteContext createQueryRewriteContext() { } CoordinatorRewriteContext createCoordinatorContext(DateFieldMapper.DateFieldType dateFieldType, long min, long max) { - return new CoordinatorRewriteContext( - parserConfiguration, - this.client, - () -> nowInMillis, + DateFieldRangeInfo timestampFieldInfo = new DateFieldRangeInfo( + dateFieldType, IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)), - dateFieldType + dateFieldType, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(min, max)) ); + return new CoordinatorRewriteContext(parserConfiguration, this.client, () -> nowInMillis, timestampFieldInfo); } DataRewriteContext 
createDataContext() { diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index 36d4751423113..ad9900b5b0164 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.protocol.xpack.frozen.FreezeRequest; @@ -44,6 +45,7 @@ import java.time.Instant; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING; @@ -76,8 +78,15 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx createIndex("index", 1, 1); - final DocWriteResponse indexResponse = prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, "2010-01-06T02:03:04.567Z") - .get(); + String timestampVal = "2010-01-06T02:03:04.567Z"; + String eventIngestedVal = "2010-01-06T02:03:05.567Z"; // one second later + + final DocWriteResponse indexResponse = prepareIndex("index").setSource( + DataStream.TIMESTAMP_FIELD_NAME, + timestampVal, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + eventIngestedVal + ).get(); ensureGreen("index"); @@ -117,13 +126,23 @@ public void testTimestampRangeRecalculatedOnStalePrimaryAllocation() throws IOEx assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); assertThat(timestampFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertTrue(timestampFieldRange.isComplete()); - assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); - assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse("2010-01-06T02:03:04.567Z").toEpochMilli())); + assertThat(timestampFieldRange.getMin(), equalTo(Instant.parse(timestampVal).toEpochMilli())); + assertThat(timestampFieldRange.getMax(), equalTo(Instant.parse(timestampVal).toEpochMilli())); - assertThat(indexMetadata.getEventIngestedRange(), sameInstance(IndexLongFieldRange.UNKNOWN)); + IndexLongFieldRange eventIngestedFieldRange = clusterAdmin().prepareState() + .get() + .getState() + .metadata() + .index("index") + .getEventIngestedRange(); + assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.UNKNOWN))); + assertThat(eventIngestedFieldRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertTrue(eventIngestedFieldRange.isComplete()); + assertThat(eventIngestedFieldRange.getMin(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); + assertThat(eventIngestedFieldRange.getMax(), equalTo(Instant.parse(eventIngestedVal).toEpochMilli())); } - public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception { + public void testTimestampAndEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { internalCluster().startNodes(between(2, 4)); final String locale; @@ -181,11 +200,11 @@ public void 
testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ensureGreen("index"); if (randomBoolean()) { - prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date).get(); + prepareIndex("index").setSource(DataStream.TIMESTAMP_FIELD_NAME, date, IndexMetadata.EVENT_INGESTED_FIELD_NAME, date).get(); } for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldType(index)); + assertNull(indicesService.getTimestampFieldTypeInfo(index)); } assertAcked( @@ -193,15 +212,129 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); + final PlainActionFuture> future = new PlainActionFuture<>(); assertBusy(() -> { - final DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(index); + DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); + DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.timestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.eventIngestedFieldType(); + assertNotNull(eventIngestedFieldType); assertNotNull(timestampFieldType); - timestampFieldTypeFuture.onResponse(timestampFieldType); + future.onResponse( + Map.of( + DataStream.TIMESTAMP_FIELD_NAME, + timestampFieldType, + IndexMetadata.EVENT_INGESTED_FIELD_NAME, + eventIngestedFieldType + ) + ); + }); + assertTrue(future.isDone()); + assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(future.get().get(DataStream.TIMESTAMP_FIELD_NAME).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + assertThat(future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat( + future.get().get(IndexMetadata.EVENT_INGESTED_FIELD_NAME).dateTimeFormatter().parseMillis(date), + equalTo(1580817683000L) + ); + } + + assertAcked( + client().execute( + FreezeIndexAction.INSTANCE, + new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index").setFreeze(false) + ).actionGet() + ); + ensureGreen("index"); + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + assertNull(indicesService.getTimestampFieldTypeInfo(index)); + } + } + + public void testTimestampOrEventIngestedFieldTypeExposedByAllIndicesServices() throws Exception { + internalCluster().startNodes(between(2, 4)); + + final String locale; + final String date; + + switch (between(1, 3)) { + case 1 -> { + locale = ""; + date = "04 Feb 2020 12:01:23Z"; + } + case 2 -> { + locale = "en_GB"; + date = "04 Feb 2020 12:01:23Z"; + } + case 3 -> { + locale = "fr_FR"; + date = "04 févr. 
2020 12:01:23Z"; + } + default -> throw new AssertionError("impossible"); + } + + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + assertAcked( + prepareCreate("index").setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + .setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject(timeField) + .field("type", "date") + .field("format", "dd LLL yyyy HH:mm:ssX") + .field("locale", locale) + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); + + final Index index = clusterAdmin().prepareState() + .clear() + .setIndices("index") + .setMetadata(true) + .get() + .getState() + .metadata() + .index("index") + .getIndex(); + + ensureGreen("index"); + if (randomBoolean()) { + prepareIndex("index").setSource(timeField, date).get(); + } + + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + assertNull(indicesService.getTimestampFieldTypeInfo(index)); + } + + assertAcked( + client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "index")).actionGet() + ); + ensureGreen("index"); + for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + // final PlainActionFuture timestampFieldTypeFuture = new PlainActionFuture<>(); + final PlainActionFuture> future = new PlainActionFuture<>(); + assertBusy(() -> { + DateFieldRangeInfo timestampsFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(index); + DateFieldMapper.DateFieldType timestampFieldType = timestampsFieldTypeInfo.timestampFieldType(); + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampsFieldTypeInfo.eventIngestedFieldType(); + if (timeField == DataStream.TIMESTAMP_FIELD_NAME) { + assertNotNull(timestampFieldType); + assertNull(eventIngestedFieldType); + future.onResponse(Map.of(timeField, timestampFieldType)); + } else { + assertNull(timestampFieldType); + assertNotNull(eventIngestedFieldType); + future.onResponse(Map.of(timeField, eventIngestedFieldType)); + } }); - assertTrue(timestampFieldTypeFuture.isDone()); - assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().locale().toString(), equalTo(locale)); - assertThat(timestampFieldTypeFuture.get().dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); + assertTrue(future.isDone()); + assertThat(future.get().get(timeField).dateTimeFormatter().locale().toString(), equalTo(locale)); + assertThat(future.get().get(timeField).dateTimeFormatter().parseMillis(date), equalTo(1580817683000L)); } assertAcked( @@ -212,7 +345,7 @@ public void testTimestampFieldTypeExposedByAllIndicesServices() throws Exception ); ensureGreen("index"); for (final IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { - assertNull(indicesService.getTimestampFieldType(index)); + assertNull(indicesService.getTimestampFieldTypeInfo(index)); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index 5204bdfcc78e6..d5e87558d1ced 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.indices.DateFieldRangeInfo; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; @@ -100,11 +101,11 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + createIndexWithTimestampAndEventIngested(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -117,11 +118,10 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Either add data outside of the range, or documents that don't have timestamp data final boolean indexDataWithTimestamp = randomBoolean(); // Add enough documents to have non-metadata segment files in all shards, - // otherwise the mount operation might go through as the read won't be - // blocked + // otherwise the mount operation might go through as the read won't be blocked final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); if (indexDataWithTimestamp) { - indexDocumentsWithTimestampWithinDate( + indexDocumentsWithTimestampAndEventIngestedDates( indexOutsideSearchRange, numberOfDocsInIndexOutsideSearchRange, TIMESTAMP_TEMPLATE_OUTSIDE_RANGE @@ -132,7 +132,7 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying // Index enough documents to ensure that all shards have at least some documents int numDocsWithinRange = between(100, 1000); - indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -166,9 +166,10 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + 
assertThat(timestampFieldTypeInfo, nullValue()); final boolean includeIndexCoveringSearchRangeInSearchRequest = randomBoolean(); List indicesToSearch = new ArrayList<>(); @@ -176,7 +177,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying indicesToSearch.add(indexWithinSearchRange); } indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -250,20 +253,44 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + + // check that @timestamp and 'event.ingested' are now in cluster state final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); + final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); + assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); + + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + final DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.timestampFieldType(); + assertThat(timestampDataFieldType, notNullValue()); + final DateFieldMapper.DateFieldType eventIngestedDataFieldType = timestampFieldTypeInfo.eventIngestedFieldType(); + assertThat(eventIngestedDataFieldType, notNullValue()); + + final DateFieldMapper.Resolution timestampResolution = timestampDataFieldType.resolution(); + final DateFieldMapper.Resolution eventIngestedResolution = eventIngestedDataFieldType.resolution(); if (indexDataWithTimestamp) { assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); assertThat( updatedTimestampMillisRange.getMin(), - greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedTimestampMillisRange.getMax(), + lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); + + assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) ); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); } else { assertThat(updatedTimestampMillisRange, sameInstance(IndexLongFieldRange.EMPTY)); + assertThat(updatedEventIngestedRange, sameInstance(IndexLongFieldRange.EMPTY)); } // Stop the node holding the searchable snapshots, and since we defined 
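
As a reading aid between these hunks (not part of the patch): the assertions above pin down the contract the tests rely on, namely that once the searchable snapshot is mounted, cluster state carries a complete min/max range for @timestamp and event.ingested, and the coordinator skips shards whose recorded range cannot overlap the query window. A minimal sketch of that decision, assuming only the IndexLongFieldRange API already used in this file; the class and method names here are invented for illustration, the production logic lives in CoordinatorRewriteContext and the can-match phase:

    import org.elasticsearch.index.shard.IndexLongFieldRange;

    final class CoordinatorCanMatchSketch {
        /** Sketch only; not the real CoordinatorRewriteContext code path. */
        static boolean canMatch(IndexLongFieldRange range, long queryFrom, long queryTo) {
            if (range == IndexLongFieldRange.EMPTY) {
                return false; // complete but empty: no document has the field, a range query cannot match
            }
            if (range.isComplete() == false) {
                return true; // unknown or partial range in cluster state: the shard must be searched
            }
            // skip the shard only when the query window and the recorded [min, max] are disjoint
            return queryFrom <= range.getMax() && queryTo >= range.getMin();
        }
    }
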
@@ -383,6 +410,171 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } + /** + * Test shard skipping when only 'event.ingested' is in the index and cluster state. + */ + public void testEventIngestedRangeInSearchAgainstSearchableSnapshotShards() throws Exception { + internalCluster().startMasterOnlyNode(); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode(); + final String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode(); + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNodeHoldingSearchableSnapshot); + + final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); + + final String timestampField = IndexMetadata.EVENT_INGESTED_FIELD_NAME; + + createIndexWithOnlyOneTimestampField(timestampField, indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY); + + final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); + createIndexWithOnlyOneTimestampField( + timestampField, + indexWithinSearchRange, + indexWithinSearchRangeShardCount, + Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex) + .build() + ); + + final int totalShards = indexOutsideSearchRangeShardCount + indexWithinSearchRangeShardCount; + + // Add enough documents to have non-metadata segment files in all shards, + // otherwise the mount operation might go through as the read won't be blocked + final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000); + + indexDocumentsWithOnlyOneTimestampField( + timestampField, + indexOutsideSearchRange, + numberOfDocsInIndexOutsideSearchRange, + TIMESTAMP_TEMPLATE_OUTSIDE_RANGE + ); + + // Index enough documents to ensure that all shards have at least some documents + int numDocsWithinRange = between(100, 1000); + indexDocumentsWithOnlyOneTimestampField( + timestampField, + indexWithinSearchRange, + numDocsWithinRange, + TIMESTAMP_TEMPLATE_WITHIN_RANGE + ); + + final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createRepository(repositoryName, "mock"); + + final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexOutsideSearchRange)).snapshotId(); + assertAcked(indicesAdmin().prepareDelete(indexOutsideSearchRange)); + + final String searchableSnapshotIndexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + + // Block the repository for the node holding the searchable snapshot shards + // to delay its restore + blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot); + + // Force the searchable snapshot to be allocated in a particular node + Settings restoredIndexSettings = Settings.builder() + .put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot) + .build(); + + final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest( + TEST_REQUEST_TIMEOUT, + searchableSnapshotIndexOutsideSearchRange, + repositoryName, + snapshotId.getName(), + indexOutsideSearchRange, + restoredIndexSettings, + Strings.EMPTY_ARRAY, + false, + randomFrom(MountSearchableSnapshotRequest.Storage.values()) + ); + 
client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet(); + + final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + + // Allow the searchable snapshots to be finally mounted + unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot); + waitUntilRecoveryIsDone(searchableSnapshotIndexOutsideSearchRange); + ensureGreen(searchableSnapshotIndexOutsideSearchRange); + + IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); + IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); + IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); + + // @timestamp range should be null since it was not included in the index or indexed docs + assertThat(updatedTimestampMillisRange, equalTo(IndexLongFieldRange.UNKNOWN)); + assertThat(updatedEventIngestedMillisRange, not(equalTo(IndexLongFieldRange.UNKNOWN))); + + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + + DateFieldMapper.DateFieldType timestampDataFieldType = timestampFieldTypeInfo.timestampFieldType(); + assertThat(timestampDataFieldType, nullValue()); + + DateFieldMapper.DateFieldType eventIngestedFieldType = timestampFieldTypeInfo.eventIngestedFieldType(); + assertThat(eventIngestedFieldType, notNullValue()); + + DateFieldMapper.Resolution eventIngestedResolution = eventIngestedFieldType.resolution(); + assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); + assertThat( + updatedEventIngestedMillisRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedMillisRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); + + // now do a search against event.ingested + List indicesToSearch = new ArrayList<>(); + indicesToSearch.add(indexWithinSearchRange); + indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange); + + { + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) + .from("2020-11-28T00:00:00.000000000Z", true) + .to("2020-11-29T00:00:00.000000000Z"); + + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(rangeQuery)); + + assertResponse(client().search(request), searchResponse -> { + // All the regular index searches succeeded + assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + // All the searchable snapshots shards were skipped + assertThat(searchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); + + SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); + assertThat(searchShardResult.skipped().size(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(searchShardResult.notSkipped().size(), equalTo(indexWithinSearchRangeShardCount)); + } + + // query a range that covers both indexes - all shards should be searched, none skipped + { + RangeQueryBuilder rangeQuery = 
QueryBuilders.rangeQuery(timestampField) + .from("2019-11-28T00:00:00.000000000Z", true) + .to("2021-11-29T00:00:00.000000000Z"); + + SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])) + .source(new SearchSourceBuilder().query(rangeQuery)); + + assertResponse(client().search(request), searchResponse -> { + assertThat(searchResponse.getSuccessfulShards(), equalTo(totalShards)); + assertThat(searchResponse.getFailedShards(), equalTo(0)); + assertThat(searchResponse.getSkippedShards(), equalTo(0)); + assertThat(searchResponse.getTotalShards(), equalTo(totalShards)); + }); + + SearchShardAPIResult searchShardResult = doSearchShardAPIQuery(indicesToSearch, rangeQuery, true, totalShards); + assertThat(searchShardResult.skipped().size(), equalTo(0)); + assertThat(searchShardResult.notSkipped().size(), equalTo(totalShards)); + } + } + /** * Can match against searchable snapshots is tested via both the Search API and the SearchShards (transport-only) API. * The latter is a way to do only a can-match rather than all search phases. @@ -396,7 +588,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.builder() @@ -404,7 +596,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() .build() ); - indexDocumentsWithTimestampWithinDate(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -438,11 +630,14 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + final String timestampField = randomFrom(DataStream.TIMESTAMP_FIELD_NAME, IndexMetadata.EVENT_INGESTED_FIELD_NAME); + + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timestampField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -500,14 +695,29 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() ensureGreen(searchableSnapshotIndexOutsideSearchRange); final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange); - final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - 
assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); - assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); - assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, notNullValue()); + + final IndexLongFieldRange updatedTimestampRange = updatedIndexMetadata.getTimestampRange(); + DateFieldMapper.Resolution tsResolution = timestampFieldTypeInfo.timestampFieldType().resolution(); + ; + assertThat(updatedTimestampRange.isComplete(), equalTo(true)); + assertThat(updatedTimestampRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat(updatedTimestampRange.getMin(), greaterThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-26T00:00:00Z")))); + assertThat(updatedTimestampRange.getMax(), lessThanOrEqualTo(tsResolution.convert(Instant.parse("2020-11-27T00:00:00Z")))); + + final IndexLongFieldRange updatedEventIngestedRange = updatedIndexMetadata.getEventIngestedRange(); + DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.eventIngestedFieldType().resolution(); + assertThat(updatedEventIngestedRange.isComplete(), equalTo(true)); + assertThat(updatedEventIngestedRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-26T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-27T00:00:00Z"))) + ); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -579,7 +789,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3); - createIndexWithTimestamp( + createIndexWithTimestampAndEventIngested( indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder() @@ -587,7 +797,7 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo .build() ); - indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); + indexDocumentsWithTimestampAndEventIngestedDates(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE); final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); createRepository(repositoryName, "mock"); @@ -621,11 +831,13 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); + assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); - DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex()); - assertThat(timestampFieldType, nullValue()); + 
DateFieldRangeInfo timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(indexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, nullValue()); - RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME) + String timeField = randomFrom(IndexMetadata.EVENT_INGESTED_FIELD_NAME, DataStream.TIMESTAMP_FIELD_NAME); + RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(timeField) .from("2020-11-28T00:00:00.000000000Z", true) .to("2020-11-29T00:00:00.000000000Z"); @@ -680,13 +892,32 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange); final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange(); - final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex()); - assertThat(dateFieldType, notNullValue()); - final DateFieldMapper.Resolution resolution = dateFieldType.resolution(); + timestampFieldTypeInfo = indicesService.getTimestampFieldTypeInfo(updatedIndexMetadata.getIndex()); + assertThat(timestampFieldTypeInfo, notNullValue()); + final DateFieldMapper.Resolution timestampResolution = timestampFieldTypeInfo.timestampFieldType().resolution(); assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true)); assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); - assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-28T00:00:00Z")))); - assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-29T00:00:00Z")))); + assertThat( + updatedTimestampMillisRange.getMin(), + greaterThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) + ); + assertThat( + updatedTimestampMillisRange.getMax(), + lessThanOrEqualTo(timestampResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) + ); + + final IndexLongFieldRange updatedEventIngestedMillisRange = updatedIndexMetadata.getEventIngestedRange(); + final DateFieldMapper.Resolution eventIngestedResolution = timestampFieldTypeInfo.eventIngestedFieldType().resolution(); + assertThat(updatedEventIngestedMillisRange.isComplete(), equalTo(true)); + assertThat(updatedEventIngestedMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY))); + assertThat( + updatedEventIngestedMillisRange.getMin(), + greaterThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-28T00:00:00Z"))) + ); + assertThat( + updatedEventIngestedMillisRange.getMax(), + lessThanOrEqualTo(eventIngestedResolution.convert(Instant.parse("2020-11-29T00:00:00Z"))) + ); // Stop the node holding the searchable snapshots, and since we defined // the index allocation criteria to require the searchable snapshot @@ -724,17 +955,24 @@ public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCo } } - private void createIndexWithTimestamp(String indexName, int numShards, Settings extraSettings) throws IOException { + private void createIndexWithTimestampAndEventIngested(String indexName, int numShards, Settings extraSettings) throws IOException { assertAcked( indicesAdmin().prepareCreate(indexName) .setMapping( XContentFactory.jsonBuilder() .startObject() .startObject("properties") + .startObject(DataStream.TIMESTAMP_FIELD_NAME) .field("type", randomFrom("date", "date_nanos")) .field("format", "strict_date_optional_time_nanos") 
.endObject() + + .startObject(IndexMetadata.EVENT_INGESTED_FIELD_NAME) + .field("type", randomFrom("date", "date_nanos")) + .field("format", "strict_date_optional_time_nanos") + .endObject() + .endObject() .endObject() ) @@ -743,12 +981,70 @@ private void createIndexWithTimestamp(String indexName, int numShards, Settings ensureGreen(indexName); } - private void indexDocumentsWithTimestampWithinDate(String indexName, int docCount, String timestampTemplate) throws Exception { + private void createIndexWithOnlyOneTimestampField(String timestampField, String index, int numShards, Settings extraSettings) + throws IOException { + assertAcked( + indicesAdmin().prepareCreate(index) + .setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + + .startObject(timestampField) + .field("type", randomFrom("date", "date_nanos")) + .field("format", "strict_date_optional_time_nanos") + .endObject() + + .endObject() + .endObject() + ) + .setSettings(indexSettingsNoReplicas(numShards).put(INDEX_SOFT_DELETES_SETTING.getKey(), true).put(extraSettings)) + ); + ensureGreen(index); + } + + private void indexDocumentsWithOnlyOneTimestampField(String timestampField, String index, int docCount, String timestampTemplate) + throws Exception { + final List indexRequestBuilders = new ArrayList<>(); + for (int i = 0; i < docCount; i++) { + indexRequestBuilders.add( + prepareIndex(index).setSource( + timestampField, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ) + ) + ); + } + indexRandom(true, false, indexRequestBuilders); + + assertThat(indicesAdmin().prepareForceMerge(index).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), equalTo(0)); + refresh(index); + forceMerge(); + } + + private void indexDocumentsWithTimestampAndEventIngestedDates(String indexName, int docCount, String timestampTemplate) + throws Exception { + final List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < docCount; i++) { indexRequestBuilders.add( prepareIndex(indexName).setSource( DataStream.TIMESTAMP_FIELD_NAME, + String.format( + Locale.ROOT, + timestampTemplate, + between(0, 23), + between(0, 59), + between(0, 59), + randomLongBetween(0, 999999999L) + ), + IndexMetadata.EVENT_INGESTED_FIELD_NAME, String.format( Locale.ROOT, timestampTemplate, @@ -789,4 +1085,39 @@ private void waitUntilRecoveryIsDone(String index) throws Exception { private void waitUntilAllShardsAreUnassigned(Index index) throws Exception { awaitClusterState(state -> state.getRoutingTable().index(index).allPrimaryShardsUnassigned()); } + + record SearchShardAPIResult(List skipped, List notSkipped) {} + + private static SearchShardAPIResult doSearchShardAPIQuery( + List indicesToSearch, + RangeQueryBuilder rangeQuery, + boolean allowPartialSearchResults, + int expectedTotalShards + ) { + SearchShardsRequest searchShardsRequest = new SearchShardsRequest( + indicesToSearch.toArray(new String[0]), + SearchRequest.DEFAULT_INDICES_OPTIONS, + rangeQuery, + null, + null, + allowPartialSearchResults, + null + ); + + SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet(); + assertThat(searchShardsResponse.getGroups().size(), equalTo(expectedTotalShards)); + List> partitionedBySkipped = searchShardsResponse.getGroups() + .stream() + .collect( + Collectors.teeing( + Collectors.filtering(g -> g.skipped(), Collectors.toList()), + Collectors.filtering(g -> 
g.skipped() == false, Collectors.toList()),
+                    List::of
+                )
+            );
+
+        List<SearchShardsGroup> skipped = partitionedBySkipped.get(0);
+        List<SearchShardsGroup> notSkipped = partitionedBySkipped.get(1);
+        return new SearchShardAPIResult(skipped, notSkipped);
+    }
 }
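
Before the next patch, one reading aid for the one above: every call site in these diffs constructs DateFieldRangeInfo with four arguments and reads it through timestampFieldType() and eventIngestedFieldType(). Inferred from that usage alone, the carrier looks roughly like the record below; this is an approximation, not a copy of the declaration in org.elasticsearch.indices.DateFieldRangeInfo, which may differ in detail:

    import org.elasticsearch.index.mapper.DateFieldMapper;
    import org.elasticsearch.index.shard.IndexLongFieldRange;

    // Field order follows the four-argument constructor calls in the tests above:
    // (@timestamp type, @timestamp range, event.ingested type, event.ingested range).
    record DateFieldRangeInfo(
        DateFieldMapper.DateFieldType timestampFieldType,
        IndexLongFieldRange timestampRange,
        DateFieldMapper.DateFieldType eventIngestedFieldType,
        IndexLongFieldRange eventIngestedRange
    ) {}
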
From 1d1e8d1e3bcf3092a4b5ee6b295bc5024d37fb7 Mon Sep 17 00:00:00 2001
From: Ignacio Vera
Date: Mon, 26 Aug 2024 16:39:39 +0200
Subject: [PATCH 076/352] Introduce an XYQueriesUtils to be used by _search and ESQL to generate Cartesian queries (#112204)

---
 .../lucene/spatial/XYQueriesUtils.java        |  56 ++++++---
 .../CartesianPointDocValuesQueryTests.java    | 116 ++++++++++++++++++
 .../CartesianShapeDocValuesQueryTests.java    |  36 ++----
 .../scalar/spatial/SpatialContains.java       |   5 +-
 .../scalar/spatial/SpatialDisjoint.java       |   5 +-
 .../scalar/spatial/SpatialIntersects.java     |   5 +-
 .../spatial/SpatialRelatesFunction.java       |   3 +-
 .../scalar/spatial/SpatialWithin.java         |   5 +-
 .../planner/EsqlExpressionTranslators.java    |   4 +-
 .../querydsl/query/SpatialRelatesQuery.java   |  88 ++-----------
 .../index/mapper/PointFieldMapper.java        |   7 +-
 .../index/mapper/ShapeFieldMapper.java        |   7 +-
 .../index/query/ShapeQueryProcessor.java      |  39 ------
 .../spatial/ingest/CircleProcessorTests.java  |   7 +-
 14 files changed, 202 insertions(+), 181 deletions(-)
 rename x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java => server/src/main/java/org/elasticsearch/lucene/spatial/XYQueriesUtils.java (80%)
 create mode 100644 server/src/test/java/org/elasticsearch/lucene/spatial/CartesianPointDocValuesQueryTests.java
 delete mode 100644 x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java

diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java b/server/src/main/java/org/elasticsearch/lucene/spatial/XYQueriesUtils.java
similarity index 80%
rename from x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java
rename to server/src/main/java/org/elasticsearch/lucene/spatial/XYQueriesUtils.java
index 22616eabf8211..23aaf2ab16722 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryPointProcessor.java
+++ b/server/src/main/java/org/elasticsearch/lucene/spatial/XYQueriesUtils.java
@@ -1,13 +1,16 @@
 /*
  * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
  * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
  */
-package org.elasticsearch.xpack.spatial.index.query;
+
+package org.elasticsearch.lucene.spatial;
 
 import org.apache.lucene.document.XYDocValuesField;
 import org.apache.lucene.document.XYPointField;
+import org.apache.lucene.document.XYShape;
 import org.apache.lucene.geo.Component2D;
 import org.apache.lucene.geo.XYGeometry;
 import org.apache.lucene.geo.XYPoint;
@@ -25,23 +28,25 @@
 import java.util.Arrays;
 
-/** Generates a lucene query for a spatial query over a point field.
- *
- * Note that lucene only supports intersects spatial relation so we build other relations
- * using just that one.
- * */
-public class ShapeQueryPointProcessor {
+/** Utility methods that generate a lucene query for a spatial query over a cartesian field. */
+public class XYQueriesUtils {
 
-    public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean isIndexed, boolean hasDocValues) {
-        assert isIndexed || hasDocValues;
+    /** Generates a lucene query for a field that has been previously indexed using {@link XYPoint}. It expects
+     * either {@code indexed} or {@code hasDocValues} to be true, or both to be true.
+     *
+     * Note that lucene only supports intersects spatial relation so we build other relations
+     * using just that one.
+     * */
+    public static Query toXYPointQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean indexed, boolean hasDocValues) {
+        assert indexed || hasDocValues;
         final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {});
         // XYPointField only supports intersects query so we build all the relationships using that logic.
         // it is not very efficient but it works.
         return switch (relation) {
-            case INTERSECTS -> buildIntersectsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
-            case DISJOINT -> buildDisjointQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
-            case CONTAINS -> buildContainsQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
-            case WITHIN -> buildWithinQuery(fieldName, isIndexed, hasDocValues, luceneGeometries);
+            case INTERSECTS -> buildIntersectsQuery(fieldName, indexed, hasDocValues, luceneGeometries);
+            case DISJOINT -> buildDisjointQuery(fieldName, indexed, hasDocValues, luceneGeometries);
+            case CONTAINS -> buildContainsQuery(fieldName, indexed, hasDocValues, luceneGeometries);
+            case WITHIN -> buildWithinQuery(fieldName, indexed, hasDocValues, luceneGeometries);
         };
     }
@@ -276,4 +281,25 @@ public int hashCode() {
         return Arrays.hashCode(geometries);
     }
 }
+
+    /** Generates a lucene query for a field that has been previously indexed using {@link XYShape}. It expects
+     * either {@code indexed} or {@code hasDocValues} to be true, or both to be true. */
+    public static Query toXYShapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean indexed, boolean hasDocValues) {
+        assert indexed || hasDocValues;
+        if (geometry == null || geometry.isEmpty()) {
+            return new MatchNoDocsQuery();
+        }
+        final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {});
+        Query query;
+        if (indexed) {
+            query = XYShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), luceneGeometries);
+            if (hasDocValues) {
+                final Query queryDocValues = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries);
+                query = new IndexOrDocValuesQuery(query, queryDocValues);
+            }
+        } else {
+            query = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries);
+        }
+        return query;
+    }
 }
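
To make the two new entry points concrete, a usage sketch assuming only the signatures shown in this diff; the class name, field name, and rectangle coordinates below are illustrative only:

    import org.apache.lucene.search.Query;
    import org.elasticsearch.common.geo.ShapeRelation;
    import org.elasticsearch.geometry.Rectangle;
    import org.elasticsearch.lucene.spatial.XYQueriesUtils;

    final class XYQueriesUtilsUsageSketch {
        static Query boundingBoxQuery(boolean indexed, boolean hasDocValues) {
            // Rectangle(minX, maxX, maxY, minY) in cartesian space
            Rectangle box = new Rectangle(0, 1000, 1000, 0);
            // For point fields every relation is rewritten on top of Lucene's intersects
            // support; toXYShapeQuery instead picks the index query, the doc-values query,
            // or an IndexOrDocValuesQuery depending on what the field provides.
            return XYQueriesUtils.toXYPointQuery(box, "location", ShapeRelation.INTERSECTS, indexed, hasDocValues);
        }
    }
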
diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianPointDocValuesQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianPointDocValuesQueryTests.java
new file mode 100644
index 0000000000000..ff9074dba52eb
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianPointDocValuesQueryTests.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.lucene.spatial;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.XYDocValuesField;
+import org.apache.lucene.document.XYPointField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.search.CheckHits;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.core.IOUtils;
+import org.elasticsearch.geo.ShapeTestUtils;
+import org.elasticsearch.geometry.Geometry;
+import org.elasticsearch.geometry.Point;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+public class CartesianPointDocValuesQueryTests extends ESTestCase {
+
+    private static final String FIELD_NAME = "field";
+
+    public void testIndexSimpleShapes() throws Exception {
+        IndexWriterConfig iwc = newIndexWriterConfig();
+        // Else seeds may not reproduce:
+        iwc.setMergeScheduler(new SerialMergeScheduler());
+        // Else we can get O(N^2) merging:
+        iwc.setMaxBufferedDocs(10);
+        Directory dir = newDirectory();
+        // RandomIndexWriter is too slow here:
+        IndexWriter w = new IndexWriter(dir, iwc);
+        final int numDocs = randomIntBetween(10, 1000);
+        for (int id = 0; id < numDocs; id++) {
+            Document doc = new Document();
+            Point point = ShapeTestUtils.randomPoint();
+            doc.add(new XYPointField(FIELD_NAME, (float) point.getX(), (float) point.getY()));
+            doc.add(new XYDocValuesField(FIELD_NAME, (float) point.getX(), (float) point.getY()));
+            w.addDocument(doc);
+        }
+
+        if (random().nextBoolean()) {
+            w.forceMerge(1);
+        }
+        final IndexReader r = DirectoryReader.open(w);
+        w.close();
+
+        IndexSearcher s = newSearcher(r);
+        for (int i = 0; i < 25; i++) {
+            Geometry geometry = ShapeTestUtils.randomGeometry(false);
+            for (ShapeRelation relation : ShapeRelation.values()) {
+                Query indexQuery = XYQueriesUtils.toXYPointQuery(geometry, FIELD_NAME, relation, true, false);
+                Query docValQuery = XYQueriesUtils.toXYPointQuery(geometry, FIELD_NAME, relation, false, true);
+                assertQueries(s, indexQuery, docValQuery, numDocs);
+            }
+        }
+        IOUtils.close(r, dir);
+    }
+
+    public void testIndexMultiShapes() throws Exception {
+        IndexWriterConfig iwc = newIndexWriterConfig();
+        // Else seeds may not reproduce:
+        iwc.setMergeScheduler(new SerialMergeScheduler());
+        // Else we can get O(N^2) merging:
+        iwc.setMaxBufferedDocs(10);
+        Directory dir = newDirectory();
+        // RandomIndexWriter is too slow here:
+        IndexWriter w = new IndexWriter(dir, iwc);
+        final int numDocs = randomIntBetween(10, 100);
+        CartesianShapeIndexer indexer = new CartesianShapeIndexer(FIELD_NAME);
+        for (int id = 0; id < numDocs; id++) {
+            Document doc = new Document();
+            for (int i = 0; i < randomIntBetween(1, 5); i++) {
+                Point point = ShapeTestUtils.randomPoint();
+                doc.add(new XYPointField(FIELD_NAME, (float) point.getX(), (float) point.getY()));
+                doc.add(new XYDocValuesField(FIELD_NAME, (float) point.getX(), (float) point.getY()));
+                w.addDocument(doc);
+            }
+            w.addDocument(doc);
+        }
+
+        if
(random().nextBoolean()) { + w.forceMerge(1); + } + final IndexReader r = DirectoryReader.open(w); + w.close(); + + IndexSearcher s = newSearcher(r); + for (int i = 0; i < 25; i++) { + Geometry geometry = ShapeTestUtils.randomGeometry(false); + for (ShapeRelation relation : ShapeRelation.values()) { + Query indexQuery = XYQueriesUtils.toXYPointQuery(geometry, FIELD_NAME, relation, true, false); + Query docValQuery = XYQueriesUtils.toXYPointQuery(geometry, FIELD_NAME, relation, false, true); + assertQueries(s, indexQuery, docValQuery, numDocs); + } + } + IOUtils.close(r, dir); + } + + private void assertQueries(IndexSearcher s, Query indexQuery, Query docValQuery, int numDocs) throws IOException { + assertEquals(s.count(indexQuery), s.count(docValQuery)); + CheckHits.checkEqual(docValQuery, s.search(indexQuery, numDocs).scoreDocs, s.search(docValQuery, numDocs).scoreDocs); + } +} diff --git a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java index 9ee84fcaa352f..e98b9016cca1c 100644 --- a/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/spatial/CartesianShapeDocValuesQueryTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.ShapeField; import org.apache.lucene.document.XYShape; -import org.apache.lucene.geo.XYGeometry; import org.apache.lucene.geo.XYPolygon; import org.apache.lucene.geo.XYRectangle; import org.apache.lucene.index.DirectoryReader; @@ -27,6 +26,7 @@ import org.apache.lucene.tests.search.CheckHits; import org.apache.lucene.tests.search.QueryUtils; import org.elasticsearch.common.geo.LuceneGeometriesUtils; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.core.IOUtils; import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geo.XShapeTestUtil; @@ -128,10 +128,10 @@ public void testIndexSimpleShapes() throws Exception { IndexSearcher s = newSearcher(r); for (int i = 0; i < 25; i++) { - XYGeometry[] geometries = randomLuceneQueryGeometries(); - for (ShapeField.QueryRelation relation : ShapeField.QueryRelation.values()) { - Query indexQuery = XYShape.newGeometryQuery(FIELD_NAME, relation, geometries); - Query docValQuery = new CartesianShapeDocValuesQuery(FIELD_NAME, relation, geometries); + Geometry geometry = ShapeTestUtils.randomGeometry(false); + for (ShapeRelation relation : ShapeRelation.values()) { + Query indexQuery = XYQueriesUtils.toXYShapeQuery(geometry, FIELD_NAME, relation, true, false); + Query docValQuery = XYQueriesUtils.toXYShapeQuery(geometry, FIELD_NAME, relation, false, true); assertQueries(s, indexQuery, docValQuery, numDocs); } } @@ -170,10 +170,10 @@ public void testIndexMultiShapes() throws Exception { IndexSearcher s = newSearcher(r); for (int i = 0; i < 25; i++) { - XYGeometry[] geometries = randomLuceneQueryGeometries(); - for (ShapeField.QueryRelation relation : ShapeField.QueryRelation.values()) { - Query indexQuery = XYShape.newGeometryQuery(FIELD_NAME, relation, geometries); - Query docValQuery = new CartesianShapeDocValuesQuery(FIELD_NAME, relation, geometries); + Geometry geometry = ShapeTestUtils.randomGeometry(false); + for (ShapeRelation relation : ShapeRelation.values()) { + Query indexQuery = XYQueriesUtils.toXYShapeQuery(geometry, FIELD_NAME, relation, true, false); + Query docValQuery = 
XYQueriesUtils.toXYShapeQuery(geometry, FIELD_NAME, relation, false, true); assertQueries(s, indexQuery, docValQuery, numDocs); } } @@ -184,22 +184,4 @@ private void assertQueries(IndexSearcher s, Query indexQuery, Query docValQuery, assertEquals(s.count(indexQuery), s.count(docValQuery)); CheckHits.checkEqual(docValQuery, s.search(indexQuery, numDocs).scoreDocs, s.search(docValQuery, numDocs).scoreDocs); } - - private XYGeometry[] randomLuceneQueryGeometries() { - int numGeom = randomIntBetween(1, 3); - XYGeometry[] geometries = new XYGeometry[numGeom]; - for (int i = 0; i < numGeom; i++) { - geometries[i] = randomLuceneQueryGeometry(); - } - return geometries; - } - - private XYGeometry randomLuceneQueryGeometry() { - return switch (randomInt(3)) { - case 0 -> LuceneGeometriesUtils.toXYPolygon(ShapeTestUtils.randomPolygon(false)); - case 1 -> LuceneGeometriesUtils.toXYCircle(ShapeTestUtils.randomCircle(false)); - case 2 -> LuceneGeometriesUtils.toXYPoint(ShapeTestUtils.randomPoint(false)); - default -> XShapeTestUtil.nextBox(); - }; - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java index 6cb3c34ba8b1f..6788d13cf345e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialContains.java @@ -11,6 +11,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; @@ -151,8 +152,8 @@ public String getWriteableName() { } @Override - public ShapeField.QueryRelation queryRelation() { - return ShapeField.QueryRelation.CONTAINS; + public ShapeRelation queryRelation() { + return ShapeRelation.CONTAINS; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java index d04dc9e1a6b07..eac50f84bd12d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjoint.java @@ -11,6 +11,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; @@ -106,8 +107,8 @@ public String getWriteableName() { } @Override - public ShapeField.QueryRelation queryRelation() { - return ShapeField.QueryRelation.DISJOINT; + public ShapeRelation queryRelation() { + return ShapeRelation.DISJOINT; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java index 48e99989c5699..886551d1f3154 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialIntersects.java @@ -11,6 +11,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; @@ -104,8 +105,8 @@ public String getWriteableName() { } @Override - public ShapeField.QueryRelation queryRelation() { - return ShapeField.QueryRelation.INTERSECTS; + public ShapeRelation queryRelation() { + return ShapeRelation.INTERSECTS; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java index 927c7aed936da..36e98984d2303 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialRelatesFunction.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.ShapeField; import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.geometry.Geometry; @@ -50,7 +51,7 @@ protected SpatialRelatesFunction(StreamInput in, boolean leftDocValues, boolean super(in, leftDocValues, rightDocValues, false); } - public abstract ShapeField.QueryRelation queryRelation(); + public abstract ShapeRelation queryRelation(); @Override public DataType dataType() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java index c204468ae17d1..0b210f07a02f4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialWithin.java @@ -11,6 +11,7 @@ import org.apache.lucene.geo.Component2D; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.ann.Evaluator; @@ -106,8 +107,8 @@ public String getWriteableName() { } @Override - public ShapeField.QueryRelation queryRelation() { - return ShapeField.QueryRelation.WITHIN; + public ShapeRelation queryRelation() { + return ShapeRelation.WITHIN; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 854018f577bd9..b508dc6556456 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.esql.planner; -import org.apache.lucene.document.ShapeField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.geometry.Geometry; @@ -370,7 +370,7 @@ protected Query asQuery(SpatialRelatesFunction bc, TranslatorHandler handler) { return doTranslate(bc, handler); } - public static void checkSpatialRelatesFunction(Expression constantExpression, ShapeField.QueryRelation queryRelation) { + public static void checkSpatialRelatesFunction(Expression constantExpression, ShapeRelation queryRelation) { Check.isTrue( constantExpression.foldable(), "Line {}:{}: Comparisons against fields are not (currently) supported; offender [{}] in [ST_{}]", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java index d1e4e12f73868..4f0bcbb43e260 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SpatialRelatesQuery.java @@ -7,29 +7,18 @@ package org.elasticsearch.xpack.esql.querydsl.query; -import org.apache.lucene.document.ShapeField; -import org.apache.lucene.document.XYDocValuesField; -import org.apache.lucene.document.XYPointField; -import org.apache.lucene.document.XYShape; -import org.apache.lucene.geo.XYGeometry; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.MatchNoDocsQuery; import org.elasticsearch.TransportVersion; -import org.elasticsearch.common.geo.LuceneGeometriesUtils; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.GeoShapeQueryable; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.query.ExistsQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.lucene.spatial.CartesianShapeDocValuesQuery; +import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.esql.core.querydsl.query.Query; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -42,11 +31,11 @@ public class SpatialRelatesQuery extends Query { private final String field; - private final ShapeField.QueryRelation queryRelation; + private final ShapeRelation queryRelation; private final Geometry shape; private final DataType dataType; - public SpatialRelatesQuery(Source source, String field, ShapeField.QueryRelation queryRelation, 
Geometry shape, DataType dataType) { + public SpatialRelatesQuery(Source source, String field, ShapeRelation queryRelation, Geometry shape, DataType dataType) { super(source); this.field = field; this.queryRelation = queryRelation; @@ -205,87 +194,36 @@ org.apache.lucene.search.Query buildShapeQuery(SearchExecutionContext context, M return new ConstantScoreQuery(innerQuery); } - /** - * This code is based on the ShapeQueryPointProcessor.shapeQuery() method, with additional support for two special cases: - * <ul>
- *     <li>
- *         DISJOINT queries (using {@code EXISTS && !INTERSECTS}, similar to {@code LegacyGeoShapeQueryProcessor.geoShapeQuery()})
- *     </li>
- *     <li>
- *         CONTAINS queries (if the shape is a point, INTERSECTS is used, otherwise a MatchNoDocsQuery is built,
- *         similar to {@code LatLonPoint.makeContainsGeometryQuery()})
- *     </li>
- * </ul>
- */ private static org.apache.lucene.search.Query pointShapeQuery( Geometry geometry, String fieldName, - ShapeField.QueryRelation relation, + ShapeRelation relation, SearchExecutionContext context ) { - final boolean hasDocValues = context.getFieldType(fieldName).hasDocValues(); - if (geometry == null || geometry.isEmpty()) { - throw new QueryShardException(context, "Invalid/empty geometry"); - } - final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); - if (isPointGeometry(luceneGeometries) == false && relation == ShapeField.QueryRelation.CONTAINS) { - return new MatchNoDocsQuery("A point field can never contain a non-point geometry"); - } - org.apache.lucene.search.Query intersects = XYPointField.newGeometryQuery(fieldName, luceneGeometries); - if (relation == ShapeField.QueryRelation.DISJOINT) { - // XYPointField does not support DISJOINT queries, so we build one as EXISTS && !INTERSECTS - BooleanQuery.Builder bool = new BooleanQuery.Builder(); - org.apache.lucene.search.Query exists = ExistsQueryBuilder.newFilter(context, fieldName, false); - bool.add(exists, BooleanClause.Occur.MUST); - bool.add(intersects, BooleanClause.Occur.MUST_NOT); - return bool.build(); - } - - // Point-Intersects works for all cases except CONTAINS(shape) and DISJOINT, which are handled separately above - if (hasDocValues) { - final org.apache.lucene.search.Query queryDocValues = XYDocValuesField.newSlowGeometryQuery(fieldName, luceneGeometries); - intersects = new IndexOrDocValuesQuery(intersects, queryDocValues); + final MappedFieldType fieldType = context.getFieldType(fieldName); + try { + return XYQueriesUtils.toXYPointQuery(geometry, fieldName, relation, fieldType.isIndexed(), fieldType.hasDocValues()); + } catch (IllegalArgumentException e) { + throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); } - return intersects; - } - - private static boolean isPointGeometry(XYGeometry[] geometries) { - return geometries.length == 1 && geometries[0] instanceof org.apache.lucene.geo.XYPoint; } - /** - * This code is based on the ShapeQueryProcessor.shapeQuery() method - */ private static org.apache.lucene.search.Query shapeShapeQuery( Geometry geometry, String fieldName, - ShapeField.QueryRelation relation, + ShapeRelation relation, SearchExecutionContext context ) { - final boolean hasDocValues = context.getFieldType(fieldName).hasDocValues(); // CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0); - if (relation == ShapeField.QueryRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) { throw new QueryShardException(context, relation + " query relation not supported for Field [" + fieldName + "]."); } - if (geometry == null || geometry.isEmpty()) { - throw new QueryShardException(context, "Invalid/empty geometry"); - } - final XYGeometry[] luceneGeometries; + final MappedFieldType fieldType = context.getFieldType(fieldName); try { - luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); + return XYQueriesUtils.toXYShapeQuery(geometry, fieldName, relation, fieldType.isIndexed(), fieldType.hasDocValues()); } catch (IllegalArgumentException e) { throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); } - org.apache.lucene.search.Query query = 
XYShape.newGeometryQuery(fieldName, relation, luceneGeometries); - if (hasDocValues) { - final org.apache.lucene.search.Query queryDocValues = new CartesianShapeDocValuesQuery( - fieldName, - relation, - luceneGeometries - ); - query = new IndexOrDocValuesQuery(query, queryDocValues); - } - return query; } } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index 9412dc3c5eb53..2901e374003dd 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -25,11 +25,11 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.spatial.common.CartesianPoint; import org.elasticsearch.xpack.spatial.index.fielddata.plain.CartesianPointIndexFieldData; -import org.elasticsearch.xpack.spatial.index.query.ShapeQueryPointProcessor; import org.elasticsearch.xpack.spatial.script.field.CartesianPointDocValuesField; import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianPointValuesSourceType; @@ -178,8 +178,6 @@ public FieldMapper.Builder getMergeBuilder() { public static class PointFieldType extends AbstractPointFieldType implements ShapeQueryable { - private final ShapeQueryPointProcessor queryProcessor; - private PointFieldType( String name, boolean indexed, @@ -190,7 +188,6 @@ private PointFieldType( Map meta ) { super(name, indexed, stored, hasDocValues, parser, nullValue, meta); - this.queryProcessor = new ShapeQueryPointProcessor(); } // only used in test @@ -216,7 +213,7 @@ public String typeName() { @Override public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation, SearchExecutionContext context) { failIfNotIndexedNorDocValuesFallback(context); - return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); + return XYQueriesUtils.toXYPointQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); } @Override diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java index ab57efee527dc..0ea8c3e22e288 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java @@ -31,6 +31,7 @@ import org.elasticsearch.lucene.spatial.BinaryShapeDocValuesField; import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; import org.elasticsearch.lucene.spatial.CoordinateEncoder; +import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.script.field.AbstractScriptFieldFactory; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.script.field.Field; @@ -39,7 +40,6 @@ import org.elasticsearch.xpack.spatial.index.fielddata.CartesianShapeValues; import 
org.elasticsearch.xpack.spatial.index.fielddata.plain.AbstractAtomicCartesianShapeFieldData; import org.elasticsearch.xpack.spatial.index.fielddata.plain.CartesianShapeIndexFieldData; -import org.elasticsearch.xpack.spatial.index.query.ShapeQueryProcessor; import org.elasticsearch.xpack.spatial.search.aggregations.support.CartesianShapeValuesSourceType; import java.io.IOException; @@ -137,8 +137,6 @@ public ShapeFieldMapper build(MapperBuilderContext context) { public static final class ShapeFieldType extends AbstractShapeGeometryFieldType implements ShapeQueryable { - private final ShapeQueryProcessor queryProcessor; - public ShapeFieldType( String name, boolean indexed, @@ -148,7 +146,6 @@ public ShapeFieldType( Map meta ) { super(name, indexed, false, hasDocValues, parser, orientation, meta); - this.queryProcessor = new ShapeQueryProcessor(); } @Override @@ -172,7 +169,7 @@ public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation ); } try { - return queryProcessor.shapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); + return XYQueriesUtils.toXYShapeQuery(shape, fieldName, relation, isIndexed(), hasDocValues()); } catch (IllegalArgumentException e) { throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e); } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java deleted file mode 100644 index 25a0e55c027f5..0000000000000 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryProcessor.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.spatial.index.query; - -import org.apache.lucene.document.XYShape; -import org.apache.lucene.geo.XYGeometry; -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.common.geo.LuceneGeometriesUtils; -import org.elasticsearch.common.geo.ShapeRelation; -import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.lucene.spatial.CartesianShapeDocValuesQuery; - -public class ShapeQueryProcessor { - - public Query shapeQuery(Geometry geometry, String fieldName, ShapeRelation relation, boolean indexed, boolean hasDocValues) { - assert indexed || hasDocValues; - if (geometry == null || geometry.isEmpty()) { - return new MatchNoDocsQuery(); - } - final XYGeometry[] luceneGeometries = LuceneGeometriesUtils.toXYGeometry(geometry, t -> {}); - Query query; - if (indexed) { - query = XYShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - if (hasDocValues) { - final Query queryDocValues = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - query = new IndexOrDocValuesQuery(query, queryDocValues); - } - } else { - query = new CartesianShapeDocValuesQuery(fieldName, relation.getLuceneRelation(), luceneGeometries); - } - return query; - } -} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java index e71b4f0f4e981..66f5597be543e 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java @@ -32,13 +32,13 @@ import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.TestIngestDocument; import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; +import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.spatial.index.mapper.GeoShapeWithDocValuesFieldMapper.GeoShapeWithDocValuesFieldType; -import org.elasticsearch.xpack.spatial.index.query.ShapeQueryProcessor; import java.io.IOException; import java.util.Collections; @@ -242,9 +242,8 @@ public void testShapeQuery() throws IOException { int numSides = randomIntBetween(4, 1000); Geometry geometry = CircleUtils.createRegularShapePolygon(circle, numSides); - ShapeQueryProcessor processor = new ShapeQueryProcessor(); - Query sameShapeQuery = processor.shapeQuery(geometry, fieldName, ShapeRelation.INTERSECTS, true, true); - Query centerPointQuery = processor.shapeQuery( + Query sameShapeQuery = XYQueriesUtils.toXYShapeQuery(geometry, fieldName, ShapeRelation.INTERSECTS, true, true); + Query centerPointQuery = XYQueriesUtils.toXYShapeQuery( new Point(circle.getLon(), circle.getLat()), fieldName, ShapeRelation.INTERSECTS, From e07a3951859cc0d19ac320aec7568a9e0ff6400a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Mon, 26 Aug 2024 16:44:06 +0200 Subject: [PATCH 077/352] Fix testPEMKeyConfigReloading intermittent failures (#112086) Relates: 
https://github.com/elastic/elasticsearch/issues/101427

The purpose of `testPEMKeyConfigReloading` is to make sure `SSLConfigurationReloader` works as expected when the SSL key and certificate files the SSL config points to are changed. The `SSLConfigurationReloader` uses a `ResourceWatcherService` to notify a provided consumer when the files have changed.

In this test we first set up a web server using a valid certificate and key that the client trusts, and validate that the SSL handshake succeeds. We then overwrite the key and certificate files with a certificate that is not trusted by the client and validate that the SSL handshake fails ("PKIX path validation failed").

Overwriting the key and certificate is a two-step, non-atomic process (two file overwrites). The test was failing intermittently because the one-second refresh interval sometimes elapsed between the two overwrites, so only the key had been replaced when the reload fired, resulting in an unexpected (to the test) error: "Signature length not correct: got 512 but was expecting 256". This can be reproduced by commenting out the certificate overwrite.

The fix is to turn the two file overwrites into two separate reload events: the test now passes a list of modifier functions and, after running each one, waits on a `CyclicBarrier` shared with the reload consumer, so the next overwrite only starts once the previous change has been observed and the SSL context reloaded. I also lowered the interval to make sure the test isn't slower than before.
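For readers unfamiliar with the pattern, a minimal, self-contained sketch of the rendezvous idea follows (plain java.nio and java.util.concurrent; the file names and the hand-rolled polling thread are illustrative stand-ins for the real `ResourceWatcherService`, not its API):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.util.List;
    import java.util.concurrent.CyclicBarrier;

    public class ReloadRendezvousSketch {

        // Two parties: the polling "watcher" thread and the main (test) thread.
        static final CyclicBarrier RELOAD_BARRIER = new CyclicBarrier(2);

        public static void main(String[] args) throws Exception {
            Path key = Path.of("testnode.pem");   // illustrative file names
            Path cert = Path.of("testnode.crt");

            // Stand-in for the resource watcher: poll the files and, whenever a
            // change is observed, "reload" and then rendezvous with the main thread.
            Thread watcher = new Thread(() -> {
                try {
                    long seenKey = Files.getLastModifiedTime(key).toMillis();
                    long seenCert = Files.getLastModifiedTime(cert).toMillis();
                    while (true) {
                        Thread.sleep(100); // poll interval
                        long k = Files.getLastModifiedTime(key).toMillis();
                        long c = Files.getLastModifiedTime(cert).toMillis();
                        if (k != seenKey || c != seenCert) {
                            seenKey = k;
                            seenCert = c;
                            // ...rebuild the SSL context from key + cert here...
                            RELOAD_BARRIER.await(); // signal: one reload observed
                        }
                    }
                } catch (Exception e) {
                    // sketch only: the daemon thread dies with the JVM
                }
            });
            watcher.setDaemon(true);
            watcher.start();

            // Each overwrite becomes its own reload event: block until the watcher
            // has seen the previous change before touching the next file.
            List<Runnable> overwrites = List.of(() -> replace("updated.pem", key), () -> replace("updated.crt", cert));
            for (Runnable step : overwrites) {
                step.run();
                RELOAD_BARRIER.await(); // rendezvous; the barrier resets itself for the next cycle
            }
        }

        static void replace(String from, Path to) {
            try {
                Files.move(Path.of(from), to, StandardCopyOption.REPLACE_EXISTING);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }

A two-party `CyclicBarrier` fits here because both sides must arrive before either proceeds, which makes "one overwrite, one observed reload" explicit instead of relying on sleeps and timing.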
--- .../ssl/SSLConfigurationReloaderTests.java | 41 +++++++++++++++---- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 7b19d53663a08..4d1ebf6cbaabc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -68,6 +68,7 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -201,14 +202,19 @@ public void testPEMKeyConfigReloading() throws Exception { throw new RuntimeException("Exception starting or connecting to the mock server", e); } }; - final Runnable modifier = () -> { + final List<Runnable> modifierFunctions = List.of(() -> { try { atomicMoveIfPossible(updatedKeyPath, keyPath); + } catch (Exception e) { + throw new RuntimeException("failed to modify file", e); + } + }, () -> { + try { atomicMoveIfPossible(updatedCertPath, certPath); } catch (Exception e) { throw new RuntimeException("failed to modify file", e); } - }; + }); // The new server certificate is not in the client's truststore so SSLHandshake should fail final Consumer<SSLContext> keyMaterialPostChecks = (updatedContext) -> { try { ... throw new RuntimeException("Exception starting or connecting to the mock server", e); } }; - validateSSLConfigurationIsReloaded(env, keyMaterialPreChecks, modifier, keyMaterialPostChecks); + validateSSLConfigurationIsReloaded(env, keyMaterialPreChecks, modifierFunctions, keyMaterialPostChecks); } } @@ -559,28 +565,45 @@ private Settings.Builder baseKeystoreSettings(Path tempDir, MockSecureSettings s private void validateSSLConfigurationIsReloaded( Environment env, Consumer<SSLContext> preChecks, - Runnable modificationFunction, + Runnable modifierFunction, + Consumer<SSLContext> postChecks + ) throws Exception { + validateSSLConfigurationIsReloaded(env, preChecks, List.of(modifierFunction), postChecks); + } + + private void validateSSLConfigurationIsReloaded( + Environment env, + Consumer<SSLContext> preChecks, + List<Runnable> modifierFunctions, Consumer<SSLContext> postChecks ) throws Exception { - final CountDownLatch reloadLatch = new CountDownLatch(1); + final CyclicBarrier reloadBarrier = new CyclicBarrier(2); final SSLService sslService = new SSLService(env); final SslConfiguration config = sslService.getSSLConfiguration("xpack.security.transport.ssl"); final Consumer<SslConfiguration> reloadConsumer = sslConfiguration -> { try { sslService.reloadSSLContext(sslConfiguration); } finally { - reloadLatch.countDown(); + try { + reloadBarrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; new SSLConfigurationReloader(reloadConsumer, resourceWatcherService, SSLService.getSSLConfigurations(env).values()); // Baseline checks preChecks.accept(sslService.sslContextHolder(config).sslContext()); - assertEquals("nothing should have called reload", 1, reloadLatch.getCount()); + assertEquals("nothing should have called reload", 0, reloadBarrier.getNumberWaiting()); // modify - modificationFunction.run(); - reloadLatch.await(); + for (var modifierFunction : modifierFunctions) { + modifierFunction.run(); + reloadBarrier.await(); + reloadBarrier.reset(); + } + + // checks after reload postChecks.accept(sslService.sslContextHolder(config).sslContext()); } From eaf27bdda38d3a94bdf099ac8e1f0ba0feab72da Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 27 Aug 2024 01:28:10 +1000 Subject: [PATCH 078/352] Mute org.elasticsearch.xpack.ml.integration.MlJobIT testDeleteJobAsync #112212 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 77ec7800f8a4d..85c29759cabb2 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -169,6 +169,9 @@ tests: - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {mv_percentile.FromIndex ASYNC} issue: https://github.com/elastic/elasticsearch/issues/112193 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testDeleteJobAsync + issue: https://github.com/elastic/elasticsearch/issues/112212 # Examples: # From 5580b91d522b615a261d46eb743316dfcd5cf11f Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Mon, 26 Aug 2024 12:17:50 -0400 Subject: [PATCH 079/352] [ES|QL] Name parameter with leading underscore (#111950) * name parameter with leading underscore --- docs/changelog/111950.yaml | 6 + .../xpack/esql/core/util/StringUtils.java | 31 +- .../xpack/esql/qa/rest/RestEsqlTestCase.java | 9 +- .../esql/src/main/antlr/EsqlBaseLexer.g4 | 2 +- .../xpack/esql/action/RequestXContent.java | 11 +- .../xpack/esql/parser/EsqlBaseLexer.interp | 2 +- .../xpack/esql/parser/EsqlBaseLexer.java | 1262 +++++++++-------- .../esql/action/EsqlQueryRequestTests.java | 25 +- .../esql/parser/StatementParserTests.java | 155 +- 9 files changed, 841 insertions(+), 662 deletions(-) create mode 100644 docs/changelog/111950.yaml diff --git a/docs/changelog/111950.yaml b/docs/changelog/111950.yaml new file mode 100644 index 0000000000000..3f23c17d8e652 --- /dev/null +++ b/docs/changelog/111950.yaml @@ -0,0 +1,6 @@ +pr: 111950 +summary: "[ES|QL] Name parameter with leading underscore" +area: ES|QL +type: enhancement +issues: + - 111821
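The substance of this change is the relaxed name check in `StringUtils` shown below; as a quick reference, here is a condensed, dependency-free restatement of the rule (a sketch mirroring the new `isValidParamName`, with the boundary cases exercised by the updated REST test):

    public class ParamNameRule {

        /** A valid name starts with a letter or '_' and contains only letters, digits, and '_'. */
        static boolean isValidParamName(String value) {
            if (value.isEmpty() || (isLetter(value.charAt(0)) == false && value.charAt(0) != '_')) {
                return false;
            }
            for (char c : value.toCharArray()) {
                if (isLetter(c) == false && isDigit(c) == false && c != '_') {
                    return false;
                }
            }
            return true;
        }

        static boolean isLetter(char c) {
            return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
        }

        static boolean isDigit(char c) {
            return c >= '0' && c <= '9';
        }

        public static void main(String[] args) {
            System.out.println(isValidParamName("_a"));   // true  - newly accepted
            System.out.println(isValidParamName("_1"));   // true  - newly accepted
            System.out.println(isValidParamName("n1"));   // true  - accepted before and after
            System.out.println(isValidParamName("1a"));   // false - must not start with a digit
            System.out.println(isValidParamName("@-#"));  // false - illegal characters
        }
    }

diff 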
--git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java index 4ba3658697c0d..cd0ade2054ce6 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/StringUtils.java @@ -398,20 +398,41 @@ public static boolean isQualified(String indexWildcard) { public static boolean isInteger(String value) { for (char c : value.trim().toCharArray()) { - if (Character.isDigit(c) == false) { + if (isDigit(c) == false) { return false; } } return true; } + private static boolean isLetter(char c) { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); + } + + private static boolean isDigit(char c) { + return c >= '0' && c <= '9'; + } + + private static boolean isUnderscore(char c) { + return c == '_'; + } + + private static boolean isLetterOrDigitOrUnderscore(char c) { + return isLetter(c) || isDigit(c) || isUnderscore(c); + } + + private static boolean isLetterOrUnderscore(char c) { + return isLetter(c) || isUnderscore(c); + } + public static boolean isValidParamName(String value) { - // A valid name starts with a letter and contain only letter, digit or _ - if (Character.isLetter(value.charAt(0)) == false) { + // A valid name starts with a letter or _ + if (isLetterOrUnderscore(value.charAt(0)) == false) { return false; } - for (char c : value.trim().toCharArray()) { - if (Character.isLetterOrDigit(c) == false && c != '_') { + // contain only letter, digit or _ + for (char c : value.toCharArray()) { + if (isLetterOrDigitOrUnderscore(c) == false) { return false; } } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 8b6511875e86c..6f5297bbeef4d 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -576,16 +576,18 @@ public void testErrorMessageForInvalidParams() throws IOException { () -> runEsqlSync( requestObjectBuilder().query("row a = 1 | eval x = ?, y = ?") .params( - "[{\"1\": \"v1\"}, {\"1-\": \"v1\"}, {\"_a\": \"v1\"}, {\"@-#\": \"v1\"}, true, 123, " - + "{\"type\": \"byte\", \"value\": 5}]" + "[{\"1\": \"v1\"}, {\"1-\": \"v1\"}, {\"-a\": \"v1\"}, {\"@-#\": \"v1\"}, true, 123, " + + "{\"type\": \"byte\", \"value\": 5}, {\"_1\": \"v1\"}, {\"_a\": \"v1\"}]" ) ) ); String error = EntityUtils.toString(re.getResponse().getEntity()).replaceAll("\\\\\n\s+\\\\", ""); assertThat(error, containsString("[1] is not a valid parameter name")); assertThat(error, containsString("[1-] is not a valid parameter name")); - assertThat(error, containsString("[_a] is not a valid parameter name")); + assertThat(error, containsString("[-a] is not a valid parameter name")); assertThat(error, containsString("[@-#] is not a valid parameter name")); + assertThat(error, not(containsString("[_a] is not a valid parameter name"))); + assertThat(error, not(containsString("[_1] is not a valid parameter name"))); assertThat(error, containsString("Params cannot contain both named and unnamed parameters")); assertThat(error, containsString("Cannot parse more than one key:value pair as parameter")); re = expectThrows( @@ 
-600,7 +602,6 @@ public void testErrorMessageForInvalidParams() throws IOException { EntityUtils.toString(re.getResponse().getEntity()), containsString("No parameter is defined for position 2, did you mean position 1") ); - re = expectThrows( ResponseException.class, () -> runEsqlSync(requestObjectBuilder().query("row a = ?n0").params("[{\"n1\": \"v1\"}]")) diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 index 769c399fe2dcf..84494458cbc26 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 @@ -161,7 +161,7 @@ SLASH : '/'; PERCENT : '%'; NAMED_OR_POSITIONAL_PARAM - : PARAM LETTER UNQUOTED_ID_BODY* + : PARAM (LETTER | UNDERSCORE) UNQUOTED_ID_BODY* | PARAM DIGIT+ ; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java index 810e313002189..76573a8f4cc1d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RequestXContent.java @@ -155,14 +155,16 @@ private static QueryParams parseParams(XContentParser p) throws IOException { ); } for (Map.Entry entry : param.fields.entrySet()) { - if (isValidParamName(entry.getKey()) == false) { + String name = entry.getKey(); + if (isValidParamName(name) == false) { errors.add( new XContentParseException( loc, "[" - + entry.getKey() + + name + "] is not a valid parameter name, " - + "a valid parameter name starts with a letter and contains letters, digits and underscores only" + + "a valid parameter name starts with a letter or underscore, " + + "and contains letters, digits and underscores only" ) ); } @@ -170,7 +172,7 @@ private static QueryParams parseParams(XContentParser p) throws IOException { if (type == null) { errors.add(new XContentParseException(loc, entry + " is not supported as a parameter")); } - currentParam = new QueryParam(entry.getKey(), entry.getValue(), type); + currentParam = new QueryParam(name, entry.getValue(), type); namedParams.add(currentParam); } } else { @@ -203,6 +205,7 @@ private static QueryParams parseParams(XContentParser p) throws IOException { } } } + // don't allow mixed named and unnamed parameters if (namedParams.isEmpty() == false && unNamedParams.isEmpty() == false) { errors.add( new XContentParseException( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp index faf00552381fb..a8d01c959cd7f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp @@ -477,4 +477,4 @@ METRICS_MODE CLOSING_METRICS_MODE atn: -[4, 0, 126, 1468, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 
31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 
1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 4, 21, 587, 8, 21, 11, 21, 12, 21, 588, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 597, 8, 22, 10, 22, 12, 22, 600, 9, 22, 1, 22, 3, 22, 603, 8, 22, 1, 22, 3, 22, 606, 8, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 5, 23, 615, 8, 23, 10, 23, 12, 23, 618, 9, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 4, 24, 626, 8, 24, 11, 24, 12, 24, 627, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 3, 25, 635, 8, 25, 1, 26, 4, 26, 638, 8, 26, 11, 26, 12, 26, 639, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 37, 1, 37, 3, 37, 679, 8, 37, 1, 37, 4, 37, 682, 8, 37, 11, 37, 12, 37, 683, 1, 38, 1, 38, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 3, 40, 693, 8, 40, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 3, 42, 700, 8, 42, 1, 43, 1, 43, 1, 43, 5, 43, 705, 8, 43, 10, 43, 12, 43, 708, 9, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 5, 43, 716, 8, 43, 10, 43, 12, 43, 719, 9, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 3, 43, 726, 8, 43, 1, 43, 3, 43, 729, 8, 43, 3, 43, 731, 8, 43, 1, 44, 4, 44, 734, 8, 44, 11, 44, 12, 44, 735, 1, 45, 4, 45, 739, 8, 45, 11, 45, 12, 45, 740, 1, 45, 1, 45, 5, 45, 745, 8, 45, 10, 45, 12, 45, 748, 9, 45, 1, 45, 1, 45, 4, 45, 752, 8, 45, 11, 45, 12, 45, 753, 1, 45, 4, 45, 757, 8, 45, 11, 45, 12, 45, 758, 1, 45, 1, 45, 5, 45, 763, 8, 45, 10, 45, 12, 45, 766, 9, 45, 3, 45, 768, 8, 45, 1, 45, 1, 45, 1, 45, 1, 45, 4, 45, 774, 8, 45, 11, 45, 12, 45, 775, 1, 45, 1, 45, 3, 45, 780, 8, 45, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 78, 1, 78, 1, 79, 1, 79, 1, 80, 1, 80, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 5, 82, 908, 8, 82, 10, 82, 12, 82, 911, 9, 82, 1, 82, 1, 82, 4, 82, 915, 8, 82, 11, 82, 12, 82, 916, 3, 82, 919, 8, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 5, 85, 933, 8, 85, 10, 85, 12, 85, 936, 9, 85, 1, 85, 1, 85, 3, 85, 940, 8, 85, 1, 85, 4, 85, 943, 8, 85, 11, 85, 12, 85, 944, 3, 85, 947, 8, 85, 1, 86, 1, 86, 4, 86, 951, 8, 86, 11, 86, 12, 86, 952, 1, 86, 1, 86, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 
104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 3, 106, 1042, 8, 106, 1, 107, 1, 107, 3, 107, 1046, 8, 107, 1, 107, 5, 107, 1049, 8, 107, 10, 107, 12, 107, 1052, 9, 107, 1, 107, 1, 107, 3, 107, 1056, 8, 107, 1, 107, 4, 107, 1059, 8, 107, 11, 107, 12, 107, 1060, 3, 107, 1063, 8, 107, 1, 108, 1, 108, 4, 108, 1067, 8, 108, 11, 108, 12, 108, 1068, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 126, 4, 126, 1144, 8, 126, 11, 126, 12, 126, 1145, 1, 126, 1, 126, 3, 126, 1150, 8, 126, 1, 126, 4, 126, 1153, 8, 126, 11, 126, 12, 126, 1154, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 4, 177, 1377, 8, 177, 11, 177, 12, 177, 1378, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 
194, 1, 194, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 2, 616, 717, 0, 196, 16, 1, 18, 2, 20, 3, 22, 4, 24, 5, 26, 6, 28, 7, 30, 8, 32, 9, 34, 10, 36, 11, 38, 12, 40, 13, 42, 14, 44, 15, 46, 16, 48, 17, 50, 18, 52, 19, 54, 20, 56, 21, 58, 22, 60, 23, 62, 24, 64, 25, 66, 0, 68, 26, 70, 0, 72, 0, 74, 27, 76, 28, 78, 29, 80, 30, 82, 0, 84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 0, 96, 0, 98, 0, 100, 0, 102, 31, 104, 32, 106, 33, 108, 34, 110, 35, 112, 36, 114, 37, 116, 38, 118, 39, 120, 40, 122, 41, 124, 42, 126, 43, 128, 44, 130, 45, 132, 46, 134, 47, 136, 48, 138, 49, 140, 50, 142, 51, 144, 52, 146, 53, 148, 54, 150, 55, 152, 56, 154, 57, 156, 58, 158, 59, 160, 60, 162, 61, 164, 62, 166, 63, 168, 64, 170, 65, 172, 66, 174, 67, 176, 68, 178, 69, 180, 70, 182, 71, 184, 72, 186, 73, 188, 0, 190, 74, 192, 75, 194, 76, 196, 77, 198, 0, 200, 0, 202, 0, 204, 0, 206, 0, 208, 0, 210, 78, 212, 0, 214, 0, 216, 79, 218, 80, 220, 81, 222, 0, 224, 0, 226, 0, 228, 0, 230, 0, 232, 82, 234, 83, 236, 84, 238, 85, 240, 0, 242, 0, 244, 0, 246, 0, 248, 86, 250, 0, 252, 87, 254, 88, 256, 89, 258, 0, 260, 0, 262, 90, 264, 91, 266, 0, 268, 92, 270, 0, 272, 93, 274, 94, 276, 95, 278, 0, 280, 0, 282, 0, 284, 0, 286, 0, 288, 0, 290, 0, 292, 96, 294, 97, 296, 98, 298, 0, 300, 0, 302, 0, 304, 0, 306, 0, 308, 0, 310, 0, 312, 99, 314, 100, 316, 101, 318, 0, 320, 0, 322, 0, 324, 0, 326, 102, 328, 103, 330, 104, 332, 0, 334, 0, 336, 0, 338, 0, 340, 105, 342, 106, 344, 107, 346, 0, 348, 108, 350, 109, 352, 110, 354, 111, 356, 0, 358, 112, 360, 113, 362, 114, 364, 115, 366, 0, 368, 116, 370, 117, 372, 118, 374, 119, 376, 120, 378, 0, 380, 0, 382, 0, 384, 121, 386, 122, 388, 123, 390, 0, 392, 0, 394, 124, 396, 125, 398, 126, 400, 0, 402, 0, 404, 0, 406, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 13, 6, 0, 9, 10, 13, 13, 32, 32, 47, 47, 91, 91, 93, 93, 2, 0, 10, 10, 13, 13, 3, 0, 9, 10, 13, 13, 32, 32, 11, 0, 9, 10, 13, 13, 32, 32, 34, 34, 44, 44, 47, 47, 58, 58, 61, 61, 91, 91, 93, 93, 124, 124, 2, 0, 42, 42, 47, 47, 1, 0, 48, 57, 2, 0, 65, 90, 97, 122, 5, 0, 34, 34, 92, 92, 110, 110, 114, 114, 116, 116, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 1, 0, 96, 96, 11, 0, 9, 10, 13, 13, 32, 32, 34, 35, 44, 44, 47, 47, 58, 58, 60, 60, 62, 63, 92, 92, 124, 124, 1494, 0, 16, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 0, 20, 1, 0, 0, 0, 0, 22, 1, 0, 0, 0, 0, 24, 1, 0, 0, 0, 0, 26, 1, 0, 0, 0, 0, 28, 1, 0, 0, 0, 0, 30, 1, 0, 0, 0, 0, 32, 1, 0, 0, 0, 0, 34, 1, 0, 0, 0, 0, 36, 1, 0, 0, 0, 0, 38, 1, 0, 0, 0, 0, 40, 1, 0, 0, 0, 0, 42, 1, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 46, 1, 0, 0, 0, 0, 48, 1, 0, 0, 0, 0, 50, 1, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 54, 1, 0, 0, 0, 0, 56, 1, 0, 0, 0, 0, 58, 1, 0, 0, 0, 0, 60, 1, 0, 0, 0, 0, 62, 1, 0, 0, 0, 0, 64, 1, 0, 0, 0, 0, 68, 1, 0, 0, 0, 1, 70, 1, 0, 0, 0, 1, 72, 1, 0, 0, 0, 1, 74, 1, 0, 0, 0, 1, 76, 1, 0, 0, 0, 1, 78, 1, 0, 0, 0, 2, 80, 1, 0, 0, 0, 2, 102, 1, 0, 0, 0, 2, 104, 1, 0, 0, 0, 2, 106, 1, 0, 0, 0, 2, 108, 1, 0, 0, 0, 2, 110, 1, 0, 0, 0, 2, 112, 1, 0, 0, 0, 2, 114, 1, 0, 0, 0, 2, 116, 1, 0, 0, 0, 2, 118, 1, 0, 0, 0, 2, 120, 1, 0, 0, 0, 2, 122, 1, 0, 0, 0, 2, 124, 1, 0, 0, 0, 2, 126, 1, 0, 0, 0, 2, 128, 1, 0, 0, 0, 2, 130, 1, 0, 0, 0, 2, 132, 1, 0, 0, 0, 2, 134, 1, 0, 0, 0, 2, 136, 1, 0, 0, 0, 2, 138, 1, 0, 0, 0, 2, 140, 1, 0, 0, 0, 2, 142, 1, 0, 0, 0, 2, 144, 1, 0, 0, 0, 2, 146, 1, 0, 0, 0, 2, 148, 1, 0, 0, 0, 2, 150, 1, 0, 0, 0, 2, 152, 1, 0, 0, 0, 2, 154, 1, 0, 0, 0, 2, 156, 1, 0, 0, 0, 2, 158, 1, 0, 0, 0, 2, 160, 1, 0, 0, 0, 2, 162, 1, 0, 0, 0, 2, 164, 
1, 0, 0, 0, 2, 166, 1, 0, 0, 0, 2, 168, 1, 0, 0, 0, 2, 170, 1, 0, 0, 0, 2, 172, 1, 0, 0, 0, 2, 174, 1, 0, 0, 0, 2, 176, 1, 0, 0, 0, 2, 178, 1, 0, 0, 0, 2, 180, 1, 0, 0, 0, 2, 182, 1, 0, 0, 0, 2, 184, 1, 0, 0, 0, 2, 186, 1, 0, 0, 0, 2, 190, 1, 0, 0, 0, 2, 192, 1, 0, 0, 0, 2, 194, 1, 0, 0, 0, 2, 196, 1, 0, 0, 0, 3, 198, 1, 0, 0, 0, 3, 200, 1, 0, 0, 0, 3, 202, 1, 0, 0, 0, 3, 204, 1, 0, 0, 0, 3, 206, 1, 0, 0, 0, 3, 208, 1, 0, 0, 0, 3, 210, 1, 0, 0, 0, 3, 212, 1, 0, 0, 0, 3, 214, 1, 0, 0, 0, 3, 216, 1, 0, 0, 0, 3, 218, 1, 0, 0, 0, 3, 220, 1, 0, 0, 0, 4, 222, 1, 0, 0, 0, 4, 224, 1, 0, 0, 0, 4, 226, 1, 0, 0, 0, 4, 232, 1, 0, 0, 0, 4, 234, 1, 0, 0, 0, 4, 236, 1, 0, 0, 0, 4, 238, 1, 0, 0, 0, 5, 240, 1, 0, 0, 0, 5, 242, 1, 0, 0, 0, 5, 244, 1, 0, 0, 0, 5, 246, 1, 0, 0, 0, 5, 248, 1, 0, 0, 0, 5, 250, 1, 0, 0, 0, 5, 252, 1, 0, 0, 0, 5, 254, 1, 0, 0, 0, 5, 256, 1, 0, 0, 0, 6, 258, 1, 0, 0, 0, 6, 260, 1, 0, 0, 0, 6, 262, 1, 0, 0, 0, 6, 264, 1, 0, 0, 0, 6, 268, 1, 0, 0, 0, 6, 270, 1, 0, 0, 0, 6, 272, 1, 0, 0, 0, 6, 274, 1, 0, 0, 0, 6, 276, 1, 0, 0, 0, 7, 278, 1, 0, 0, 0, 7, 280, 1, 0, 0, 0, 7, 282, 1, 0, 0, 0, 7, 284, 1, 0, 0, 0, 7, 286, 1, 0, 0, 0, 7, 288, 1, 0, 0, 0, 7, 290, 1, 0, 0, 0, 7, 292, 1, 0, 0, 0, 7, 294, 1, 0, 0, 0, 7, 296, 1, 0, 0, 0, 8, 298, 1, 0, 0, 0, 8, 300, 1, 0, 0, 0, 8, 302, 1, 0, 0, 0, 8, 304, 1, 0, 0, 0, 8, 306, 1, 0, 0, 0, 8, 308, 1, 0, 0, 0, 8, 310, 1, 0, 0, 0, 8, 312, 1, 0, 0, 0, 8, 314, 1, 0, 0, 0, 8, 316, 1, 0, 0, 0, 9, 318, 1, 0, 0, 0, 9, 320, 1, 0, 0, 0, 9, 322, 1, 0, 0, 0, 9, 324, 1, 0, 0, 0, 9, 326, 1, 0, 0, 0, 9, 328, 1, 0, 0, 0, 9, 330, 1, 0, 0, 0, 10, 332, 1, 0, 0, 0, 10, 334, 1, 0, 0, 0, 10, 336, 1, 0, 0, 0, 10, 338, 1, 0, 0, 0, 10, 340, 1, 0, 0, 0, 10, 342, 1, 0, 0, 0, 10, 344, 1, 0, 0, 0, 11, 346, 1, 0, 0, 0, 11, 348, 1, 0, 0, 0, 11, 350, 1, 0, 0, 0, 11, 352, 1, 0, 0, 0, 11, 354, 1, 0, 0, 0, 12, 356, 1, 0, 0, 0, 12, 358, 1, 0, 0, 0, 12, 360, 1, 0, 0, 0, 12, 362, 1, 0, 0, 0, 12, 364, 1, 0, 0, 0, 13, 366, 1, 0, 0, 0, 13, 368, 1, 0, 0, 0, 13, 370, 1, 0, 0, 0, 13, 372, 1, 0, 0, 0, 13, 374, 1, 0, 0, 0, 13, 376, 1, 0, 0, 0, 14, 378, 1, 0, 0, 0, 14, 380, 1, 0, 0, 0, 14, 382, 1, 0, 0, 0, 14, 384, 1, 0, 0, 0, 14, 386, 1, 0, 0, 0, 14, 388, 1, 0, 0, 0, 15, 390, 1, 0, 0, 0, 15, 392, 1, 0, 0, 0, 15, 394, 1, 0, 0, 0, 15, 396, 1, 0, 0, 0, 15, 398, 1, 0, 0, 0, 15, 400, 1, 0, 0, 0, 15, 402, 1, 0, 0, 0, 15, 404, 1, 0, 0, 0, 15, 406, 1, 0, 0, 0, 16, 408, 1, 0, 0, 0, 18, 418, 1, 0, 0, 0, 20, 425, 1, 0, 0, 0, 22, 434, 1, 0, 0, 0, 24, 441, 1, 0, 0, 0, 26, 451, 1, 0, 0, 0, 28, 458, 1, 0, 0, 0, 30, 465, 1, 0, 0, 0, 32, 479, 1, 0, 0, 0, 34, 486, 1, 0, 0, 0, 36, 494, 1, 0, 0, 0, 38, 503, 1, 0, 0, 0, 40, 510, 1, 0, 0, 0, 42, 520, 1, 0, 0, 0, 44, 532, 1, 0, 0, 0, 46, 541, 1, 0, 0, 0, 48, 547, 1, 0, 0, 0, 50, 554, 1, 0, 0, 0, 52, 561, 1, 0, 0, 0, 54, 569, 1, 0, 0, 0, 56, 577, 1, 0, 0, 0, 58, 586, 1, 0, 0, 0, 60, 592, 1, 0, 0, 0, 62, 609, 1, 0, 0, 0, 64, 625, 1, 0, 0, 0, 66, 634, 1, 0, 0, 0, 68, 637, 1, 0, 0, 0, 70, 641, 1, 0, 0, 0, 72, 646, 1, 0, 0, 0, 74, 651, 1, 0, 0, 0, 76, 655, 1, 0, 0, 0, 78, 659, 1, 0, 0, 0, 80, 663, 1, 0, 0, 0, 82, 667, 1, 0, 0, 0, 84, 669, 1, 0, 0, 0, 86, 671, 1, 0, 0, 0, 88, 674, 1, 0, 0, 0, 90, 676, 1, 0, 0, 0, 92, 685, 1, 0, 0, 0, 94, 687, 1, 0, 0, 0, 96, 692, 1, 0, 0, 0, 98, 694, 1, 0, 0, 0, 100, 699, 1, 0, 0, 0, 102, 730, 1, 0, 0, 0, 104, 733, 1, 0, 0, 0, 106, 779, 1, 0, 0, 0, 108, 781, 1, 0, 0, 0, 110, 784, 1, 0, 0, 0, 112, 788, 1, 0, 0, 0, 114, 792, 1, 0, 0, 0, 116, 794, 1, 0, 0, 0, 118, 797, 1, 0, 0, 0, 120, 799, 1, 0, 0, 0, 122, 804, 1, 0, 0, 0, 124, 806, 1, 0, 0, 
0, 126, 812, 1, 0, 0, 0, 128, 818, 1, 0, 0, 0, 130, 821, 1, 0, 0, 0, 132, 824, 1, 0, 0, 0, 134, 829, 1, 0, 0, 0, 136, 834, 1, 0, 0, 0, 138, 836, 1, 0, 0, 0, 140, 842, 1, 0, 0, 0, 142, 846, 1, 0, 0, 0, 144, 851, 1, 0, 0, 0, 146, 857, 1, 0, 0, 0, 148, 860, 1, 0, 0, 0, 150, 862, 1, 0, 0, 0, 152, 868, 1, 0, 0, 0, 154, 870, 1, 0, 0, 0, 156, 875, 1, 0, 0, 0, 158, 878, 1, 0, 0, 0, 160, 881, 1, 0, 0, 0, 162, 884, 1, 0, 0, 0, 164, 886, 1, 0, 0, 0, 166, 889, 1, 0, 0, 0, 168, 891, 1, 0, 0, 0, 170, 894, 1, 0, 0, 0, 172, 896, 1, 0, 0, 0, 174, 898, 1, 0, 0, 0, 176, 900, 1, 0, 0, 0, 178, 902, 1, 0, 0, 0, 180, 918, 1, 0, 0, 0, 182, 920, 1, 0, 0, 0, 184, 925, 1, 0, 0, 0, 186, 946, 1, 0, 0, 0, 188, 948, 1, 0, 0, 0, 190, 956, 1, 0, 0, 0, 192, 958, 1, 0, 0, 0, 194, 962, 1, 0, 0, 0, 196, 966, 1, 0, 0, 0, 198, 970, 1, 0, 0, 0, 200, 975, 1, 0, 0, 0, 202, 979, 1, 0, 0, 0, 204, 983, 1, 0, 0, 0, 206, 987, 1, 0, 0, 0, 208, 991, 1, 0, 0, 0, 210, 995, 1, 0, 0, 0, 212, 1004, 1, 0, 0, 0, 214, 1008, 1, 0, 0, 0, 216, 1012, 1, 0, 0, 0, 218, 1016, 1, 0, 0, 0, 220, 1020, 1, 0, 0, 0, 222, 1024, 1, 0, 0, 0, 224, 1029, 1, 0, 0, 0, 226, 1033, 1, 0, 0, 0, 228, 1041, 1, 0, 0, 0, 230, 1062, 1, 0, 0, 0, 232, 1066, 1, 0, 0, 0, 234, 1070, 1, 0, 0, 0, 236, 1074, 1, 0, 0, 0, 238, 1078, 1, 0, 0, 0, 240, 1082, 1, 0, 0, 0, 242, 1087, 1, 0, 0, 0, 244, 1091, 1, 0, 0, 0, 246, 1095, 1, 0, 0, 0, 248, 1099, 1, 0, 0, 0, 250, 1102, 1, 0, 0, 0, 252, 1106, 1, 0, 0, 0, 254, 1110, 1, 0, 0, 0, 256, 1114, 1, 0, 0, 0, 258, 1118, 1, 0, 0, 0, 260, 1123, 1, 0, 0, 0, 262, 1128, 1, 0, 0, 0, 264, 1133, 1, 0, 0, 0, 266, 1140, 1, 0, 0, 0, 268, 1149, 1, 0, 0, 0, 270, 1156, 1, 0, 0, 0, 272, 1160, 1, 0, 0, 0, 274, 1164, 1, 0, 0, 0, 276, 1168, 1, 0, 0, 0, 278, 1172, 1, 0, 0, 0, 280, 1178, 1, 0, 0, 0, 282, 1182, 1, 0, 0, 0, 284, 1186, 1, 0, 0, 0, 286, 1190, 1, 0, 0, 0, 288, 1194, 1, 0, 0, 0, 290, 1198, 1, 0, 0, 0, 292, 1202, 1, 0, 0, 0, 294, 1206, 1, 0, 0, 0, 296, 1210, 1, 0, 0, 0, 298, 1214, 1, 0, 0, 0, 300, 1219, 1, 0, 0, 0, 302, 1223, 1, 0, 0, 0, 304, 1227, 1, 0, 0, 0, 306, 1231, 1, 0, 0, 0, 308, 1236, 1, 0, 0, 0, 310, 1240, 1, 0, 0, 0, 312, 1244, 1, 0, 0, 0, 314, 1248, 1, 0, 0, 0, 316, 1252, 1, 0, 0, 0, 318, 1256, 1, 0, 0, 0, 320, 1262, 1, 0, 0, 0, 322, 1266, 1, 0, 0, 0, 324, 1270, 1, 0, 0, 0, 326, 1274, 1, 0, 0, 0, 328, 1278, 1, 0, 0, 0, 330, 1282, 1, 0, 0, 0, 332, 1286, 1, 0, 0, 0, 334, 1291, 1, 0, 0, 0, 336, 1295, 1, 0, 0, 0, 338, 1299, 1, 0, 0, 0, 340, 1303, 1, 0, 0, 0, 342, 1307, 1, 0, 0, 0, 344, 1311, 1, 0, 0, 0, 346, 1315, 1, 0, 0, 0, 348, 1320, 1, 0, 0, 0, 350, 1325, 1, 0, 0, 0, 352, 1329, 1, 0, 0, 0, 354, 1333, 1, 0, 0, 0, 356, 1337, 1, 0, 0, 0, 358, 1342, 1, 0, 0, 0, 360, 1352, 1, 0, 0, 0, 362, 1356, 1, 0, 0, 0, 364, 1360, 1, 0, 0, 0, 366, 1364, 1, 0, 0, 0, 368, 1369, 1, 0, 0, 0, 370, 1376, 1, 0, 0, 0, 372, 1380, 1, 0, 0, 0, 374, 1384, 1, 0, 0, 0, 376, 1388, 1, 0, 0, 0, 378, 1392, 1, 0, 0, 0, 380, 1397, 1, 0, 0, 0, 382, 1403, 1, 0, 0, 0, 384, 1409, 1, 0, 0, 0, 386, 1413, 1, 0, 0, 0, 388, 1417, 1, 0, 0, 0, 390, 1421, 1, 0, 0, 0, 392, 1427, 1, 0, 0, 0, 394, 1433, 1, 0, 0, 0, 396, 1437, 1, 0, 0, 0, 398, 1441, 1, 0, 0, 0, 400, 1445, 1, 0, 0, 0, 402, 1451, 1, 0, 0, 0, 404, 1457, 1, 0, 0, 0, 406, 1463, 1, 0, 0, 0, 408, 409, 5, 100, 0, 0, 409, 410, 5, 105, 0, 0, 410, 411, 5, 115, 0, 0, 411, 412, 5, 115, 0, 0, 412, 413, 5, 101, 0, 0, 413, 414, 5, 99, 0, 0, 414, 415, 5, 116, 0, 0, 415, 416, 1, 0, 0, 0, 416, 417, 6, 0, 0, 0, 417, 17, 1, 0, 0, 0, 418, 419, 5, 100, 0, 0, 419, 420, 5, 114, 0, 0, 420, 421, 5, 111, 0, 0, 421, 422, 5, 112, 0, 0, 422, 423, 1, 0, 0, 0, 
423, 424, 6, 1, 1, 0, 424, 19, 1, 0, 0, 0, 425, 426, 5, 101, 0, 0, 426, 427, 5, 110, 0, 0, 427, 428, 5, 114, 0, 0, 428, 429, 5, 105, 0, 0, 429, 430, 5, 99, 0, 0, 430, 431, 5, 104, 0, 0, 431, 432, 1, 0, 0, 0, 432, 433, 6, 2, 2, 0, 433, 21, 1, 0, 0, 0, 434, 435, 5, 101, 0, 0, 435, 436, 5, 118, 0, 0, 436, 437, 5, 97, 0, 0, 437, 438, 5, 108, 0, 0, 438, 439, 1, 0, 0, 0, 439, 440, 6, 3, 0, 0, 440, 23, 1, 0, 0, 0, 441, 442, 5, 101, 0, 0, 442, 443, 5, 120, 0, 0, 443, 444, 5, 112, 0, 0, 444, 445, 5, 108, 0, 0, 445, 446, 5, 97, 0, 0, 446, 447, 5, 105, 0, 0, 447, 448, 5, 110, 0, 0, 448, 449, 1, 0, 0, 0, 449, 450, 6, 4, 3, 0, 450, 25, 1, 0, 0, 0, 451, 452, 5, 102, 0, 0, 452, 453, 5, 114, 0, 0, 453, 454, 5, 111, 0, 0, 454, 455, 5, 109, 0, 0, 455, 456, 1, 0, 0, 0, 456, 457, 6, 5, 4, 0, 457, 27, 1, 0, 0, 0, 458, 459, 5, 103, 0, 0, 459, 460, 5, 114, 0, 0, 460, 461, 5, 111, 0, 0, 461, 462, 5, 107, 0, 0, 462, 463, 1, 0, 0, 0, 463, 464, 6, 6, 0, 0, 464, 29, 1, 0, 0, 0, 465, 466, 5, 105, 0, 0, 466, 467, 5, 110, 0, 0, 467, 468, 5, 108, 0, 0, 468, 469, 5, 105, 0, 0, 469, 470, 5, 110, 0, 0, 470, 471, 5, 101, 0, 0, 471, 472, 5, 115, 0, 0, 472, 473, 5, 116, 0, 0, 473, 474, 5, 97, 0, 0, 474, 475, 5, 116, 0, 0, 475, 476, 5, 115, 0, 0, 476, 477, 1, 0, 0, 0, 477, 478, 6, 7, 0, 0, 478, 31, 1, 0, 0, 0, 479, 480, 5, 107, 0, 0, 480, 481, 5, 101, 0, 0, 481, 482, 5, 101, 0, 0, 482, 483, 5, 112, 0, 0, 483, 484, 1, 0, 0, 0, 484, 485, 6, 8, 1, 0, 485, 33, 1, 0, 0, 0, 486, 487, 5, 108, 0, 0, 487, 488, 5, 105, 0, 0, 488, 489, 5, 109, 0, 0, 489, 490, 5, 105, 0, 0, 490, 491, 5, 116, 0, 0, 491, 492, 1, 0, 0, 0, 492, 493, 6, 9, 0, 0, 493, 35, 1, 0, 0, 0, 494, 495, 5, 108, 0, 0, 495, 496, 5, 111, 0, 0, 496, 497, 5, 111, 0, 0, 497, 498, 5, 107, 0, 0, 498, 499, 5, 117, 0, 0, 499, 500, 5, 112, 0, 0, 500, 501, 1, 0, 0, 0, 501, 502, 6, 10, 5, 0, 502, 37, 1, 0, 0, 0, 503, 504, 5, 109, 0, 0, 504, 505, 5, 101, 0, 0, 505, 506, 5, 116, 0, 0, 506, 507, 5, 97, 0, 0, 507, 508, 1, 0, 0, 0, 508, 509, 6, 11, 6, 0, 509, 39, 1, 0, 0, 0, 510, 511, 5, 109, 0, 0, 511, 512, 5, 101, 0, 0, 512, 513, 5, 116, 0, 0, 513, 514, 5, 114, 0, 0, 514, 515, 5, 105, 0, 0, 515, 516, 5, 99, 0, 0, 516, 517, 5, 115, 0, 0, 517, 518, 1, 0, 0, 0, 518, 519, 6, 12, 7, 0, 519, 41, 1, 0, 0, 0, 520, 521, 5, 109, 0, 0, 521, 522, 5, 118, 0, 0, 522, 523, 5, 95, 0, 0, 523, 524, 5, 101, 0, 0, 524, 525, 5, 120, 0, 0, 525, 526, 5, 112, 0, 0, 526, 527, 5, 97, 0, 0, 527, 528, 5, 110, 0, 0, 528, 529, 5, 100, 0, 0, 529, 530, 1, 0, 0, 0, 530, 531, 6, 13, 8, 0, 531, 43, 1, 0, 0, 0, 532, 533, 5, 114, 0, 0, 533, 534, 5, 101, 0, 0, 534, 535, 5, 110, 0, 0, 535, 536, 5, 97, 0, 0, 536, 537, 5, 109, 0, 0, 537, 538, 5, 101, 0, 0, 538, 539, 1, 0, 0, 0, 539, 540, 6, 14, 9, 0, 540, 45, 1, 0, 0, 0, 541, 542, 5, 114, 0, 0, 542, 543, 5, 111, 0, 0, 543, 544, 5, 119, 0, 0, 544, 545, 1, 0, 0, 0, 545, 546, 6, 15, 0, 0, 546, 47, 1, 0, 0, 0, 547, 548, 5, 115, 0, 0, 548, 549, 5, 104, 0, 0, 549, 550, 5, 111, 0, 0, 550, 551, 5, 119, 0, 0, 551, 552, 1, 0, 0, 0, 552, 553, 6, 16, 10, 0, 553, 49, 1, 0, 0, 0, 554, 555, 5, 115, 0, 0, 555, 556, 5, 111, 0, 0, 556, 557, 5, 114, 0, 0, 557, 558, 5, 116, 0, 0, 558, 559, 1, 0, 0, 0, 559, 560, 6, 17, 0, 0, 560, 51, 1, 0, 0, 0, 561, 562, 5, 115, 0, 0, 562, 563, 5, 116, 0, 0, 563, 564, 5, 97, 0, 0, 564, 565, 5, 116, 0, 0, 565, 566, 5, 115, 0, 0, 566, 567, 1, 0, 0, 0, 567, 568, 6, 18, 0, 0, 568, 53, 1, 0, 0, 0, 569, 570, 5, 119, 0, 0, 570, 571, 5, 104, 0, 0, 571, 572, 5, 101, 0, 0, 572, 573, 5, 114, 0, 0, 573, 574, 5, 101, 0, 0, 574, 575, 1, 0, 0, 0, 575, 576, 6, 19, 0, 0, 576, 55, 
1, 0, 0, 0, 577, 578, 5, 109, 0, 0, 578, 579, 5, 97, 0, 0, 579, 580, 5, 116, 0, 0, 580, 581, 5, 99, 0, 0, 581, 582, 5, 104, 0, 0, 582, 583, 1, 0, 0, 0, 583, 584, 6, 20, 0, 0, 584, 57, 1, 0, 0, 0, 585, 587, 8, 0, 0, 0, 586, 585, 1, 0, 0, 0, 587, 588, 1, 0, 0, 0, 588, 586, 1, 0, 0, 0, 588, 589, 1, 0, 0, 0, 589, 590, 1, 0, 0, 0, 590, 591, 6, 21, 0, 0, 591, 59, 1, 0, 0, 0, 592, 593, 5, 47, 0, 0, 593, 594, 5, 47, 0, 0, 594, 598, 1, 0, 0, 0, 595, 597, 8, 1, 0, 0, 596, 595, 1, 0, 0, 0, 597, 600, 1, 0, 0, 0, 598, 596, 1, 0, 0, 0, 598, 599, 1, 0, 0, 0, 599, 602, 1, 0, 0, 0, 600, 598, 1, 0, 0, 0, 601, 603, 5, 13, 0, 0, 602, 601, 1, 0, 0, 0, 602, 603, 1, 0, 0, 0, 603, 605, 1, 0, 0, 0, 604, 606, 5, 10, 0, 0, 605, 604, 1, 0, 0, 0, 605, 606, 1, 0, 0, 0, 606, 607, 1, 0, 0, 0, 607, 608, 6, 22, 11, 0, 608, 61, 1, 0, 0, 0, 609, 610, 5, 47, 0, 0, 610, 611, 5, 42, 0, 0, 611, 616, 1, 0, 0, 0, 612, 615, 3, 62, 23, 0, 613, 615, 9, 0, 0, 0, 614, 612, 1, 0, 0, 0, 614, 613, 1, 0, 0, 0, 615, 618, 1, 0, 0, 0, 616, 617, 1, 0, 0, 0, 616, 614, 1, 0, 0, 0, 617, 619, 1, 0, 0, 0, 618, 616, 1, 0, 0, 0, 619, 620, 5, 42, 0, 0, 620, 621, 5, 47, 0, 0, 621, 622, 1, 0, 0, 0, 622, 623, 6, 23, 11, 0, 623, 63, 1, 0, 0, 0, 624, 626, 7, 2, 0, 0, 625, 624, 1, 0, 0, 0, 626, 627, 1, 0, 0, 0, 627, 625, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 629, 1, 0, 0, 0, 629, 630, 6, 24, 11, 0, 630, 65, 1, 0, 0, 0, 631, 635, 8, 3, 0, 0, 632, 633, 5, 47, 0, 0, 633, 635, 8, 4, 0, 0, 634, 631, 1, 0, 0, 0, 634, 632, 1, 0, 0, 0, 635, 67, 1, 0, 0, 0, 636, 638, 3, 66, 25, 0, 637, 636, 1, 0, 0, 0, 638, 639, 1, 0, 0, 0, 639, 637, 1, 0, 0, 0, 639, 640, 1, 0, 0, 0, 640, 69, 1, 0, 0, 0, 641, 642, 3, 182, 83, 0, 642, 643, 1, 0, 0, 0, 643, 644, 6, 27, 12, 0, 644, 645, 6, 27, 13, 0, 645, 71, 1, 0, 0, 0, 646, 647, 3, 80, 32, 0, 647, 648, 1, 0, 0, 0, 648, 649, 6, 28, 14, 0, 649, 650, 6, 28, 15, 0, 650, 73, 1, 0, 0, 0, 651, 652, 3, 64, 24, 0, 652, 653, 1, 0, 0, 0, 653, 654, 6, 29, 11, 0, 654, 75, 1, 0, 0, 0, 655, 656, 3, 60, 22, 0, 656, 657, 1, 0, 0, 0, 657, 658, 6, 30, 11, 0, 658, 77, 1, 0, 0, 0, 659, 660, 3, 62, 23, 0, 660, 661, 1, 0, 0, 0, 661, 662, 6, 31, 11, 0, 662, 79, 1, 0, 0, 0, 663, 664, 5, 124, 0, 0, 664, 665, 1, 0, 0, 0, 665, 666, 6, 32, 15, 0, 666, 81, 1, 0, 0, 0, 667, 668, 7, 5, 0, 0, 668, 83, 1, 0, 0, 0, 669, 670, 7, 6, 0, 0, 670, 85, 1, 0, 0, 0, 671, 672, 5, 92, 0, 0, 672, 673, 7, 7, 0, 0, 673, 87, 1, 0, 0, 0, 674, 675, 8, 8, 0, 0, 675, 89, 1, 0, 0, 0, 676, 678, 7, 9, 0, 0, 677, 679, 7, 10, 0, 0, 678, 677, 1, 0, 0, 0, 678, 679, 1, 0, 0, 0, 679, 681, 1, 0, 0, 0, 680, 682, 3, 82, 33, 0, 681, 680, 1, 0, 0, 0, 682, 683, 1, 0, 0, 0, 683, 681, 1, 0, 0, 0, 683, 684, 1, 0, 0, 0, 684, 91, 1, 0, 0, 0, 685, 686, 5, 64, 0, 0, 686, 93, 1, 0, 0, 0, 687, 688, 5, 96, 0, 0, 688, 95, 1, 0, 0, 0, 689, 693, 8, 11, 0, 0, 690, 691, 5, 96, 0, 0, 691, 693, 5, 96, 0, 0, 692, 689, 1, 0, 0, 0, 692, 690, 1, 0, 0, 0, 693, 97, 1, 0, 0, 0, 694, 695, 5, 95, 0, 0, 695, 99, 1, 0, 0, 0, 696, 700, 3, 84, 34, 0, 697, 700, 3, 82, 33, 0, 698, 700, 3, 98, 41, 0, 699, 696, 1, 0, 0, 0, 699, 697, 1, 0, 0, 0, 699, 698, 1, 0, 0, 0, 700, 101, 1, 0, 0, 0, 701, 706, 5, 34, 0, 0, 702, 705, 3, 86, 35, 0, 703, 705, 3, 88, 36, 0, 704, 702, 1, 0, 0, 0, 704, 703, 1, 0, 0, 0, 705, 708, 1, 0, 0, 0, 706, 704, 1, 0, 0, 0, 706, 707, 1, 0, 0, 0, 707, 709, 1, 0, 0, 0, 708, 706, 1, 0, 0, 0, 709, 731, 5, 34, 0, 0, 710, 711, 5, 34, 0, 0, 711, 712, 5, 34, 0, 0, 712, 713, 5, 34, 0, 0, 713, 717, 1, 0, 0, 0, 714, 716, 8, 1, 0, 0, 715, 714, 1, 0, 0, 0, 716, 719, 1, 0, 0, 0, 717, 718, 1, 0, 0, 0, 717, 715, 1, 0, 0, 0, 718, 
720, 1, 0, 0, 0, 719, 717, 1, 0, 0, 0, 720, 721, 5, 34, 0, 0, 721, 722, 5, 34, 0, 0, 722, 723, 5, 34, 0, 0, 723, 725, 1, 0, 0, 0, 724, 726, 5, 34, 0, 0, 725, 724, 1, 0, 0, 0, 725, 726, 1, 0, 0, 0, 726, 728, 1, 0, 0, 0, 727, 729, 5, 34, 0, 0, 728, 727, 1, 0, 0, 0, 728, 729, 1, 0, 0, 0, 729, 731, 1, 0, 0, 0, 730, 701, 1, 0, 0, 0, 730, 710, 1, 0, 0, 0, 731, 103, 1, 0, 0, 0, 732, 734, 3, 82, 33, 0, 733, 732, 1, 0, 0, 0, 734, 735, 1, 0, 0, 0, 735, 733, 1, 0, 0, 0, 735, 736, 1, 0, 0, 0, 736, 105, 1, 0, 0, 0, 737, 739, 3, 82, 33, 0, 738, 737, 1, 0, 0, 0, 739, 740, 1, 0, 0, 0, 740, 738, 1, 0, 0, 0, 740, 741, 1, 0, 0, 0, 741, 742, 1, 0, 0, 0, 742, 746, 3, 122, 53, 0, 743, 745, 3, 82, 33, 0, 744, 743, 1, 0, 0, 0, 745, 748, 1, 0, 0, 0, 746, 744, 1, 0, 0, 0, 746, 747, 1, 0, 0, 0, 747, 780, 1, 0, 0, 0, 748, 746, 1, 0, 0, 0, 749, 751, 3, 122, 53, 0, 750, 752, 3, 82, 33, 0, 751, 750, 1, 0, 0, 0, 752, 753, 1, 0, 0, 0, 753, 751, 1, 0, 0, 0, 753, 754, 1, 0, 0, 0, 754, 780, 1, 0, 0, 0, 755, 757, 3, 82, 33, 0, 756, 755, 1, 0, 0, 0, 757, 758, 1, 0, 0, 0, 758, 756, 1, 0, 0, 0, 758, 759, 1, 0, 0, 0, 759, 767, 1, 0, 0, 0, 760, 764, 3, 122, 53, 0, 761, 763, 3, 82, 33, 0, 762, 761, 1, 0, 0, 0, 763, 766, 1, 0, 0, 0, 764, 762, 1, 0, 0, 0, 764, 765, 1, 0, 0, 0, 765, 768, 1, 0, 0, 0, 766, 764, 1, 0, 0, 0, 767, 760, 1, 0, 0, 0, 767, 768, 1, 0, 0, 0, 768, 769, 1, 0, 0, 0, 769, 770, 3, 90, 37, 0, 770, 780, 1, 0, 0, 0, 771, 773, 3, 122, 53, 0, 772, 774, 3, 82, 33, 0, 773, 772, 1, 0, 0, 0, 774, 775, 1, 0, 0, 0, 775, 773, 1, 0, 0, 0, 775, 776, 1, 0, 0, 0, 776, 777, 1, 0, 0, 0, 777, 778, 3, 90, 37, 0, 778, 780, 1, 0, 0, 0, 779, 738, 1, 0, 0, 0, 779, 749, 1, 0, 0, 0, 779, 756, 1, 0, 0, 0, 779, 771, 1, 0, 0, 0, 780, 107, 1, 0, 0, 0, 781, 782, 5, 98, 0, 0, 782, 783, 5, 121, 0, 0, 783, 109, 1, 0, 0, 0, 784, 785, 5, 97, 0, 0, 785, 786, 5, 110, 0, 0, 786, 787, 5, 100, 0, 0, 787, 111, 1, 0, 0, 0, 788, 789, 5, 97, 0, 0, 789, 790, 5, 115, 0, 0, 790, 791, 5, 99, 0, 0, 791, 113, 1, 0, 0, 0, 792, 793, 5, 61, 0, 0, 793, 115, 1, 0, 0, 0, 794, 795, 5, 58, 0, 0, 795, 796, 5, 58, 0, 0, 796, 117, 1, 0, 0, 0, 797, 798, 5, 44, 0, 0, 798, 119, 1, 0, 0, 0, 799, 800, 5, 100, 0, 0, 800, 801, 5, 101, 0, 0, 801, 802, 5, 115, 0, 0, 802, 803, 5, 99, 0, 0, 803, 121, 1, 0, 0, 0, 804, 805, 5, 46, 0, 0, 805, 123, 1, 0, 0, 0, 806, 807, 5, 102, 0, 0, 807, 808, 5, 97, 0, 0, 808, 809, 5, 108, 0, 0, 809, 810, 5, 115, 0, 0, 810, 811, 5, 101, 0, 0, 811, 125, 1, 0, 0, 0, 812, 813, 5, 102, 0, 0, 813, 814, 5, 105, 0, 0, 814, 815, 5, 114, 0, 0, 815, 816, 5, 115, 0, 0, 816, 817, 5, 116, 0, 0, 817, 127, 1, 0, 0, 0, 818, 819, 5, 105, 0, 0, 819, 820, 5, 110, 0, 0, 820, 129, 1, 0, 0, 0, 821, 822, 5, 105, 0, 0, 822, 823, 5, 115, 0, 0, 823, 131, 1, 0, 0, 0, 824, 825, 5, 108, 0, 0, 825, 826, 5, 97, 0, 0, 826, 827, 5, 115, 0, 0, 827, 828, 5, 116, 0, 0, 828, 133, 1, 0, 0, 0, 829, 830, 5, 108, 0, 0, 830, 831, 5, 105, 0, 0, 831, 832, 5, 107, 0, 0, 832, 833, 5, 101, 0, 0, 833, 135, 1, 0, 0, 0, 834, 835, 5, 40, 0, 0, 835, 137, 1, 0, 0, 0, 836, 837, 5, 109, 0, 0, 837, 838, 5, 97, 0, 0, 838, 839, 5, 116, 0, 0, 839, 840, 5, 99, 0, 0, 840, 841, 5, 104, 0, 0, 841, 139, 1, 0, 0, 0, 842, 843, 5, 110, 0, 0, 843, 844, 5, 111, 0, 0, 844, 845, 5, 116, 0, 0, 845, 141, 1, 0, 0, 0, 846, 847, 5, 110, 0, 0, 847, 848, 5, 117, 0, 0, 848, 849, 5, 108, 0, 0, 849, 850, 5, 108, 0, 0, 850, 143, 1, 0, 0, 0, 851, 852, 5, 110, 0, 0, 852, 853, 5, 117, 0, 0, 853, 854, 5, 108, 0, 0, 854, 855, 5, 108, 0, 0, 855, 856, 5, 115, 0, 0, 856, 145, 1, 0, 0, 0, 857, 858, 5, 111, 0, 0, 858, 859, 5, 114, 0, 0, 859, 147, 
1, 0, 0, 0, 860, 861, 5, 63, 0, 0, 861, 149, 1, 0, 0, 0, 862, 863, 5, 114, 0, 0, 863, 864, 5, 108, 0, 0, 864, 865, 5, 105, 0, 0, 865, 866, 5, 107, 0, 0, 866, 867, 5, 101, 0, 0, 867, 151, 1, 0, 0, 0, 868, 869, 5, 41, 0, 0, 869, 153, 1, 0, 0, 0, 870, 871, 5, 116, 0, 0, 871, 872, 5, 114, 0, 0, 872, 873, 5, 117, 0, 0, 873, 874, 5, 101, 0, 0, 874, 155, 1, 0, 0, 0, 875, 876, 5, 61, 0, 0, 876, 877, 5, 61, 0, 0, 877, 157, 1, 0, 0, 0, 878, 879, 5, 61, 0, 0, 879, 880, 5, 126, 0, 0, 880, 159, 1, 0, 0, 0, 881, 882, 5, 33, 0, 0, 882, 883, 5, 61, 0, 0, 883, 161, 1, 0, 0, 0, 884, 885, 5, 60, 0, 0, 885, 163, 1, 0, 0, 0, 886, 887, 5, 60, 0, 0, 887, 888, 5, 61, 0, 0, 888, 165, 1, 0, 0, 0, 889, 890, 5, 62, 0, 0, 890, 167, 1, 0, 0, 0, 891, 892, 5, 62, 0, 0, 892, 893, 5, 61, 0, 0, 893, 169, 1, 0, 0, 0, 894, 895, 5, 43, 0, 0, 895, 171, 1, 0, 0, 0, 896, 897, 5, 45, 0, 0, 897, 173, 1, 0, 0, 0, 898, 899, 5, 42, 0, 0, 899, 175, 1, 0, 0, 0, 900, 901, 5, 47, 0, 0, 901, 177, 1, 0, 0, 0, 902, 903, 5, 37, 0, 0, 903, 179, 1, 0, 0, 0, 904, 905, 3, 148, 66, 0, 905, 909, 3, 84, 34, 0, 906, 908, 3, 100, 42, 0, 907, 906, 1, 0, 0, 0, 908, 911, 1, 0, 0, 0, 909, 907, 1, 0, 0, 0, 909, 910, 1, 0, 0, 0, 910, 919, 1, 0, 0, 0, 911, 909, 1, 0, 0, 0, 912, 914, 3, 148, 66, 0, 913, 915, 3, 82, 33, 0, 914, 913, 1, 0, 0, 0, 915, 916, 1, 0, 0, 0, 916, 914, 1, 0, 0, 0, 916, 917, 1, 0, 0, 0, 917, 919, 1, 0, 0, 0, 918, 904, 1, 0, 0, 0, 918, 912, 1, 0, 0, 0, 919, 181, 1, 0, 0, 0, 920, 921, 5, 91, 0, 0, 921, 922, 1, 0, 0, 0, 922, 923, 6, 83, 0, 0, 923, 924, 6, 83, 0, 0, 924, 183, 1, 0, 0, 0, 925, 926, 5, 93, 0, 0, 926, 927, 1, 0, 0, 0, 927, 928, 6, 84, 15, 0, 928, 929, 6, 84, 15, 0, 929, 185, 1, 0, 0, 0, 930, 934, 3, 84, 34, 0, 931, 933, 3, 100, 42, 0, 932, 931, 1, 0, 0, 0, 933, 936, 1, 0, 0, 0, 934, 932, 1, 0, 0, 0, 934, 935, 1, 0, 0, 0, 935, 947, 1, 0, 0, 0, 936, 934, 1, 0, 0, 0, 937, 940, 3, 98, 41, 0, 938, 940, 3, 92, 38, 0, 939, 937, 1, 0, 0, 0, 939, 938, 1, 0, 0, 0, 940, 942, 1, 0, 0, 0, 941, 943, 3, 100, 42, 0, 942, 941, 1, 0, 0, 0, 943, 944, 1, 0, 0, 0, 944, 942, 1, 0, 0, 0, 944, 945, 1, 0, 0, 0, 945, 947, 1, 0, 0, 0, 946, 930, 1, 0, 0, 0, 946, 939, 1, 0, 0, 0, 947, 187, 1, 0, 0, 0, 948, 950, 3, 94, 39, 0, 949, 951, 3, 96, 40, 0, 950, 949, 1, 0, 0, 0, 951, 952, 1, 0, 0, 0, 952, 950, 1, 0, 0, 0, 952, 953, 1, 0, 0, 0, 953, 954, 1, 0, 0, 0, 954, 955, 3, 94, 39, 0, 955, 189, 1, 0, 0, 0, 956, 957, 3, 188, 86, 0, 957, 191, 1, 0, 0, 0, 958, 959, 3, 60, 22, 0, 959, 960, 1, 0, 0, 0, 960, 961, 6, 88, 11, 0, 961, 193, 1, 0, 0, 0, 962, 963, 3, 62, 23, 0, 963, 964, 1, 0, 0, 0, 964, 965, 6, 89, 11, 0, 965, 195, 1, 0, 0, 0, 966, 967, 3, 64, 24, 0, 967, 968, 1, 0, 0, 0, 968, 969, 6, 90, 11, 0, 969, 197, 1, 0, 0, 0, 970, 971, 3, 80, 32, 0, 971, 972, 1, 0, 0, 0, 972, 973, 6, 91, 14, 0, 973, 974, 6, 91, 15, 0, 974, 199, 1, 0, 0, 0, 975, 976, 3, 182, 83, 0, 976, 977, 1, 0, 0, 0, 977, 978, 6, 92, 12, 0, 978, 201, 1, 0, 0, 0, 979, 980, 3, 184, 84, 0, 980, 981, 1, 0, 0, 0, 981, 982, 6, 93, 16, 0, 982, 203, 1, 0, 0, 0, 983, 984, 3, 368, 176, 0, 984, 985, 1, 0, 0, 0, 985, 986, 6, 94, 17, 0, 986, 205, 1, 0, 0, 0, 987, 988, 3, 118, 51, 0, 988, 989, 1, 0, 0, 0, 989, 990, 6, 95, 18, 0, 990, 207, 1, 0, 0, 0, 991, 992, 3, 114, 49, 0, 992, 993, 1, 0, 0, 0, 993, 994, 6, 96, 19, 0, 994, 209, 1, 0, 0, 0, 995, 996, 5, 109, 0, 0, 996, 997, 5, 101, 0, 0, 997, 998, 5, 116, 0, 0, 998, 999, 5, 97, 0, 0, 999, 1000, 5, 100, 0, 0, 1000, 1001, 5, 97, 0, 0, 1001, 1002, 5, 116, 0, 0, 1002, 1003, 5, 97, 0, 0, 1003, 211, 1, 0, 0, 0, 1004, 1005, 3, 68, 26, 0, 1005, 1006, 1, 0, 0, 0, 
1006, 1007, 6, 98, 20, 0, 1007, 213, 1, 0, 0, 0, 1008, 1009, 3, 102, 43, 0, 1009, 1010, 1, 0, 0, 0, 1010, 1011, 6, 99, 21, 0, 1011, 215, 1, 0, 0, 0, 1012, 1013, 3, 60, 22, 0, 1013, 1014, 1, 0, 0, 0, 1014, 1015, 6, 100, 11, 0, 1015, 217, 1, 0, 0, 0, 1016, 1017, 3, 62, 23, 0, 1017, 1018, 1, 0, 0, 0, 1018, 1019, 6, 101, 11, 0, 1019, 219, 1, 0, 0, 0, 1020, 1021, 3, 64, 24, 0, 1021, 1022, 1, 0, 0, 0, 1022, 1023, 6, 102, 11, 0, 1023, 221, 1, 0, 0, 0, 1024, 1025, 3, 80, 32, 0, 1025, 1026, 1, 0, 0, 0, 1026, 1027, 6, 103, 14, 0, 1027, 1028, 6, 103, 15, 0, 1028, 223, 1, 0, 0, 0, 1029, 1030, 3, 122, 53, 0, 1030, 1031, 1, 0, 0, 0, 1031, 1032, 6, 104, 22, 0, 1032, 225, 1, 0, 0, 0, 1033, 1034, 3, 118, 51, 0, 1034, 1035, 1, 0, 0, 0, 1035, 1036, 6, 105, 18, 0, 1036, 227, 1, 0, 0, 0, 1037, 1042, 3, 84, 34, 0, 1038, 1042, 3, 82, 33, 0, 1039, 1042, 3, 98, 41, 0, 1040, 1042, 3, 174, 79, 0, 1041, 1037, 1, 0, 0, 0, 1041, 1038, 1, 0, 0, 0, 1041, 1039, 1, 0, 0, 0, 1041, 1040, 1, 0, 0, 0, 1042, 229, 1, 0, 0, 0, 1043, 1046, 3, 84, 34, 0, 1044, 1046, 3, 174, 79, 0, 1045, 1043, 1, 0, 0, 0, 1045, 1044, 1, 0, 0, 0, 1046, 1050, 1, 0, 0, 0, 1047, 1049, 3, 228, 106, 0, 1048, 1047, 1, 0, 0, 0, 1049, 1052, 1, 0, 0, 0, 1050, 1048, 1, 0, 0, 0, 1050, 1051, 1, 0, 0, 0, 1051, 1063, 1, 0, 0, 0, 1052, 1050, 1, 0, 0, 0, 1053, 1056, 3, 98, 41, 0, 1054, 1056, 3, 92, 38, 0, 1055, 1053, 1, 0, 0, 0, 1055, 1054, 1, 0, 0, 0, 1056, 1058, 1, 0, 0, 0, 1057, 1059, 3, 228, 106, 0, 1058, 1057, 1, 0, 0, 0, 1059, 1060, 1, 0, 0, 0, 1060, 1058, 1, 0, 0, 0, 1060, 1061, 1, 0, 0, 0, 1061, 1063, 1, 0, 0, 0, 1062, 1045, 1, 0, 0, 0, 1062, 1055, 1, 0, 0, 0, 1063, 231, 1, 0, 0, 0, 1064, 1067, 3, 230, 107, 0, 1065, 1067, 3, 188, 86, 0, 1066, 1064, 1, 0, 0, 0, 1066, 1065, 1, 0, 0, 0, 1067, 1068, 1, 0, 0, 0, 1068, 1066, 1, 0, 0, 0, 1068, 1069, 1, 0, 0, 0, 1069, 233, 1, 0, 0, 0, 1070, 1071, 3, 60, 22, 0, 1071, 1072, 1, 0, 0, 0, 1072, 1073, 6, 109, 11, 0, 1073, 235, 1, 0, 0, 0, 1074, 1075, 3, 62, 23, 0, 1075, 1076, 1, 0, 0, 0, 1076, 1077, 6, 110, 11, 0, 1077, 237, 1, 0, 0, 0, 1078, 1079, 3, 64, 24, 0, 1079, 1080, 1, 0, 0, 0, 1080, 1081, 6, 111, 11, 0, 1081, 239, 1, 0, 0, 0, 1082, 1083, 3, 80, 32, 0, 1083, 1084, 1, 0, 0, 0, 1084, 1085, 6, 112, 14, 0, 1085, 1086, 6, 112, 15, 0, 1086, 241, 1, 0, 0, 0, 1087, 1088, 3, 114, 49, 0, 1088, 1089, 1, 0, 0, 0, 1089, 1090, 6, 113, 19, 0, 1090, 243, 1, 0, 0, 0, 1091, 1092, 3, 118, 51, 0, 1092, 1093, 1, 0, 0, 0, 1093, 1094, 6, 114, 18, 0, 1094, 245, 1, 0, 0, 0, 1095, 1096, 3, 122, 53, 0, 1096, 1097, 1, 0, 0, 0, 1097, 1098, 6, 115, 22, 0, 1098, 247, 1, 0, 0, 0, 1099, 1100, 5, 97, 0, 0, 1100, 1101, 5, 115, 0, 0, 1101, 249, 1, 0, 0, 0, 1102, 1103, 3, 232, 108, 0, 1103, 1104, 1, 0, 0, 0, 1104, 1105, 6, 117, 23, 0, 1105, 251, 1, 0, 0, 0, 1106, 1107, 3, 60, 22, 0, 1107, 1108, 1, 0, 0, 0, 1108, 1109, 6, 118, 11, 0, 1109, 253, 1, 0, 0, 0, 1110, 1111, 3, 62, 23, 0, 1111, 1112, 1, 0, 0, 0, 1112, 1113, 6, 119, 11, 0, 1113, 255, 1, 0, 0, 0, 1114, 1115, 3, 64, 24, 0, 1115, 1116, 1, 0, 0, 0, 1116, 1117, 6, 120, 11, 0, 1117, 257, 1, 0, 0, 0, 1118, 1119, 3, 80, 32, 0, 1119, 1120, 1, 0, 0, 0, 1120, 1121, 6, 121, 14, 0, 1121, 1122, 6, 121, 15, 0, 1122, 259, 1, 0, 0, 0, 1123, 1124, 3, 182, 83, 0, 1124, 1125, 1, 0, 0, 0, 1125, 1126, 6, 122, 12, 0, 1126, 1127, 6, 122, 24, 0, 1127, 261, 1, 0, 0, 0, 1128, 1129, 5, 111, 0, 0, 1129, 1130, 5, 110, 0, 0, 1130, 1131, 1, 0, 0, 0, 1131, 1132, 6, 123, 25, 0, 1132, 263, 1, 0, 0, 0, 1133, 1134, 5, 119, 0, 0, 1134, 1135, 5, 105, 0, 0, 1135, 1136, 5, 116, 0, 0, 1136, 1137, 5, 104, 0, 0, 1137, 1138, 1, 0, 0, 0, 
1138, 1139, 6, 124, 25, 0, 1139, 265, 1, 0, 0, 0, 1140, 1141, 8, 12, 0, 0, 1141, 267, 1, 0, 0, 0, 1142, 1144, 3, 266, 125, 0, 1143, 1142, 1, 0, 0, 0, 1144, 1145, 1, 0, 0, 0, 1145, 1143, 1, 0, 0, 0, 1145, 1146, 1, 0, 0, 0, 1146, 1147, 1, 0, 0, 0, 1147, 1148, 3, 368, 176, 0, 1148, 1150, 1, 0, 0, 0, 1149, 1143, 1, 0, 0, 0, 1149, 1150, 1, 0, 0, 0, 1150, 1152, 1, 0, 0, 0, 1151, 1153, 3, 266, 125, 0, 1152, 1151, 1, 0, 0, 0, 1153, 1154, 1, 0, 0, 0, 1154, 1152, 1, 0, 0, 0, 1154, 1155, 1, 0, 0, 0, 1155, 269, 1, 0, 0, 0, 1156, 1157, 3, 268, 126, 0, 1157, 1158, 1, 0, 0, 0, 1158, 1159, 6, 127, 26, 0, 1159, 271, 1, 0, 0, 0, 1160, 1161, 3, 60, 22, 0, 1161, 1162, 1, 0, 0, 0, 1162, 1163, 6, 128, 11, 0, 1163, 273, 1, 0, 0, 0, 1164, 1165, 3, 62, 23, 0, 1165, 1166, 1, 0, 0, 0, 1166, 1167, 6, 129, 11, 0, 1167, 275, 1, 0, 0, 0, 1168, 1169, 3, 64, 24, 0, 1169, 1170, 1, 0, 0, 0, 1170, 1171, 6, 130, 11, 0, 1171, 277, 1, 0, 0, 0, 1172, 1173, 3, 80, 32, 0, 1173, 1174, 1, 0, 0, 0, 1174, 1175, 6, 131, 14, 0, 1175, 1176, 6, 131, 15, 0, 1176, 1177, 6, 131, 15, 0, 1177, 279, 1, 0, 0, 0, 1178, 1179, 3, 114, 49, 0, 1179, 1180, 1, 0, 0, 0, 1180, 1181, 6, 132, 19, 0, 1181, 281, 1, 0, 0, 0, 1182, 1183, 3, 118, 51, 0, 1183, 1184, 1, 0, 0, 0, 1184, 1185, 6, 133, 18, 0, 1185, 283, 1, 0, 0, 0, 1186, 1187, 3, 122, 53, 0, 1187, 1188, 1, 0, 0, 0, 1188, 1189, 6, 134, 22, 0, 1189, 285, 1, 0, 0, 0, 1190, 1191, 3, 264, 124, 0, 1191, 1192, 1, 0, 0, 0, 1192, 1193, 6, 135, 27, 0, 1193, 287, 1, 0, 0, 0, 1194, 1195, 3, 232, 108, 0, 1195, 1196, 1, 0, 0, 0, 1196, 1197, 6, 136, 23, 0, 1197, 289, 1, 0, 0, 0, 1198, 1199, 3, 190, 87, 0, 1199, 1200, 1, 0, 0, 0, 1200, 1201, 6, 137, 28, 0, 1201, 291, 1, 0, 0, 0, 1202, 1203, 3, 60, 22, 0, 1203, 1204, 1, 0, 0, 0, 1204, 1205, 6, 138, 11, 0, 1205, 293, 1, 0, 0, 0, 1206, 1207, 3, 62, 23, 0, 1207, 1208, 1, 0, 0, 0, 1208, 1209, 6, 139, 11, 0, 1209, 295, 1, 0, 0, 0, 1210, 1211, 3, 64, 24, 0, 1211, 1212, 1, 0, 0, 0, 1212, 1213, 6, 140, 11, 0, 1213, 297, 1, 0, 0, 0, 1214, 1215, 3, 80, 32, 0, 1215, 1216, 1, 0, 0, 0, 1216, 1217, 6, 141, 14, 0, 1217, 1218, 6, 141, 15, 0, 1218, 299, 1, 0, 0, 0, 1219, 1220, 3, 368, 176, 0, 1220, 1221, 1, 0, 0, 0, 1221, 1222, 6, 142, 17, 0, 1222, 301, 1, 0, 0, 0, 1223, 1224, 3, 118, 51, 0, 1224, 1225, 1, 0, 0, 0, 1225, 1226, 6, 143, 18, 0, 1226, 303, 1, 0, 0, 0, 1227, 1228, 3, 122, 53, 0, 1228, 1229, 1, 0, 0, 0, 1229, 1230, 6, 144, 22, 0, 1230, 305, 1, 0, 0, 0, 1231, 1232, 3, 262, 123, 0, 1232, 1233, 1, 0, 0, 0, 1233, 1234, 6, 145, 29, 0, 1234, 1235, 6, 145, 30, 0, 1235, 307, 1, 0, 0, 0, 1236, 1237, 3, 68, 26, 0, 1237, 1238, 1, 0, 0, 0, 1238, 1239, 6, 146, 20, 0, 1239, 309, 1, 0, 0, 0, 1240, 1241, 3, 102, 43, 0, 1241, 1242, 1, 0, 0, 0, 1242, 1243, 6, 147, 21, 0, 1243, 311, 1, 0, 0, 0, 1244, 1245, 3, 60, 22, 0, 1245, 1246, 1, 0, 0, 0, 1246, 1247, 6, 148, 11, 0, 1247, 313, 1, 0, 0, 0, 1248, 1249, 3, 62, 23, 0, 1249, 1250, 1, 0, 0, 0, 1250, 1251, 6, 149, 11, 0, 1251, 315, 1, 0, 0, 0, 1252, 1253, 3, 64, 24, 0, 1253, 1254, 1, 0, 0, 0, 1254, 1255, 6, 150, 11, 0, 1255, 317, 1, 0, 0, 0, 1256, 1257, 3, 80, 32, 0, 1257, 1258, 1, 0, 0, 0, 1258, 1259, 6, 151, 14, 0, 1259, 1260, 6, 151, 15, 0, 1260, 1261, 6, 151, 15, 0, 1261, 319, 1, 0, 0, 0, 1262, 1263, 3, 118, 51, 0, 1263, 1264, 1, 0, 0, 0, 1264, 1265, 6, 152, 18, 0, 1265, 321, 1, 0, 0, 0, 1266, 1267, 3, 122, 53, 0, 1267, 1268, 1, 0, 0, 0, 1268, 1269, 6, 153, 22, 0, 1269, 323, 1, 0, 0, 0, 1270, 1271, 3, 232, 108, 0, 1271, 1272, 1, 0, 0, 0, 1272, 1273, 6, 154, 23, 0, 1273, 325, 1, 0, 0, 0, 1274, 1275, 3, 60, 22, 0, 1275, 1276, 1, 0, 0, 0, 
1276, 1277, 6, 155, 11, 0, 1277, 327, 1, 0, 0, 0, 1278, 1279, 3, 62, 23, 0, 1279, 1280, 1, 0, 0, 0, 1280, 1281, 6, 156, 11, 0, 1281, 329, 1, 0, 0, 0, 1282, 1283, 3, 64, 24, 0, 1283, 1284, 1, 0, 0, 0, 1284, 1285, 6, 157, 11, 0, 1285, 331, 1, 0, 0, 0, 1286, 1287, 3, 80, 32, 0, 1287, 1288, 1, 0, 0, 0, 1288, 1289, 6, 158, 14, 0, 1289, 1290, 6, 158, 15, 0, 1290, 333, 1, 0, 0, 0, 1291, 1292, 3, 122, 53, 0, 1292, 1293, 1, 0, 0, 0, 1293, 1294, 6, 159, 22, 0, 1294, 335, 1, 0, 0, 0, 1295, 1296, 3, 190, 87, 0, 1296, 1297, 1, 0, 0, 0, 1297, 1298, 6, 160, 28, 0, 1298, 337, 1, 0, 0, 0, 1299, 1300, 3, 186, 85, 0, 1300, 1301, 1, 0, 0, 0, 1301, 1302, 6, 161, 31, 0, 1302, 339, 1, 0, 0, 0, 1303, 1304, 3, 60, 22, 0, 1304, 1305, 1, 0, 0, 0, 1305, 1306, 6, 162, 11, 0, 1306, 341, 1, 0, 0, 0, 1307, 1308, 3, 62, 23, 0, 1308, 1309, 1, 0, 0, 0, 1309, 1310, 6, 163, 11, 0, 1310, 343, 1, 0, 0, 0, 1311, 1312, 3, 64, 24, 0, 1312, 1313, 1, 0, 0, 0, 1313, 1314, 6, 164, 11, 0, 1314, 345, 1, 0, 0, 0, 1315, 1316, 3, 80, 32, 0, 1316, 1317, 1, 0, 0, 0, 1317, 1318, 6, 165, 14, 0, 1318, 1319, 6, 165, 15, 0, 1319, 347, 1, 0, 0, 0, 1320, 1321, 5, 105, 0, 0, 1321, 1322, 5, 110, 0, 0, 1322, 1323, 5, 102, 0, 0, 1323, 1324, 5, 111, 0, 0, 1324, 349, 1, 0, 0, 0, 1325, 1326, 3, 60, 22, 0, 1326, 1327, 1, 0, 0, 0, 1327, 1328, 6, 167, 11, 0, 1328, 351, 1, 0, 0, 0, 1329, 1330, 3, 62, 23, 0, 1330, 1331, 1, 0, 0, 0, 1331, 1332, 6, 168, 11, 0, 1332, 353, 1, 0, 0, 0, 1333, 1334, 3, 64, 24, 0, 1334, 1335, 1, 0, 0, 0, 1335, 1336, 6, 169, 11, 0, 1336, 355, 1, 0, 0, 0, 1337, 1338, 3, 80, 32, 0, 1338, 1339, 1, 0, 0, 0, 1339, 1340, 6, 170, 14, 0, 1340, 1341, 6, 170, 15, 0, 1341, 357, 1, 0, 0, 0, 1342, 1343, 5, 102, 0, 0, 1343, 1344, 5, 117, 0, 0, 1344, 1345, 5, 110, 0, 0, 1345, 1346, 5, 99, 0, 0, 1346, 1347, 5, 116, 0, 0, 1347, 1348, 5, 105, 0, 0, 1348, 1349, 5, 111, 0, 0, 1349, 1350, 5, 110, 0, 0, 1350, 1351, 5, 115, 0, 0, 1351, 359, 1, 0, 0, 0, 1352, 1353, 3, 60, 22, 0, 1353, 1354, 1, 0, 0, 0, 1354, 1355, 6, 172, 11, 0, 1355, 361, 1, 0, 0, 0, 1356, 1357, 3, 62, 23, 0, 1357, 1358, 1, 0, 0, 0, 1358, 1359, 6, 173, 11, 0, 1359, 363, 1, 0, 0, 0, 1360, 1361, 3, 64, 24, 0, 1361, 1362, 1, 0, 0, 0, 1362, 1363, 6, 174, 11, 0, 1363, 365, 1, 0, 0, 0, 1364, 1365, 3, 184, 84, 0, 1365, 1366, 1, 0, 0, 0, 1366, 1367, 6, 175, 16, 0, 1367, 1368, 6, 175, 15, 0, 1368, 367, 1, 0, 0, 0, 1369, 1370, 5, 58, 0, 0, 1370, 369, 1, 0, 0, 0, 1371, 1377, 3, 92, 38, 0, 1372, 1377, 3, 82, 33, 0, 1373, 1377, 3, 122, 53, 0, 1374, 1377, 3, 84, 34, 0, 1375, 1377, 3, 98, 41, 0, 1376, 1371, 1, 0, 0, 0, 1376, 1372, 1, 0, 0, 0, 1376, 1373, 1, 0, 0, 0, 1376, 1374, 1, 0, 0, 0, 1376, 1375, 1, 0, 0, 0, 1377, 1378, 1, 0, 0, 0, 1378, 1376, 1, 0, 0, 0, 1378, 1379, 1, 0, 0, 0, 1379, 371, 1, 0, 0, 0, 1380, 1381, 3, 60, 22, 0, 1381, 1382, 1, 0, 0, 0, 1382, 1383, 6, 178, 11, 0, 1383, 373, 1, 0, 0, 0, 1384, 1385, 3, 62, 23, 0, 1385, 1386, 1, 0, 0, 0, 1386, 1387, 6, 179, 11, 0, 1387, 375, 1, 0, 0, 0, 1388, 1389, 3, 64, 24, 0, 1389, 1390, 1, 0, 0, 0, 1390, 1391, 6, 180, 11, 0, 1391, 377, 1, 0, 0, 0, 1392, 1393, 3, 80, 32, 0, 1393, 1394, 1, 0, 0, 0, 1394, 1395, 6, 181, 14, 0, 1395, 1396, 6, 181, 15, 0, 1396, 379, 1, 0, 0, 0, 1397, 1398, 3, 68, 26, 0, 1398, 1399, 1, 0, 0, 0, 1399, 1400, 6, 182, 20, 0, 1400, 1401, 6, 182, 15, 0, 1401, 1402, 6, 182, 32, 0, 1402, 381, 1, 0, 0, 0, 1403, 1404, 3, 102, 43, 0, 1404, 1405, 1, 0, 0, 0, 1405, 1406, 6, 183, 21, 0, 1406, 1407, 6, 183, 15, 0, 1407, 1408, 6, 183, 32, 0, 1408, 383, 1, 0, 0, 0, 1409, 1410, 3, 60, 22, 0, 1410, 1411, 1, 0, 0, 0, 1411, 1412, 6, 184, 11, 0, 
1412, 385, 1, 0, 0, 0, 1413, 1414, 3, 62, 23, 0, 1414, 1415, 1, 0, 0, 0, 1415, 1416, 6, 185, 11, 0, 1416, 387, 1, 0, 0, 0, 1417, 1418, 3, 64, 24, 0, 1418, 1419, 1, 0, 0, 0, 1419, 1420, 6, 186, 11, 0, 1420, 389, 1, 0, 0, 0, 1421, 1422, 3, 368, 176, 0, 1422, 1423, 1, 0, 0, 0, 1423, 1424, 6, 187, 17, 0, 1424, 1425, 6, 187, 15, 0, 1425, 1426, 6, 187, 7, 0, 1426, 391, 1, 0, 0, 0, 1427, 1428, 3, 118, 51, 0, 1428, 1429, 1, 0, 0, 0, 1429, 1430, 6, 188, 18, 0, 1430, 1431, 6, 188, 15, 0, 1431, 1432, 6, 188, 7, 0, 1432, 393, 1, 0, 0, 0, 1433, 1434, 3, 60, 22, 0, 1434, 1435, 1, 0, 0, 0, 1435, 1436, 6, 189, 11, 0, 1436, 395, 1, 0, 0, 0, 1437, 1438, 3, 62, 23, 0, 1438, 1439, 1, 0, 0, 0, 1439, 1440, 6, 190, 11, 0, 1440, 397, 1, 0, 0, 0, 1441, 1442, 3, 64, 24, 0, 1442, 1443, 1, 0, 0, 0, 1443, 1444, 6, 191, 11, 0, 1444, 399, 1, 0, 0, 0, 1445, 1446, 3, 190, 87, 0, 1446, 1447, 1, 0, 0, 0, 1447, 1448, 6, 192, 15, 0, 1448, 1449, 6, 192, 0, 0, 1449, 1450, 6, 192, 28, 0, 1450, 401, 1, 0, 0, 0, 1451, 1452, 3, 186, 85, 0, 1452, 1453, 1, 0, 0, 0, 1453, 1454, 6, 193, 15, 0, 1454, 1455, 6, 193, 0, 0, 1455, 1456, 6, 193, 31, 0, 1456, 403, 1, 0, 0, 0, 1457, 1458, 3, 108, 46, 0, 1458, 1459, 1, 0, 0, 0, 1459, 1460, 6, 194, 15, 0, 1460, 1461, 6, 194, 0, 0, 1461, 1462, 6, 194, 33, 0, 1462, 405, 1, 0, 0, 0, 1463, 1464, 3, 80, 32, 0, 1464, 1465, 1, 0, 0, 0, 1465, 1466, 6, 195, 14, 0, 1466, 1467, 6, 195, 15, 0, 1467, 407, 1, 0, 0, 0, 65, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 588, 598, 602, 605, 614, 616, 627, 634, 639, 678, 683, 692, 699, 704, 706, 717, 725, 728, 730, 735, 740, 746, 753, 758, 764, 767, 775, 779, 909, 916, 918, 934, 939, 944, 946, 952, 1041, 1045, 1050, 1055, 1060, 1062, 1066, 1068, 1145, 1149, 1154, 1376, 1378, 34, 5, 2, 0, 5, 4, 0, 5, 6, 0, 5, 1, 0, 5, 3, 0, 5, 8, 0, 5, 12, 0, 5, 14, 0, 5, 10, 0, 5, 5, 0, 5, 11, 0, 0, 1, 0, 7, 71, 0, 5, 0, 0, 7, 30, 0, 4, 0, 0, 7, 72, 0, 7, 116, 0, 7, 39, 0, 7, 37, 0, 7, 26, 0, 7, 31, 0, 7, 41, 0, 7, 82, 0, 5, 13, 0, 5, 7, 0, 7, 92, 0, 7, 91, 0, 7, 74, 0, 7, 90, 0, 5, 9, 0, 7, 73, 0, 5, 15, 0, 7, 34, 0] \ No newline at end of file +[4, 0, 126, 1471, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 
2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 4, 21, 587, 8, 21, 11, 21, 12, 21, 588, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 597, 8, 22, 10, 22, 12, 22, 600, 9, 22, 1, 22, 3, 22, 603, 8, 22, 1, 22, 3, 22, 606, 8, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 5, 23, 615, 8, 23, 10, 23, 12, 23, 618, 9, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 4, 24, 626, 8, 24, 11, 24, 12, 24, 627, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 3, 25, 635, 8, 25, 1, 26, 4, 26, 638, 8, 26, 11, 26, 12, 26, 639, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 37, 1, 37, 3, 37, 679, 8, 37, 1, 37, 4, 37, 682, 8, 37, 11, 37, 12, 37, 683, 1, 38, 1, 38, 1, 
39, 1, 39, 1, 40, 1, 40, 1, 40, 3, 40, 693, 8, 40, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 3, 42, 700, 8, 42, 1, 43, 1, 43, 1, 43, 5, 43, 705, 8, 43, 10, 43, 12, 43, 708, 9, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 5, 43, 716, 8, 43, 10, 43, 12, 43, 719, 9, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 3, 43, 726, 8, 43, 1, 43, 3, 43, 729, 8, 43, 3, 43, 731, 8, 43, 1, 44, 4, 44, 734, 8, 44, 11, 44, 12, 44, 735, 1, 45, 4, 45, 739, 8, 45, 11, 45, 12, 45, 740, 1, 45, 1, 45, 5, 45, 745, 8, 45, 10, 45, 12, 45, 748, 9, 45, 1, 45, 1, 45, 4, 45, 752, 8, 45, 11, 45, 12, 45, 753, 1, 45, 4, 45, 757, 8, 45, 11, 45, 12, 45, 758, 1, 45, 1, 45, 5, 45, 763, 8, 45, 10, 45, 12, 45, 766, 9, 45, 3, 45, 768, 8, 45, 1, 45, 1, 45, 1, 45, 1, 45, 4, 45, 774, 8, 45, 11, 45, 12, 45, 775, 1, 45, 1, 45, 3, 45, 780, 8, 45, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 78, 1, 78, 1, 79, 1, 79, 1, 80, 1, 80, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 3, 82, 908, 8, 82, 1, 82, 5, 82, 911, 8, 82, 10, 82, 12, 82, 914, 9, 82, 1, 82, 1, 82, 4, 82, 918, 8, 82, 11, 82, 12, 82, 919, 3, 82, 922, 8, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 5, 85, 936, 8, 85, 10, 85, 12, 85, 939, 9, 85, 1, 85, 1, 85, 3, 85, 943, 8, 85, 1, 85, 4, 85, 946, 8, 85, 11, 85, 12, 85, 947, 3, 85, 950, 8, 85, 1, 86, 1, 86, 4, 86, 954, 8, 86, 11, 86, 12, 86, 955, 1, 86, 1, 86, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 3, 106, 1045, 8, 106, 1, 107, 1, 107, 3, 107, 1049, 8, 107, 1, 107, 5, 107, 1052, 8, 107, 10, 107, 12, 107, 1055, 9, 107, 1, 107, 1, 107, 3, 107, 1059, 8, 107, 1, 107, 4, 107, 1062, 8, 107, 11, 107, 12, 107, 1063, 3, 107, 1066, 8, 107, 1, 108, 1, 108, 4, 108, 1070, 8, 108, 11, 108, 12, 108, 1071, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 
124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 126, 4, 126, 1147, 8, 126, 11, 126, 12, 126, 1148, 1, 126, 1, 126, 3, 126, 1153, 8, 126, 1, 126, 4, 126, 1156, 8, 126, 11, 126, 12, 126, 1157, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 4, 177, 1380, 8, 177, 11, 177, 12, 177, 1381, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 194, 1, 194, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 2, 616, 717, 0, 196, 16, 1, 18, 2, 20, 3, 22, 4, 24, 5, 26, 6, 28, 7, 30, 8, 32, 9, 34, 10, 36, 11, 38, 12, 40, 13, 42, 14, 44, 15, 46, 16, 48, 17, 50, 18, 52, 19, 54, 20, 56, 21, 58, 22, 60, 23, 62, 24, 64, 25, 66, 0, 68, 26, 70, 0, 72, 0, 74, 27, 76, 28, 78, 29, 80, 30, 82, 0, 84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 0, 96, 0, 98, 0, 100, 0, 102, 31, 104, 32, 106, 33, 108, 34, 110, 35, 112, 36, 114, 37, 116, 38, 118, 39, 120, 40, 122, 41, 124, 42, 126, 43, 128, 44, 130, 45, 132, 46, 134, 47, 136, 48, 138, 49, 140, 50, 142, 51, 144, 52, 146, 53, 148, 54, 150, 55, 152, 56, 154, 57, 156, 58, 158, 59, 160, 60, 162, 61, 164, 62, 166, 63, 168, 64, 170, 65, 172, 66, 174, 67, 176, 68, 178, 69, 180, 70, 182, 71, 184, 72, 186, 73, 188, 0, 190, 74, 192, 75, 194, 76, 196, 77, 198, 0, 200, 0, 202, 0, 204, 0, 206, 0, 208, 0, 210, 
78, 212, 0, 214, 0, 216, 79, 218, 80, 220, 81, 222, 0, 224, 0, 226, 0, 228, 0, 230, 0, 232, 82, 234, 83, 236, 84, 238, 85, 240, 0, 242, 0, 244, 0, 246, 0, 248, 86, 250, 0, 252, 87, 254, 88, 256, 89, 258, 0, 260, 0, 262, 90, 264, 91, 266, 0, 268, 92, 270, 0, 272, 93, 274, 94, 276, 95, 278, 0, 280, 0, 282, 0, 284, 0, 286, 0, 288, 0, 290, 0, 292, 96, 294, 97, 296, 98, 298, 0, 300, 0, 302, 0, 304, 0, 306, 0, 308, 0, 310, 0, 312, 99, 314, 100, 316, 101, 318, 0, 320, 0, 322, 0, 324, 0, 326, 102, 328, 103, 330, 104, 332, 0, 334, 0, 336, 0, 338, 0, 340, 105, 342, 106, 344, 107, 346, 0, 348, 108, 350, 109, 352, 110, 354, 111, 356, 0, 358, 112, 360, 113, 362, 114, 364, 115, 366, 0, 368, 116, 370, 117, 372, 118, 374, 119, 376, 120, 378, 0, 380, 0, 382, 0, 384, 121, 386, 122, 388, 123, 390, 0, 392, 0, 394, 124, 396, 125, 398, 126, 400, 0, 402, 0, 404, 0, 406, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 13, 6, 0, 9, 10, 13, 13, 32, 32, 47, 47, 91, 91, 93, 93, 2, 0, 10, 10, 13, 13, 3, 0, 9, 10, 13, 13, 32, 32, 11, 0, 9, 10, 13, 13, 32, 32, 34, 34, 44, 44, 47, 47, 58, 58, 61, 61, 91, 91, 93, 93, 124, 124, 2, 0, 42, 42, 47, 47, 1, 0, 48, 57, 2, 0, 65, 90, 97, 122, 5, 0, 34, 34, 92, 92, 110, 110, 114, 114, 116, 116, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 1, 0, 96, 96, 11, 0, 9, 10, 13, 13, 32, 32, 34, 35, 44, 44, 47, 47, 58, 58, 60, 60, 62, 63, 92, 92, 124, 124, 1498, 0, 16, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 0, 20, 1, 0, 0, 0, 0, 22, 1, 0, 0, 0, 0, 24, 1, 0, 0, 0, 0, 26, 1, 0, 0, 0, 0, 28, 1, 0, 0, 0, 0, 30, 1, 0, 0, 0, 0, 32, 1, 0, 0, 0, 0, 34, 1, 0, 0, 0, 0, 36, 1, 0, 0, 0, 0, 38, 1, 0, 0, 0, 0, 40, 1, 0, 0, 0, 0, 42, 1, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 46, 1, 0, 0, 0, 0, 48, 1, 0, 0, 0, 0, 50, 1, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 54, 1, 0, 0, 0, 0, 56, 1, 0, 0, 0, 0, 58, 1, 0, 0, 0, 0, 60, 1, 0, 0, 0, 0, 62, 1, 0, 0, 0, 0, 64, 1, 0, 0, 0, 0, 68, 1, 0, 0, 0, 1, 70, 1, 0, 0, 0, 1, 72, 1, 0, 0, 0, 1, 74, 1, 0, 0, 0, 1, 76, 1, 0, 0, 0, 1, 78, 1, 0, 0, 0, 2, 80, 1, 0, 0, 0, 2, 102, 1, 0, 0, 0, 2, 104, 1, 0, 0, 0, 2, 106, 1, 0, 0, 0, 2, 108, 1, 0, 0, 0, 2, 110, 1, 0, 0, 0, 2, 112, 1, 0, 0, 0, 2, 114, 1, 0, 0, 0, 2, 116, 1, 0, 0, 0, 2, 118, 1, 0, 0, 0, 2, 120, 1, 0, 0, 0, 2, 122, 1, 0, 0, 0, 2, 124, 1, 0, 0, 0, 2, 126, 1, 0, 0, 0, 2, 128, 1, 0, 0, 0, 2, 130, 1, 0, 0, 0, 2, 132, 1, 0, 0, 0, 2, 134, 1, 0, 0, 0, 2, 136, 1, 0, 0, 0, 2, 138, 1, 0, 0, 0, 2, 140, 1, 0, 0, 0, 2, 142, 1, 0, 0, 0, 2, 144, 1, 0, 0, 0, 2, 146, 1, 0, 0, 0, 2, 148, 1, 0, 0, 0, 2, 150, 1, 0, 0, 0, 2, 152, 1, 0, 0, 0, 2, 154, 1, 0, 0, 0, 2, 156, 1, 0, 0, 0, 2, 158, 1, 0, 0, 0, 2, 160, 1, 0, 0, 0, 2, 162, 1, 0, 0, 0, 2, 164, 1, 0, 0, 0, 2, 166, 1, 0, 0, 0, 2, 168, 1, 0, 0, 0, 2, 170, 1, 0, 0, 0, 2, 172, 1, 0, 0, 0, 2, 174, 1, 0, 0, 0, 2, 176, 1, 0, 0, 0, 2, 178, 1, 0, 0, 0, 2, 180, 1, 0, 0, 0, 2, 182, 1, 0, 0, 0, 2, 184, 1, 0, 0, 0, 2, 186, 1, 0, 0, 0, 2, 190, 1, 0, 0, 0, 2, 192, 1, 0, 0, 0, 2, 194, 1, 0, 0, 0, 2, 196, 1, 0, 0, 0, 3, 198, 1, 0, 0, 0, 3, 200, 1, 0, 0, 0, 3, 202, 1, 0, 0, 0, 3, 204, 1, 0, 0, 0, 3, 206, 1, 0, 0, 0, 3, 208, 1, 0, 0, 0, 3, 210, 1, 0, 0, 0, 3, 212, 1, 0, 0, 0, 3, 214, 1, 0, 0, 0, 3, 216, 1, 0, 0, 0, 3, 218, 1, 0, 0, 0, 3, 220, 1, 0, 0, 0, 4, 222, 1, 0, 0, 0, 4, 224, 1, 0, 0, 0, 4, 226, 1, 0, 0, 0, 4, 232, 1, 0, 0, 0, 4, 234, 1, 0, 0, 0, 4, 236, 1, 0, 0, 0, 4, 238, 1, 0, 0, 0, 5, 240, 1, 0, 0, 0, 5, 242, 1, 0, 0, 0, 5, 244, 1, 0, 0, 0, 5, 246, 1, 0, 0, 0, 5, 248, 1, 0, 0, 0, 5, 250, 1, 0, 0, 0, 5, 252, 1, 0, 0, 0, 5, 254, 1, 0, 0, 0, 5, 256, 1, 0, 0, 0, 6, 258, 1, 
0, 0, 0, 6, 260, 1, 0, 0, 0, 6, 262, 1, 0, 0, 0, 6, 264, 1, 0, 0, 0, 6, 268, 1, 0, 0, 0, 6, 270, 1, 0, 0, 0, 6, 272, 1, 0, 0, 0, 6, 274, 1, 0, 0, 0, 6, 276, 1, 0, 0, 0, 7, 278, 1, 0, 0, 0, 7, 280, 1, 0, 0, 0, 7, 282, 1, 0, 0, 0, 7, 284, 1, 0, 0, 0, 7, 286, 1, 0, 0, 0, 7, 288, 1, 0, 0, 0, 7, 290, 1, 0, 0, 0, 7, 292, 1, 0, 0, 0, 7, 294, 1, 0, 0, 0, 7, 296, 1, 0, 0, 0, 8, 298, 1, 0, 0, 0, 8, 300, 1, 0, 0, 0, 8, 302, 1, 0, 0, 0, 8, 304, 1, 0, 0, 0, 8, 306, 1, 0, 0, 0, 8, 308, 1, 0, 0, 0, 8, 310, 1, 0, 0, 0, 8, 312, 1, 0, 0, 0, 8, 314, 1, 0, 0, 0, 8, 316, 1, 0, 0, 0, 9, 318, 1, 0, 0, 0, 9, 320, 1, 0, 0, 0, 9, 322, 1, 0, 0, 0, 9, 324, 1, 0, 0, 0, 9, 326, 1, 0, 0, 0, 9, 328, 1, 0, 0, 0, 9, 330, 1, 0, 0, 0, 10, 332, 1, 0, 0, 0, 10, 334, 1, 0, 0, 0, 10, 336, 1, 0, 0, 0, 10, 338, 1, 0, 0, 0, 10, 340, 1, 0, 0, 0, 10, 342, 1, 0, 0, 0, 10, 344, 1, 0, 0, 0, 11, 346, 1, 0, 0, 0, 11, 348, 1, 0, 0, 0, 11, 350, 1, 0, 0, 0, 11, 352, 1, 0, 0, 0, 11, 354, 1, 0, 0, 0, 12, 356, 1, 0, 0, 0, 12, 358, 1, 0, 0, 0, 12, 360, 1, 0, 0, 0, 12, 362, 1, 0, 0, 0, 12, 364, 1, 0, 0, 0, 13, 366, 1, 0, 0, 0, 13, 368, 1, 0, 0, 0, 13, 370, 1, 0, 0, 0, 13, 372, 1, 0, 0, 0, 13, 374, 1, 0, 0, 0, 13, 376, 1, 0, 0, 0, 14, 378, 1, 0, 0, 0, 14, 380, 1, 0, 0, 0, 14, 382, 1, 0, 0, 0, 14, 384, 1, 0, 0, 0, 14, 386, 1, 0, 0, 0, 14, 388, 1, 0, 0, 0, 15, 390, 1, 0, 0, 0, 15, 392, 1, 0, 0, 0, 15, 394, 1, 0, 0, 0, 15, 396, 1, 0, 0, 0, 15, 398, 1, 0, 0, 0, 15, 400, 1, 0, 0, 0, 15, 402, 1, 0, 0, 0, 15, 404, 1, 0, 0, 0, 15, 406, 1, 0, 0, 0, 16, 408, 1, 0, 0, 0, 18, 418, 1, 0, 0, 0, 20, 425, 1, 0, 0, 0, 22, 434, 1, 0, 0, 0, 24, 441, 1, 0, 0, 0, 26, 451, 1, 0, 0, 0, 28, 458, 1, 0, 0, 0, 30, 465, 1, 0, 0, 0, 32, 479, 1, 0, 0, 0, 34, 486, 1, 0, 0, 0, 36, 494, 1, 0, 0, 0, 38, 503, 1, 0, 0, 0, 40, 510, 1, 0, 0, 0, 42, 520, 1, 0, 0, 0, 44, 532, 1, 0, 0, 0, 46, 541, 1, 0, 0, 0, 48, 547, 1, 0, 0, 0, 50, 554, 1, 0, 0, 0, 52, 561, 1, 0, 0, 0, 54, 569, 1, 0, 0, 0, 56, 577, 1, 0, 0, 0, 58, 586, 1, 0, 0, 0, 60, 592, 1, 0, 0, 0, 62, 609, 1, 0, 0, 0, 64, 625, 1, 0, 0, 0, 66, 634, 1, 0, 0, 0, 68, 637, 1, 0, 0, 0, 70, 641, 1, 0, 0, 0, 72, 646, 1, 0, 0, 0, 74, 651, 1, 0, 0, 0, 76, 655, 1, 0, 0, 0, 78, 659, 1, 0, 0, 0, 80, 663, 1, 0, 0, 0, 82, 667, 1, 0, 0, 0, 84, 669, 1, 0, 0, 0, 86, 671, 1, 0, 0, 0, 88, 674, 1, 0, 0, 0, 90, 676, 1, 0, 0, 0, 92, 685, 1, 0, 0, 0, 94, 687, 1, 0, 0, 0, 96, 692, 1, 0, 0, 0, 98, 694, 1, 0, 0, 0, 100, 699, 1, 0, 0, 0, 102, 730, 1, 0, 0, 0, 104, 733, 1, 0, 0, 0, 106, 779, 1, 0, 0, 0, 108, 781, 1, 0, 0, 0, 110, 784, 1, 0, 0, 0, 112, 788, 1, 0, 0, 0, 114, 792, 1, 0, 0, 0, 116, 794, 1, 0, 0, 0, 118, 797, 1, 0, 0, 0, 120, 799, 1, 0, 0, 0, 122, 804, 1, 0, 0, 0, 124, 806, 1, 0, 0, 0, 126, 812, 1, 0, 0, 0, 128, 818, 1, 0, 0, 0, 130, 821, 1, 0, 0, 0, 132, 824, 1, 0, 0, 0, 134, 829, 1, 0, 0, 0, 136, 834, 1, 0, 0, 0, 138, 836, 1, 0, 0, 0, 140, 842, 1, 0, 0, 0, 142, 846, 1, 0, 0, 0, 144, 851, 1, 0, 0, 0, 146, 857, 1, 0, 0, 0, 148, 860, 1, 0, 0, 0, 150, 862, 1, 0, 0, 0, 152, 868, 1, 0, 0, 0, 154, 870, 1, 0, 0, 0, 156, 875, 1, 0, 0, 0, 158, 878, 1, 0, 0, 0, 160, 881, 1, 0, 0, 0, 162, 884, 1, 0, 0, 0, 164, 886, 1, 0, 0, 0, 166, 889, 1, 0, 0, 0, 168, 891, 1, 0, 0, 0, 170, 894, 1, 0, 0, 0, 172, 896, 1, 0, 0, 0, 174, 898, 1, 0, 0, 0, 176, 900, 1, 0, 0, 0, 178, 902, 1, 0, 0, 0, 180, 921, 1, 0, 0, 0, 182, 923, 1, 0, 0, 0, 184, 928, 1, 0, 0, 0, 186, 949, 1, 0, 0, 0, 188, 951, 1, 0, 0, 0, 190, 959, 1, 0, 0, 0, 192, 961, 1, 0, 0, 0, 194, 965, 1, 0, 0, 0, 196, 969, 1, 0, 0, 0, 198, 973, 1, 0, 0, 0, 200, 978, 1, 0, 0, 0, 202, 982, 1, 0, 0, 0, 204, 986, 1, 0, 0, 0, 
206, 990, 1, 0, 0, 0, 208, 994, 1, 0, 0, 0, 210, 998, 1, 0, 0, 0, 212, 1007, 1, 0, 0, 0, 214, 1011, 1, 0, 0, 0, 216, 1015, 1, 0, 0, 0, 218, 1019, 1, 0, 0, 0, 220, 1023, 1, 0, 0, 0, 222, 1027, 1, 0, 0, 0, 224, 1032, 1, 0, 0, 0, 226, 1036, 1, 0, 0, 0, 228, 1044, 1, 0, 0, 0, 230, 1065, 1, 0, 0, 0, 232, 1069, 1, 0, 0, 0, 234, 1073, 1, 0, 0, 0, 236, 1077, 1, 0, 0, 0, 238, 1081, 1, 0, 0, 0, 240, 1085, 1, 0, 0, 0, 242, 1090, 1, 0, 0, 0, 244, 1094, 1, 0, 0, 0, 246, 1098, 1, 0, 0, 0, 248, 1102, 1, 0, 0, 0, 250, 1105, 1, 0, 0, 0, 252, 1109, 1, 0, 0, 0, 254, 1113, 1, 0, 0, 0, 256, 1117, 1, 0, 0, 0, 258, 1121, 1, 0, 0, 0, 260, 1126, 1, 0, 0, 0, 262, 1131, 1, 0, 0, 0, 264, 1136, 1, 0, 0, 0, 266, 1143, 1, 0, 0, 0, 268, 1152, 1, 0, 0, 0, 270, 1159, 1, 0, 0, 0, 272, 1163, 1, 0, 0, 0, 274, 1167, 1, 0, 0, 0, 276, 1171, 1, 0, 0, 0, 278, 1175, 1, 0, 0, 0, 280, 1181, 1, 0, 0, 0, 282, 1185, 1, 0, 0, 0, 284, 1189, 1, 0, 0, 0, 286, 1193, 1, 0, 0, 0, 288, 1197, 1, 0, 0, 0, 290, 1201, 1, 0, 0, 0, 292, 1205, 1, 0, 0, 0, 294, 1209, 1, 0, 0, 0, 296, 1213, 1, 0, 0, 0, 298, 1217, 1, 0, 0, 0, 300, 1222, 1, 0, 0, 0, 302, 1226, 1, 0, 0, 0, 304, 1230, 1, 0, 0, 0, 306, 1234, 1, 0, 0, 0, 308, 1239, 1, 0, 0, 0, 310, 1243, 1, 0, 0, 0, 312, 1247, 1, 0, 0, 0, 314, 1251, 1, 0, 0, 0, 316, 1255, 1, 0, 0, 0, 318, 1259, 1, 0, 0, 0, 320, 1265, 1, 0, 0, 0, 322, 1269, 1, 0, 0, 0, 324, 1273, 1, 0, 0, 0, 326, 1277, 1, 0, 0, 0, 328, 1281, 1, 0, 0, 0, 330, 1285, 1, 0, 0, 0, 332, 1289, 1, 0, 0, 0, 334, 1294, 1, 0, 0, 0, 336, 1298, 1, 0, 0, 0, 338, 1302, 1, 0, 0, 0, 340, 1306, 1, 0, 0, 0, 342, 1310, 1, 0, 0, 0, 344, 1314, 1, 0, 0, 0, 346, 1318, 1, 0, 0, 0, 348, 1323, 1, 0, 0, 0, 350, 1328, 1, 0, 0, 0, 352, 1332, 1, 0, 0, 0, 354, 1336, 1, 0, 0, 0, 356, 1340, 1, 0, 0, 0, 358, 1345, 1, 0, 0, 0, 360, 1355, 1, 0, 0, 0, 362, 1359, 1, 0, 0, 0, 364, 1363, 1, 0, 0, 0, 366, 1367, 1, 0, 0, 0, 368, 1372, 1, 0, 0, 0, 370, 1379, 1, 0, 0, 0, 372, 1383, 1, 0, 0, 0, 374, 1387, 1, 0, 0, 0, 376, 1391, 1, 0, 0, 0, 378, 1395, 1, 0, 0, 0, 380, 1400, 1, 0, 0, 0, 382, 1406, 1, 0, 0, 0, 384, 1412, 1, 0, 0, 0, 386, 1416, 1, 0, 0, 0, 388, 1420, 1, 0, 0, 0, 390, 1424, 1, 0, 0, 0, 392, 1430, 1, 0, 0, 0, 394, 1436, 1, 0, 0, 0, 396, 1440, 1, 0, 0, 0, 398, 1444, 1, 0, 0, 0, 400, 1448, 1, 0, 0, 0, 402, 1454, 1, 0, 0, 0, 404, 1460, 1, 0, 0, 0, 406, 1466, 1, 0, 0, 0, 408, 409, 5, 100, 0, 0, 409, 410, 5, 105, 0, 0, 410, 411, 5, 115, 0, 0, 411, 412, 5, 115, 0, 0, 412, 413, 5, 101, 0, 0, 413, 414, 5, 99, 0, 0, 414, 415, 5, 116, 0, 0, 415, 416, 1, 0, 0, 0, 416, 417, 6, 0, 0, 0, 417, 17, 1, 0, 0, 0, 418, 419, 5, 100, 0, 0, 419, 420, 5, 114, 0, 0, 420, 421, 5, 111, 0, 0, 421, 422, 5, 112, 0, 0, 422, 423, 1, 0, 0, 0, 423, 424, 6, 1, 1, 0, 424, 19, 1, 0, 0, 0, 425, 426, 5, 101, 0, 0, 426, 427, 5, 110, 0, 0, 427, 428, 5, 114, 0, 0, 428, 429, 5, 105, 0, 0, 429, 430, 5, 99, 0, 0, 430, 431, 5, 104, 0, 0, 431, 432, 1, 0, 0, 0, 432, 433, 6, 2, 2, 0, 433, 21, 1, 0, 0, 0, 434, 435, 5, 101, 0, 0, 435, 436, 5, 118, 0, 0, 436, 437, 5, 97, 0, 0, 437, 438, 5, 108, 0, 0, 438, 439, 1, 0, 0, 0, 439, 440, 6, 3, 0, 0, 440, 23, 1, 0, 0, 0, 441, 442, 5, 101, 0, 0, 442, 443, 5, 120, 0, 0, 443, 444, 5, 112, 0, 0, 444, 445, 5, 108, 0, 0, 445, 446, 5, 97, 0, 0, 446, 447, 5, 105, 0, 0, 447, 448, 5, 110, 0, 0, 448, 449, 1, 0, 0, 0, 449, 450, 6, 4, 3, 0, 450, 25, 1, 0, 0, 0, 451, 452, 5, 102, 0, 0, 452, 453, 5, 114, 0, 0, 453, 454, 5, 111, 0, 0, 454, 455, 5, 109, 0, 0, 455, 456, 1, 0, 0, 0, 456, 457, 6, 5, 4, 0, 457, 27, 1, 0, 0, 0, 458, 459, 5, 103, 0, 0, 459, 460, 5, 114, 0, 0, 460, 461, 5, 111, 0, 0, 461, 
462, 5, 107, 0, 0, 462, 463, 1, 0, 0, 0, 463, 464, 6, 6, 0, 0, 464, 29, 1, 0, 0, 0, 465, 466, 5, 105, 0, 0, 466, 467, 5, 110, 0, 0, 467, 468, 5, 108, 0, 0, 468, 469, 5, 105, 0, 0, 469, 470, 5, 110, 0, 0, 470, 471, 5, 101, 0, 0, 471, 472, 5, 115, 0, 0, 472, 473, 5, 116, 0, 0, 473, 474, 5, 97, 0, 0, 474, 475, 5, 116, 0, 0, 475, 476, 5, 115, 0, 0, 476, 477, 1, 0, 0, 0, 477, 478, 6, 7, 0, 0, 478, 31, 1, 0, 0, 0, 479, 480, 5, 107, 0, 0, 480, 481, 5, 101, 0, 0, 481, 482, 5, 101, 0, 0, 482, 483, 5, 112, 0, 0, 483, 484, 1, 0, 0, 0, 484, 485, 6, 8, 1, 0, 485, 33, 1, 0, 0, 0, 486, 487, 5, 108, 0, 0, 487, 488, 5, 105, 0, 0, 488, 489, 5, 109, 0, 0, 489, 490, 5, 105, 0, 0, 490, 491, 5, 116, 0, 0, 491, 492, 1, 0, 0, 0, 492, 493, 6, 9, 0, 0, 493, 35, 1, 0, 0, 0, 494, 495, 5, 108, 0, 0, 495, 496, 5, 111, 0, 0, 496, 497, 5, 111, 0, 0, 497, 498, 5, 107, 0, 0, 498, 499, 5, 117, 0, 0, 499, 500, 5, 112, 0, 0, 500, 501, 1, 0, 0, 0, 501, 502, 6, 10, 5, 0, 502, 37, 1, 0, 0, 0, 503, 504, 5, 109, 0, 0, 504, 505, 5, 101, 0, 0, 505, 506, 5, 116, 0, 0, 506, 507, 5, 97, 0, 0, 507, 508, 1, 0, 0, 0, 508, 509, 6, 11, 6, 0, 509, 39, 1, 0, 0, 0, 510, 511, 5, 109, 0, 0, 511, 512, 5, 101, 0, 0, 512, 513, 5, 116, 0, 0, 513, 514, 5, 114, 0, 0, 514, 515, 5, 105, 0, 0, 515, 516, 5, 99, 0, 0, 516, 517, 5, 115, 0, 0, 517, 518, 1, 0, 0, 0, 518, 519, 6, 12, 7, 0, 519, 41, 1, 0, 0, 0, 520, 521, 5, 109, 0, 0, 521, 522, 5, 118, 0, 0, 522, 523, 5, 95, 0, 0, 523, 524, 5, 101, 0, 0, 524, 525, 5, 120, 0, 0, 525, 526, 5, 112, 0, 0, 526, 527, 5, 97, 0, 0, 527, 528, 5, 110, 0, 0, 528, 529, 5, 100, 0, 0, 529, 530, 1, 0, 0, 0, 530, 531, 6, 13, 8, 0, 531, 43, 1, 0, 0, 0, 532, 533, 5, 114, 0, 0, 533, 534, 5, 101, 0, 0, 534, 535, 5, 110, 0, 0, 535, 536, 5, 97, 0, 0, 536, 537, 5, 109, 0, 0, 537, 538, 5, 101, 0, 0, 538, 539, 1, 0, 0, 0, 539, 540, 6, 14, 9, 0, 540, 45, 1, 0, 0, 0, 541, 542, 5, 114, 0, 0, 542, 543, 5, 111, 0, 0, 543, 544, 5, 119, 0, 0, 544, 545, 1, 0, 0, 0, 545, 546, 6, 15, 0, 0, 546, 47, 1, 0, 0, 0, 547, 548, 5, 115, 0, 0, 548, 549, 5, 104, 0, 0, 549, 550, 5, 111, 0, 0, 550, 551, 5, 119, 0, 0, 551, 552, 1, 0, 0, 0, 552, 553, 6, 16, 10, 0, 553, 49, 1, 0, 0, 0, 554, 555, 5, 115, 0, 0, 555, 556, 5, 111, 0, 0, 556, 557, 5, 114, 0, 0, 557, 558, 5, 116, 0, 0, 558, 559, 1, 0, 0, 0, 559, 560, 6, 17, 0, 0, 560, 51, 1, 0, 0, 0, 561, 562, 5, 115, 0, 0, 562, 563, 5, 116, 0, 0, 563, 564, 5, 97, 0, 0, 564, 565, 5, 116, 0, 0, 565, 566, 5, 115, 0, 0, 566, 567, 1, 0, 0, 0, 567, 568, 6, 18, 0, 0, 568, 53, 1, 0, 0, 0, 569, 570, 5, 119, 0, 0, 570, 571, 5, 104, 0, 0, 571, 572, 5, 101, 0, 0, 572, 573, 5, 114, 0, 0, 573, 574, 5, 101, 0, 0, 574, 575, 1, 0, 0, 0, 575, 576, 6, 19, 0, 0, 576, 55, 1, 0, 0, 0, 577, 578, 5, 109, 0, 0, 578, 579, 5, 97, 0, 0, 579, 580, 5, 116, 0, 0, 580, 581, 5, 99, 0, 0, 581, 582, 5, 104, 0, 0, 582, 583, 1, 0, 0, 0, 583, 584, 6, 20, 0, 0, 584, 57, 1, 0, 0, 0, 585, 587, 8, 0, 0, 0, 586, 585, 1, 0, 0, 0, 587, 588, 1, 0, 0, 0, 588, 586, 1, 0, 0, 0, 588, 589, 1, 0, 0, 0, 589, 590, 1, 0, 0, 0, 590, 591, 6, 21, 0, 0, 591, 59, 1, 0, 0, 0, 592, 593, 5, 47, 0, 0, 593, 594, 5, 47, 0, 0, 594, 598, 1, 0, 0, 0, 595, 597, 8, 1, 0, 0, 596, 595, 1, 0, 0, 0, 597, 600, 1, 0, 0, 0, 598, 596, 1, 0, 0, 0, 598, 599, 1, 0, 0, 0, 599, 602, 1, 0, 0, 0, 600, 598, 1, 0, 0, 0, 601, 603, 5, 13, 0, 0, 602, 601, 1, 0, 0, 0, 602, 603, 1, 0, 0, 0, 603, 605, 1, 0, 0, 0, 604, 606, 5, 10, 0, 0, 605, 604, 1, 0, 0, 0, 605, 606, 1, 0, 0, 0, 606, 607, 1, 0, 0, 0, 607, 608, 6, 22, 11, 0, 608, 61, 1, 0, 0, 0, 609, 610, 5, 47, 0, 0, 610, 611, 5, 42, 0, 0, 611, 616, 1, 0, 0, 
0, 612, 615, 3, 62, 23, 0, 613, 615, 9, 0, 0, 0, 614, 612, 1, 0, 0, 0, 614, 613, 1, 0, 0, 0, 615, 618, 1, 0, 0, 0, 616, 617, 1, 0, 0, 0, 616, 614, 1, 0, 0, 0, 617, 619, 1, 0, 0, 0, 618, 616, 1, 0, 0, 0, 619, 620, 5, 42, 0, 0, 620, 621, 5, 47, 0, 0, 621, 622, 1, 0, 0, 0, 622, 623, 6, 23, 11, 0, 623, 63, 1, 0, 0, 0, 624, 626, 7, 2, 0, 0, 625, 624, 1, 0, 0, 0, 626, 627, 1, 0, 0, 0, 627, 625, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 629, 1, 0, 0, 0, 629, 630, 6, 24, 11, 0, 630, 65, 1, 0, 0, 0, 631, 635, 8, 3, 0, 0, 632, 633, 5, 47, 0, 0, 633, 635, 8, 4, 0, 0, 634, 631, 1, 0, 0, 0, 634, 632, 1, 0, 0, 0, 635, 67, 1, 0, 0, 0, 636, 638, 3, 66, 25, 0, 637, 636, 1, 0, 0, 0, 638, 639, 1, 0, 0, 0, 639, 637, 1, 0, 0, 0, 639, 640, 1, 0, 0, 0, 640, 69, 1, 0, 0, 0, 641, 642, 3, 182, 83, 0, 642, 643, 1, 0, 0, 0, 643, 644, 6, 27, 12, 0, 644, 645, 6, 27, 13, 0, 645, 71, 1, 0, 0, 0, 646, 647, 3, 80, 32, 0, 647, 648, 1, 0, 0, 0, 648, 649, 6, 28, 14, 0, 649, 650, 6, 28, 15, 0, 650, 73, 1, 0, 0, 0, 651, 652, 3, 64, 24, 0, 652, 653, 1, 0, 0, 0, 653, 654, 6, 29, 11, 0, 654, 75, 1, 0, 0, 0, 655, 656, 3, 60, 22, 0, 656, 657, 1, 0, 0, 0, 657, 658, 6, 30, 11, 0, 658, 77, 1, 0, 0, 0, 659, 660, 3, 62, 23, 0, 660, 661, 1, 0, 0, 0, 661, 662, 6, 31, 11, 0, 662, 79, 1, 0, 0, 0, 663, 664, 5, 124, 0, 0, 664, 665, 1, 0, 0, 0, 665, 666, 6, 32, 15, 0, 666, 81, 1, 0, 0, 0, 667, 668, 7, 5, 0, 0, 668, 83, 1, 0, 0, 0, 669, 670, 7, 6, 0, 0, 670, 85, 1, 0, 0, 0, 671, 672, 5, 92, 0, 0, 672, 673, 7, 7, 0, 0, 673, 87, 1, 0, 0, 0, 674, 675, 8, 8, 0, 0, 675, 89, 1, 0, 0, 0, 676, 678, 7, 9, 0, 0, 677, 679, 7, 10, 0, 0, 678, 677, 1, 0, 0, 0, 678, 679, 1, 0, 0, 0, 679, 681, 1, 0, 0, 0, 680, 682, 3, 82, 33, 0, 681, 680, 1, 0, 0, 0, 682, 683, 1, 0, 0, 0, 683, 681, 1, 0, 0, 0, 683, 684, 1, 0, 0, 0, 684, 91, 1, 0, 0, 0, 685, 686, 5, 64, 0, 0, 686, 93, 1, 0, 0, 0, 687, 688, 5, 96, 0, 0, 688, 95, 1, 0, 0, 0, 689, 693, 8, 11, 0, 0, 690, 691, 5, 96, 0, 0, 691, 693, 5, 96, 0, 0, 692, 689, 1, 0, 0, 0, 692, 690, 1, 0, 0, 0, 693, 97, 1, 0, 0, 0, 694, 695, 5, 95, 0, 0, 695, 99, 1, 0, 0, 0, 696, 700, 3, 84, 34, 0, 697, 700, 3, 82, 33, 0, 698, 700, 3, 98, 41, 0, 699, 696, 1, 0, 0, 0, 699, 697, 1, 0, 0, 0, 699, 698, 1, 0, 0, 0, 700, 101, 1, 0, 0, 0, 701, 706, 5, 34, 0, 0, 702, 705, 3, 86, 35, 0, 703, 705, 3, 88, 36, 0, 704, 702, 1, 0, 0, 0, 704, 703, 1, 0, 0, 0, 705, 708, 1, 0, 0, 0, 706, 704, 1, 0, 0, 0, 706, 707, 1, 0, 0, 0, 707, 709, 1, 0, 0, 0, 708, 706, 1, 0, 0, 0, 709, 731, 5, 34, 0, 0, 710, 711, 5, 34, 0, 0, 711, 712, 5, 34, 0, 0, 712, 713, 5, 34, 0, 0, 713, 717, 1, 0, 0, 0, 714, 716, 8, 1, 0, 0, 715, 714, 1, 0, 0, 0, 716, 719, 1, 0, 0, 0, 717, 718, 1, 0, 0, 0, 717, 715, 1, 0, 0, 0, 718, 720, 1, 0, 0, 0, 719, 717, 1, 0, 0, 0, 720, 721, 5, 34, 0, 0, 721, 722, 5, 34, 0, 0, 722, 723, 5, 34, 0, 0, 723, 725, 1, 0, 0, 0, 724, 726, 5, 34, 0, 0, 725, 724, 1, 0, 0, 0, 725, 726, 1, 0, 0, 0, 726, 728, 1, 0, 0, 0, 727, 729, 5, 34, 0, 0, 728, 727, 1, 0, 0, 0, 728, 729, 1, 0, 0, 0, 729, 731, 1, 0, 0, 0, 730, 701, 1, 0, 0, 0, 730, 710, 1, 0, 0, 0, 731, 103, 1, 0, 0, 0, 732, 734, 3, 82, 33, 0, 733, 732, 1, 0, 0, 0, 734, 735, 1, 0, 0, 0, 735, 733, 1, 0, 0, 0, 735, 736, 1, 0, 0, 0, 736, 105, 1, 0, 0, 0, 737, 739, 3, 82, 33, 0, 738, 737, 1, 0, 0, 0, 739, 740, 1, 0, 0, 0, 740, 738, 1, 0, 0, 0, 740, 741, 1, 0, 0, 0, 741, 742, 1, 0, 0, 0, 742, 746, 3, 122, 53, 0, 743, 745, 3, 82, 33, 0, 744, 743, 1, 0, 0, 0, 745, 748, 1, 0, 0, 0, 746, 744, 1, 0, 0, 0, 746, 747, 1, 0, 0, 0, 747, 780, 1, 0, 0, 0, 748, 746, 1, 0, 0, 0, 749, 751, 3, 122, 53, 0, 750, 752, 3, 82, 33, 0, 751, 750, 
1, 0, 0, 0, 752, 753, 1, 0, 0, 0, 753, 751, 1, 0, 0, 0, 753, 754, 1, 0, 0, 0, 754, 780, 1, 0, 0, 0, 755, 757, 3, 82, 33, 0, 756, 755, 1, 0, 0, 0, 757, 758, 1, 0, 0, 0, 758, 756, 1, 0, 0, 0, 758, 759, 1, 0, 0, 0, 759, 767, 1, 0, 0, 0, 760, 764, 3, 122, 53, 0, 761, 763, 3, 82, 33, 0, 762, 761, 1, 0, 0, 0, 763, 766, 1, 0, 0, 0, 764, 762, 1, 0, 0, 0, 764, 765, 1, 0, 0, 0, 765, 768, 1, 0, 0, 0, 766, 764, 1, 0, 0, 0, 767, 760, 1, 0, 0, 0, 767, 768, 1, 0, 0, 0, 768, 769, 1, 0, 0, 0, 769, 770, 3, 90, 37, 0, 770, 780, 1, 0, 0, 0, 771, 773, 3, 122, 53, 0, 772, 774, 3, 82, 33, 0, 773, 772, 1, 0, 0, 0, 774, 775, 1, 0, 0, 0, 775, 773, 1, 0, 0, 0, 775, 776, 1, 0, 0, 0, 776, 777, 1, 0, 0, 0, 777, 778, 3, 90, 37, 0, 778, 780, 1, 0, 0, 0, 779, 738, 1, 0, 0, 0, 779, 749, 1, 0, 0, 0, 779, 756, 1, 0, 0, 0, 779, 771, 1, 0, 0, 0, 780, 107, 1, 0, 0, 0, 781, 782, 5, 98, 0, 0, 782, 783, 5, 121, 0, 0, 783, 109, 1, 0, 0, 0, 784, 785, 5, 97, 0, 0, 785, 786, 5, 110, 0, 0, 786, 787, 5, 100, 0, 0, 787, 111, 1, 0, 0, 0, 788, 789, 5, 97, 0, 0, 789, 790, 5, 115, 0, 0, 790, 791, 5, 99, 0, 0, 791, 113, 1, 0, 0, 0, 792, 793, 5, 61, 0, 0, 793, 115, 1, 0, 0, 0, 794, 795, 5, 58, 0, 0, 795, 796, 5, 58, 0, 0, 796, 117, 1, 0, 0, 0, 797, 798, 5, 44, 0, 0, 798, 119, 1, 0, 0, 0, 799, 800, 5, 100, 0, 0, 800, 801, 5, 101, 0, 0, 801, 802, 5, 115, 0, 0, 802, 803, 5, 99, 0, 0, 803, 121, 1, 0, 0, 0, 804, 805, 5, 46, 0, 0, 805, 123, 1, 0, 0, 0, 806, 807, 5, 102, 0, 0, 807, 808, 5, 97, 0, 0, 808, 809, 5, 108, 0, 0, 809, 810, 5, 115, 0, 0, 810, 811, 5, 101, 0, 0, 811, 125, 1, 0, 0, 0, 812, 813, 5, 102, 0, 0, 813, 814, 5, 105, 0, 0, 814, 815, 5, 114, 0, 0, 815, 816, 5, 115, 0, 0, 816, 817, 5, 116, 0, 0, 817, 127, 1, 0, 0, 0, 818, 819, 5, 105, 0, 0, 819, 820, 5, 110, 0, 0, 820, 129, 1, 0, 0, 0, 821, 822, 5, 105, 0, 0, 822, 823, 5, 115, 0, 0, 823, 131, 1, 0, 0, 0, 824, 825, 5, 108, 0, 0, 825, 826, 5, 97, 0, 0, 826, 827, 5, 115, 0, 0, 827, 828, 5, 116, 0, 0, 828, 133, 1, 0, 0, 0, 829, 830, 5, 108, 0, 0, 830, 831, 5, 105, 0, 0, 831, 832, 5, 107, 0, 0, 832, 833, 5, 101, 0, 0, 833, 135, 1, 0, 0, 0, 834, 835, 5, 40, 0, 0, 835, 137, 1, 0, 0, 0, 836, 837, 5, 109, 0, 0, 837, 838, 5, 97, 0, 0, 838, 839, 5, 116, 0, 0, 839, 840, 5, 99, 0, 0, 840, 841, 5, 104, 0, 0, 841, 139, 1, 0, 0, 0, 842, 843, 5, 110, 0, 0, 843, 844, 5, 111, 0, 0, 844, 845, 5, 116, 0, 0, 845, 141, 1, 0, 0, 0, 846, 847, 5, 110, 0, 0, 847, 848, 5, 117, 0, 0, 848, 849, 5, 108, 0, 0, 849, 850, 5, 108, 0, 0, 850, 143, 1, 0, 0, 0, 851, 852, 5, 110, 0, 0, 852, 853, 5, 117, 0, 0, 853, 854, 5, 108, 0, 0, 854, 855, 5, 108, 0, 0, 855, 856, 5, 115, 0, 0, 856, 145, 1, 0, 0, 0, 857, 858, 5, 111, 0, 0, 858, 859, 5, 114, 0, 0, 859, 147, 1, 0, 0, 0, 860, 861, 5, 63, 0, 0, 861, 149, 1, 0, 0, 0, 862, 863, 5, 114, 0, 0, 863, 864, 5, 108, 0, 0, 864, 865, 5, 105, 0, 0, 865, 866, 5, 107, 0, 0, 866, 867, 5, 101, 0, 0, 867, 151, 1, 0, 0, 0, 868, 869, 5, 41, 0, 0, 869, 153, 1, 0, 0, 0, 870, 871, 5, 116, 0, 0, 871, 872, 5, 114, 0, 0, 872, 873, 5, 117, 0, 0, 873, 874, 5, 101, 0, 0, 874, 155, 1, 0, 0, 0, 875, 876, 5, 61, 0, 0, 876, 877, 5, 61, 0, 0, 877, 157, 1, 0, 0, 0, 878, 879, 5, 61, 0, 0, 879, 880, 5, 126, 0, 0, 880, 159, 1, 0, 0, 0, 881, 882, 5, 33, 0, 0, 882, 883, 5, 61, 0, 0, 883, 161, 1, 0, 0, 0, 884, 885, 5, 60, 0, 0, 885, 163, 1, 0, 0, 0, 886, 887, 5, 60, 0, 0, 887, 888, 5, 61, 0, 0, 888, 165, 1, 0, 0, 0, 889, 890, 5, 62, 0, 0, 890, 167, 1, 0, 0, 0, 891, 892, 5, 62, 0, 0, 892, 893, 5, 61, 0, 0, 893, 169, 1, 0, 0, 0, 894, 895, 5, 43, 0, 0, 895, 171, 1, 0, 0, 0, 896, 897, 5, 45, 0, 0, 897, 173, 1, 0, 0, 0, 
898, 899, 5, 42, 0, 0, 899, 175, 1, 0, 0, 0, 900, 901, 5, 47, 0, 0, 901, 177, 1, 0, 0, 0, 902, 903, 5, 37, 0, 0, 903, 179, 1, 0, 0, 0, 904, 907, 3, 148, 66, 0, 905, 908, 3, 84, 34, 0, 906, 908, 3, 98, 41, 0, 907, 905, 1, 0, 0, 0, 907, 906, 1, 0, 0, 0, 908, 912, 1, 0, 0, 0, 909, 911, 3, 100, 42, 0, 910, 909, 1, 0, 0, 0, 911, 914, 1, 0, 0, 0, 912, 910, 1, 0, 0, 0, 912, 913, 1, 0, 0, 0, 913, 922, 1, 0, 0, 0, 914, 912, 1, 0, 0, 0, 915, 917, 3, 148, 66, 0, 916, 918, 3, 82, 33, 0, 917, 916, 1, 0, 0, 0, 918, 919, 1, 0, 0, 0, 919, 917, 1, 0, 0, 0, 919, 920, 1, 0, 0, 0, 920, 922, 1, 0, 0, 0, 921, 904, 1, 0, 0, 0, 921, 915, 1, 0, 0, 0, 922, 181, 1, 0, 0, 0, 923, 924, 5, 91, 0, 0, 924, 925, 1, 0, 0, 0, 925, 926, 6, 83, 0, 0, 926, 927, 6, 83, 0, 0, 927, 183, 1, 0, 0, 0, 928, 929, 5, 93, 0, 0, 929, 930, 1, 0, 0, 0, 930, 931, 6, 84, 15, 0, 931, 932, 6, 84, 15, 0, 932, 185, 1, 0, 0, 0, 933, 937, 3, 84, 34, 0, 934, 936, 3, 100, 42, 0, 935, 934, 1, 0, 0, 0, 936, 939, 1, 0, 0, 0, 937, 935, 1, 0, 0, 0, 937, 938, 1, 0, 0, 0, 938, 950, 1, 0, 0, 0, 939, 937, 1, 0, 0, 0, 940, 943, 3, 98, 41, 0, 941, 943, 3, 92, 38, 0, 942, 940, 1, 0, 0, 0, 942, 941, 1, 0, 0, 0, 943, 945, 1, 0, 0, 0, 944, 946, 3, 100, 42, 0, 945, 944, 1, 0, 0, 0, 946, 947, 1, 0, 0, 0, 947, 945, 1, 0, 0, 0, 947, 948, 1, 0, 0, 0, 948, 950, 1, 0, 0, 0, 949, 933, 1, 0, 0, 0, 949, 942, 1, 0, 0, 0, 950, 187, 1, 0, 0, 0, 951, 953, 3, 94, 39, 0, 952, 954, 3, 96, 40, 0, 953, 952, 1, 0, 0, 0, 954, 955, 1, 0, 0, 0, 955, 953, 1, 0, 0, 0, 955, 956, 1, 0, 0, 0, 956, 957, 1, 0, 0, 0, 957, 958, 3, 94, 39, 0, 958, 189, 1, 0, 0, 0, 959, 960, 3, 188, 86, 0, 960, 191, 1, 0, 0, 0, 961, 962, 3, 60, 22, 0, 962, 963, 1, 0, 0, 0, 963, 964, 6, 88, 11, 0, 964, 193, 1, 0, 0, 0, 965, 966, 3, 62, 23, 0, 966, 967, 1, 0, 0, 0, 967, 968, 6, 89, 11, 0, 968, 195, 1, 0, 0, 0, 969, 970, 3, 64, 24, 0, 970, 971, 1, 0, 0, 0, 971, 972, 6, 90, 11, 0, 972, 197, 1, 0, 0, 0, 973, 974, 3, 80, 32, 0, 974, 975, 1, 0, 0, 0, 975, 976, 6, 91, 14, 0, 976, 977, 6, 91, 15, 0, 977, 199, 1, 0, 0, 0, 978, 979, 3, 182, 83, 0, 979, 980, 1, 0, 0, 0, 980, 981, 6, 92, 12, 0, 981, 201, 1, 0, 0, 0, 982, 983, 3, 184, 84, 0, 983, 984, 1, 0, 0, 0, 984, 985, 6, 93, 16, 0, 985, 203, 1, 0, 0, 0, 986, 987, 3, 368, 176, 0, 987, 988, 1, 0, 0, 0, 988, 989, 6, 94, 17, 0, 989, 205, 1, 0, 0, 0, 990, 991, 3, 118, 51, 0, 991, 992, 1, 0, 0, 0, 992, 993, 6, 95, 18, 0, 993, 207, 1, 0, 0, 0, 994, 995, 3, 114, 49, 0, 995, 996, 1, 0, 0, 0, 996, 997, 6, 96, 19, 0, 997, 209, 1, 0, 0, 0, 998, 999, 5, 109, 0, 0, 999, 1000, 5, 101, 0, 0, 1000, 1001, 5, 116, 0, 0, 1001, 1002, 5, 97, 0, 0, 1002, 1003, 5, 100, 0, 0, 1003, 1004, 5, 97, 0, 0, 1004, 1005, 5, 116, 0, 0, 1005, 1006, 5, 97, 0, 0, 1006, 211, 1, 0, 0, 0, 1007, 1008, 3, 68, 26, 0, 1008, 1009, 1, 0, 0, 0, 1009, 1010, 6, 98, 20, 0, 1010, 213, 1, 0, 0, 0, 1011, 1012, 3, 102, 43, 0, 1012, 1013, 1, 0, 0, 0, 1013, 1014, 6, 99, 21, 0, 1014, 215, 1, 0, 0, 0, 1015, 1016, 3, 60, 22, 0, 1016, 1017, 1, 0, 0, 0, 1017, 1018, 6, 100, 11, 0, 1018, 217, 1, 0, 0, 0, 1019, 1020, 3, 62, 23, 0, 1020, 1021, 1, 0, 0, 0, 1021, 1022, 6, 101, 11, 0, 1022, 219, 1, 0, 0, 0, 1023, 1024, 3, 64, 24, 0, 1024, 1025, 1, 0, 0, 0, 1025, 1026, 6, 102, 11, 0, 1026, 221, 1, 0, 0, 0, 1027, 1028, 3, 80, 32, 0, 1028, 1029, 1, 0, 0, 0, 1029, 1030, 6, 103, 14, 0, 1030, 1031, 6, 103, 15, 0, 1031, 223, 1, 0, 0, 0, 1032, 1033, 3, 122, 53, 0, 1033, 1034, 1, 0, 0, 0, 1034, 1035, 6, 104, 22, 0, 1035, 225, 1, 0, 0, 0, 1036, 1037, 3, 118, 51, 0, 1037, 1038, 1, 0, 0, 0, 1038, 1039, 6, 105, 18, 0, 1039, 227, 1, 0, 0, 0, 1040, 1045, 
3, 84, 34, 0, 1041, 1045, 3, 82, 33, 0, 1042, 1045, 3, 98, 41, 0, 1043, 1045, 3, 174, 79, 0, 1044, 1040, 1, 0, 0, 0, 1044, 1041, 1, 0, 0, 0, 1044, 1042, 1, 0, 0, 0, 1044, 1043, 1, 0, 0, 0, 1045, 229, 1, 0, 0, 0, 1046, 1049, 3, 84, 34, 0, 1047, 1049, 3, 174, 79, 0, 1048, 1046, 1, 0, 0, 0, 1048, 1047, 1, 0, 0, 0, 1049, 1053, 1, 0, 0, 0, 1050, 1052, 3, 228, 106, 0, 1051, 1050, 1, 0, 0, 0, 1052, 1055, 1, 0, 0, 0, 1053, 1051, 1, 0, 0, 0, 1053, 1054, 1, 0, 0, 0, 1054, 1066, 1, 0, 0, 0, 1055, 1053, 1, 0, 0, 0, 1056, 1059, 3, 98, 41, 0, 1057, 1059, 3, 92, 38, 0, 1058, 1056, 1, 0, 0, 0, 1058, 1057, 1, 0, 0, 0, 1059, 1061, 1, 0, 0, 0, 1060, 1062, 3, 228, 106, 0, 1061, 1060, 1, 0, 0, 0, 1062, 1063, 1, 0, 0, 0, 1063, 1061, 1, 0, 0, 0, 1063, 1064, 1, 0, 0, 0, 1064, 1066, 1, 0, 0, 0, 1065, 1048, 1, 0, 0, 0, 1065, 1058, 1, 0, 0, 0, 1066, 231, 1, 0, 0, 0, 1067, 1070, 3, 230, 107, 0, 1068, 1070, 3, 188, 86, 0, 1069, 1067, 1, 0, 0, 0, 1069, 1068, 1, 0, 0, 0, 1070, 1071, 1, 0, 0, 0, 1071, 1069, 1, 0, 0, 0, 1071, 1072, 1, 0, 0, 0, 1072, 233, 1, 0, 0, 0, 1073, 1074, 3, 60, 22, 0, 1074, 1075, 1, 0, 0, 0, 1075, 1076, 6, 109, 11, 0, 1076, 235, 1, 0, 0, 0, 1077, 1078, 3, 62, 23, 0, 1078, 1079, 1, 0, 0, 0, 1079, 1080, 6, 110, 11, 0, 1080, 237, 1, 0, 0, 0, 1081, 1082, 3, 64, 24, 0, 1082, 1083, 1, 0, 0, 0, 1083, 1084, 6, 111, 11, 0, 1084, 239, 1, 0, 0, 0, 1085, 1086, 3, 80, 32, 0, 1086, 1087, 1, 0, 0, 0, 1087, 1088, 6, 112, 14, 0, 1088, 1089, 6, 112, 15, 0, 1089, 241, 1, 0, 0, 0, 1090, 1091, 3, 114, 49, 0, 1091, 1092, 1, 0, 0, 0, 1092, 1093, 6, 113, 19, 0, 1093, 243, 1, 0, 0, 0, 1094, 1095, 3, 118, 51, 0, 1095, 1096, 1, 0, 0, 0, 1096, 1097, 6, 114, 18, 0, 1097, 245, 1, 0, 0, 0, 1098, 1099, 3, 122, 53, 0, 1099, 1100, 1, 0, 0, 0, 1100, 1101, 6, 115, 22, 0, 1101, 247, 1, 0, 0, 0, 1102, 1103, 5, 97, 0, 0, 1103, 1104, 5, 115, 0, 0, 1104, 249, 1, 0, 0, 0, 1105, 1106, 3, 232, 108, 0, 1106, 1107, 1, 0, 0, 0, 1107, 1108, 6, 117, 23, 0, 1108, 251, 1, 0, 0, 0, 1109, 1110, 3, 60, 22, 0, 1110, 1111, 1, 0, 0, 0, 1111, 1112, 6, 118, 11, 0, 1112, 253, 1, 0, 0, 0, 1113, 1114, 3, 62, 23, 0, 1114, 1115, 1, 0, 0, 0, 1115, 1116, 6, 119, 11, 0, 1116, 255, 1, 0, 0, 0, 1117, 1118, 3, 64, 24, 0, 1118, 1119, 1, 0, 0, 0, 1119, 1120, 6, 120, 11, 0, 1120, 257, 1, 0, 0, 0, 1121, 1122, 3, 80, 32, 0, 1122, 1123, 1, 0, 0, 0, 1123, 1124, 6, 121, 14, 0, 1124, 1125, 6, 121, 15, 0, 1125, 259, 1, 0, 0, 0, 1126, 1127, 3, 182, 83, 0, 1127, 1128, 1, 0, 0, 0, 1128, 1129, 6, 122, 12, 0, 1129, 1130, 6, 122, 24, 0, 1130, 261, 1, 0, 0, 0, 1131, 1132, 5, 111, 0, 0, 1132, 1133, 5, 110, 0, 0, 1133, 1134, 1, 0, 0, 0, 1134, 1135, 6, 123, 25, 0, 1135, 263, 1, 0, 0, 0, 1136, 1137, 5, 119, 0, 0, 1137, 1138, 5, 105, 0, 0, 1138, 1139, 5, 116, 0, 0, 1139, 1140, 5, 104, 0, 0, 1140, 1141, 1, 0, 0, 0, 1141, 1142, 6, 124, 25, 0, 1142, 265, 1, 0, 0, 0, 1143, 1144, 8, 12, 0, 0, 1144, 267, 1, 0, 0, 0, 1145, 1147, 3, 266, 125, 0, 1146, 1145, 1, 0, 0, 0, 1147, 1148, 1, 0, 0, 0, 1148, 1146, 1, 0, 0, 0, 1148, 1149, 1, 0, 0, 0, 1149, 1150, 1, 0, 0, 0, 1150, 1151, 3, 368, 176, 0, 1151, 1153, 1, 0, 0, 0, 1152, 1146, 1, 0, 0, 0, 1152, 1153, 1, 0, 0, 0, 1153, 1155, 1, 0, 0, 0, 1154, 1156, 3, 266, 125, 0, 1155, 1154, 1, 0, 0, 0, 1156, 1157, 1, 0, 0, 0, 1157, 1155, 1, 0, 0, 0, 1157, 1158, 1, 0, 0, 0, 1158, 269, 1, 0, 0, 0, 1159, 1160, 3, 268, 126, 0, 1160, 1161, 1, 0, 0, 0, 1161, 1162, 6, 127, 26, 0, 1162, 271, 1, 0, 0, 0, 1163, 1164, 3, 60, 22, 0, 1164, 1165, 1, 0, 0, 0, 1165, 1166, 6, 128, 11, 0, 1166, 273, 1, 0, 0, 0, 1167, 1168, 3, 62, 23, 0, 1168, 1169, 1, 0, 0, 0, 1169, 1170, 6, 129, 
11, 0, 1170, 275, 1, 0, 0, 0, 1171, 1172, 3, 64, 24, 0, 1172, 1173, 1, 0, 0, 0, 1173, 1174, 6, 130, 11, 0, 1174, 277, 1, 0, 0, 0, 1175, 1176, 3, 80, 32, 0, 1176, 1177, 1, 0, 0, 0, 1177, 1178, 6, 131, 14, 0, 1178, 1179, 6, 131, 15, 0, 1179, 1180, 6, 131, 15, 0, 1180, 279, 1, 0, 0, 0, 1181, 1182, 3, 114, 49, 0, 1182, 1183, 1, 0, 0, 0, 1183, 1184, 6, 132, 19, 0, 1184, 281, 1, 0, 0, 0, 1185, 1186, 3, 118, 51, 0, 1186, 1187, 1, 0, 0, 0, 1187, 1188, 6, 133, 18, 0, 1188, 283, 1, 0, 0, 0, 1189, 1190, 3, 122, 53, 0, 1190, 1191, 1, 0, 0, 0, 1191, 1192, 6, 134, 22, 0, 1192, 285, 1, 0, 0, 0, 1193, 1194, 3, 264, 124, 0, 1194, 1195, 1, 0, 0, 0, 1195, 1196, 6, 135, 27, 0, 1196, 287, 1, 0, 0, 0, 1197, 1198, 3, 232, 108, 0, 1198, 1199, 1, 0, 0, 0, 1199, 1200, 6, 136, 23, 0, 1200, 289, 1, 0, 0, 0, 1201, 1202, 3, 190, 87, 0, 1202, 1203, 1, 0, 0, 0, 1203, 1204, 6, 137, 28, 0, 1204, 291, 1, 0, 0, 0, 1205, 1206, 3, 60, 22, 0, 1206, 1207, 1, 0, 0, 0, 1207, 1208, 6, 138, 11, 0, 1208, 293, 1, 0, 0, 0, 1209, 1210, 3, 62, 23, 0, 1210, 1211, 1, 0, 0, 0, 1211, 1212, 6, 139, 11, 0, 1212, 295, 1, 0, 0, 0, 1213, 1214, 3, 64, 24, 0, 1214, 1215, 1, 0, 0, 0, 1215, 1216, 6, 140, 11, 0, 1216, 297, 1, 0, 0, 0, 1217, 1218, 3, 80, 32, 0, 1218, 1219, 1, 0, 0, 0, 1219, 1220, 6, 141, 14, 0, 1220, 1221, 6, 141, 15, 0, 1221, 299, 1, 0, 0, 0, 1222, 1223, 3, 368, 176, 0, 1223, 1224, 1, 0, 0, 0, 1224, 1225, 6, 142, 17, 0, 1225, 301, 1, 0, 0, 0, 1226, 1227, 3, 118, 51, 0, 1227, 1228, 1, 0, 0, 0, 1228, 1229, 6, 143, 18, 0, 1229, 303, 1, 0, 0, 0, 1230, 1231, 3, 122, 53, 0, 1231, 1232, 1, 0, 0, 0, 1232, 1233, 6, 144, 22, 0, 1233, 305, 1, 0, 0, 0, 1234, 1235, 3, 262, 123, 0, 1235, 1236, 1, 0, 0, 0, 1236, 1237, 6, 145, 29, 0, 1237, 1238, 6, 145, 30, 0, 1238, 307, 1, 0, 0, 0, 1239, 1240, 3, 68, 26, 0, 1240, 1241, 1, 0, 0, 0, 1241, 1242, 6, 146, 20, 0, 1242, 309, 1, 0, 0, 0, 1243, 1244, 3, 102, 43, 0, 1244, 1245, 1, 0, 0, 0, 1245, 1246, 6, 147, 21, 0, 1246, 311, 1, 0, 0, 0, 1247, 1248, 3, 60, 22, 0, 1248, 1249, 1, 0, 0, 0, 1249, 1250, 6, 148, 11, 0, 1250, 313, 1, 0, 0, 0, 1251, 1252, 3, 62, 23, 0, 1252, 1253, 1, 0, 0, 0, 1253, 1254, 6, 149, 11, 0, 1254, 315, 1, 0, 0, 0, 1255, 1256, 3, 64, 24, 0, 1256, 1257, 1, 0, 0, 0, 1257, 1258, 6, 150, 11, 0, 1258, 317, 1, 0, 0, 0, 1259, 1260, 3, 80, 32, 0, 1260, 1261, 1, 0, 0, 0, 1261, 1262, 6, 151, 14, 0, 1262, 1263, 6, 151, 15, 0, 1263, 1264, 6, 151, 15, 0, 1264, 319, 1, 0, 0, 0, 1265, 1266, 3, 118, 51, 0, 1266, 1267, 1, 0, 0, 0, 1267, 1268, 6, 152, 18, 0, 1268, 321, 1, 0, 0, 0, 1269, 1270, 3, 122, 53, 0, 1270, 1271, 1, 0, 0, 0, 1271, 1272, 6, 153, 22, 0, 1272, 323, 1, 0, 0, 0, 1273, 1274, 3, 232, 108, 0, 1274, 1275, 1, 0, 0, 0, 1275, 1276, 6, 154, 23, 0, 1276, 325, 1, 0, 0, 0, 1277, 1278, 3, 60, 22, 0, 1278, 1279, 1, 0, 0, 0, 1279, 1280, 6, 155, 11, 0, 1280, 327, 1, 0, 0, 0, 1281, 1282, 3, 62, 23, 0, 1282, 1283, 1, 0, 0, 0, 1283, 1284, 6, 156, 11, 0, 1284, 329, 1, 0, 0, 0, 1285, 1286, 3, 64, 24, 0, 1286, 1287, 1, 0, 0, 0, 1287, 1288, 6, 157, 11, 0, 1288, 331, 1, 0, 0, 0, 1289, 1290, 3, 80, 32, 0, 1290, 1291, 1, 0, 0, 0, 1291, 1292, 6, 158, 14, 0, 1292, 1293, 6, 158, 15, 0, 1293, 333, 1, 0, 0, 0, 1294, 1295, 3, 122, 53, 0, 1295, 1296, 1, 0, 0, 0, 1296, 1297, 6, 159, 22, 0, 1297, 335, 1, 0, 0, 0, 1298, 1299, 3, 190, 87, 0, 1299, 1300, 1, 0, 0, 0, 1300, 1301, 6, 160, 28, 0, 1301, 337, 1, 0, 0, 0, 1302, 1303, 3, 186, 85, 0, 1303, 1304, 1, 0, 0, 0, 1304, 1305, 6, 161, 31, 0, 1305, 339, 1, 0, 0, 0, 1306, 1307, 3, 60, 22, 0, 1307, 1308, 1, 0, 0, 0, 1308, 1309, 6, 162, 11, 0, 1309, 341, 1, 0, 0, 0, 1310, 1311, 
3, 62, 23, 0, 1311, 1312, 1, 0, 0, 0, 1312, 1313, 6, 163, 11, 0, 1313, 343, 1, 0, 0, 0, 1314, 1315, 3, 64, 24, 0, 1315, 1316, 1, 0, 0, 0, 1316, 1317, 6, 164, 11, 0, 1317, 345, 1, 0, 0, 0, 1318, 1319, 3, 80, 32, 0, 1319, 1320, 1, 0, 0, 0, 1320, 1321, 6, 165, 14, 0, 1321, 1322, 6, 165, 15, 0, 1322, 347, 1, 0, 0, 0, 1323, 1324, 5, 105, 0, 0, 1324, 1325, 5, 110, 0, 0, 1325, 1326, 5, 102, 0, 0, 1326, 1327, 5, 111, 0, 0, 1327, 349, 1, 0, 0, 0, 1328, 1329, 3, 60, 22, 0, 1329, 1330, 1, 0, 0, 0, 1330, 1331, 6, 167, 11, 0, 1331, 351, 1, 0, 0, 0, 1332, 1333, 3, 62, 23, 0, 1333, 1334, 1, 0, 0, 0, 1334, 1335, 6, 168, 11, 0, 1335, 353, 1, 0, 0, 0, 1336, 1337, 3, 64, 24, 0, 1337, 1338, 1, 0, 0, 0, 1338, 1339, 6, 169, 11, 0, 1339, 355, 1, 0, 0, 0, 1340, 1341, 3, 80, 32, 0, 1341, 1342, 1, 0, 0, 0, 1342, 1343, 6, 170, 14, 0, 1343, 1344, 6, 170, 15, 0, 1344, 357, 1, 0, 0, 0, 1345, 1346, 5, 102, 0, 0, 1346, 1347, 5, 117, 0, 0, 1347, 1348, 5, 110, 0, 0, 1348, 1349, 5, 99, 0, 0, 1349, 1350, 5, 116, 0, 0, 1350, 1351, 5, 105, 0, 0, 1351, 1352, 5, 111, 0, 0, 1352, 1353, 5, 110, 0, 0, 1353, 1354, 5, 115, 0, 0, 1354, 359, 1, 0, 0, 0, 1355, 1356, 3, 60, 22, 0, 1356, 1357, 1, 0, 0, 0, 1357, 1358, 6, 172, 11, 0, 1358, 361, 1, 0, 0, 0, 1359, 1360, 3, 62, 23, 0, 1360, 1361, 1, 0, 0, 0, 1361, 1362, 6, 173, 11, 0, 1362, 363, 1, 0, 0, 0, 1363, 1364, 3, 64, 24, 0, 1364, 1365, 1, 0, 0, 0, 1365, 1366, 6, 174, 11, 0, 1366, 365, 1, 0, 0, 0, 1367, 1368, 3, 184, 84, 0, 1368, 1369, 1, 0, 0, 0, 1369, 1370, 6, 175, 16, 0, 1370, 1371, 6, 175, 15, 0, 1371, 367, 1, 0, 0, 0, 1372, 1373, 5, 58, 0, 0, 1373, 369, 1, 0, 0, 0, 1374, 1380, 3, 92, 38, 0, 1375, 1380, 3, 82, 33, 0, 1376, 1380, 3, 122, 53, 0, 1377, 1380, 3, 84, 34, 0, 1378, 1380, 3, 98, 41, 0, 1379, 1374, 1, 0, 0, 0, 1379, 1375, 1, 0, 0, 0, 1379, 1376, 1, 0, 0, 0, 1379, 1377, 1, 0, 0, 0, 1379, 1378, 1, 0, 0, 0, 1380, 1381, 1, 0, 0, 0, 1381, 1379, 1, 0, 0, 0, 1381, 1382, 1, 0, 0, 0, 1382, 371, 1, 0, 0, 0, 1383, 1384, 3, 60, 22, 0, 1384, 1385, 1, 0, 0, 0, 1385, 1386, 6, 178, 11, 0, 1386, 373, 1, 0, 0, 0, 1387, 1388, 3, 62, 23, 0, 1388, 1389, 1, 0, 0, 0, 1389, 1390, 6, 179, 11, 0, 1390, 375, 1, 0, 0, 0, 1391, 1392, 3, 64, 24, 0, 1392, 1393, 1, 0, 0, 0, 1393, 1394, 6, 180, 11, 0, 1394, 377, 1, 0, 0, 0, 1395, 1396, 3, 80, 32, 0, 1396, 1397, 1, 0, 0, 0, 1397, 1398, 6, 181, 14, 0, 1398, 1399, 6, 181, 15, 0, 1399, 379, 1, 0, 0, 0, 1400, 1401, 3, 68, 26, 0, 1401, 1402, 1, 0, 0, 0, 1402, 1403, 6, 182, 20, 0, 1403, 1404, 6, 182, 15, 0, 1404, 1405, 6, 182, 32, 0, 1405, 381, 1, 0, 0, 0, 1406, 1407, 3, 102, 43, 0, 1407, 1408, 1, 0, 0, 0, 1408, 1409, 6, 183, 21, 0, 1409, 1410, 6, 183, 15, 0, 1410, 1411, 6, 183, 32, 0, 1411, 383, 1, 0, 0, 0, 1412, 1413, 3, 60, 22, 0, 1413, 1414, 1, 0, 0, 0, 1414, 1415, 6, 184, 11, 0, 1415, 385, 1, 0, 0, 0, 1416, 1417, 3, 62, 23, 0, 1417, 1418, 1, 0, 0, 0, 1418, 1419, 6, 185, 11, 0, 1419, 387, 1, 0, 0, 0, 1420, 1421, 3, 64, 24, 0, 1421, 1422, 1, 0, 0, 0, 1422, 1423, 6, 186, 11, 0, 1423, 389, 1, 0, 0, 0, 1424, 1425, 3, 368, 176, 0, 1425, 1426, 1, 0, 0, 0, 1426, 1427, 6, 187, 17, 0, 1427, 1428, 6, 187, 15, 0, 1428, 1429, 6, 187, 7, 0, 1429, 391, 1, 0, 0, 0, 1430, 1431, 3, 118, 51, 0, 1431, 1432, 1, 0, 0, 0, 1432, 1433, 6, 188, 18, 0, 1433, 1434, 6, 188, 15, 0, 1434, 1435, 6, 188, 7, 0, 1435, 393, 1, 0, 0, 0, 1436, 1437, 3, 60, 22, 0, 1437, 1438, 1, 0, 0, 0, 1438, 1439, 6, 189, 11, 0, 1439, 395, 1, 0, 0, 0, 1440, 1441, 3, 62, 23, 0, 1441, 1442, 1, 0, 0, 0, 1442, 1443, 6, 190, 11, 0, 1443, 397, 1, 0, 0, 0, 1444, 1445, 3, 64, 24, 0, 1445, 1446, 1, 0, 0, 0, 1446, 1447, 
6, 191, 11, 0, 1447, 399, 1, 0, 0, 0, 1448, 1449, 3, 190, 87, 0, 1449, 1450, 1, 0, 0, 0, 1450, 1451, 6, 192, 15, 0, 1451, 1452, 6, 192, 0, 0, 1452, 1453, 6, 192, 28, 0, 1453, 401, 1, 0, 0, 0, 1454, 1455, 3, 186, 85, 0, 1455, 1456, 1, 0, 0, 0, 1456, 1457, 6, 193, 15, 0, 1457, 1458, 6, 193, 0, 0, 1458, 1459, 6, 193, 31, 0, 1459, 403, 1, 0, 0, 0, 1460, 1461, 3, 108, 46, 0, 1461, 1462, 1, 0, 0, 0, 1462, 1463, 6, 194, 15, 0, 1463, 1464, 6, 194, 0, 0, 1464, 1465, 6, 194, 33, 0, 1465, 405, 1, 0, 0, 0, 1466, 1467, 3, 80, 32, 0, 1467, 1468, 1, 0, 0, 0, 1468, 1469, 6, 195, 14, 0, 1469, 1470, 6, 195, 15, 0, 1470, 407, 1, 0, 0, 0, 66, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 588, 598, 602, 605, 614, 616, 627, 634, 639, 678, 683, 692, 699, 704, 706, 717, 725, 728, 730, 735, 740, 746, 753, 758, 764, 767, 775, 779, 907, 912, 919, 921, 937, 942, 947, 949, 955, 1044, 1048, 1053, 1058, 1063, 1065, 1069, 1071, 1148, 1152, 1157, 1379, 1381, 34, 5, 2, 0, 5, 4, 0, 5, 6, 0, 5, 1, 0, 5, 3, 0, 5, 8, 0, 5, 12, 0, 5, 14, 0, 5, 10, 0, 5, 5, 0, 5, 11, 0, 0, 1, 0, 7, 71, 0, 5, 0, 0, 7, 30, 0, 4, 0, 0, 7, 72, 0, 7, 116, 0, 7, 39, 0, 7, 37, 0, 7, 26, 0, 7, 31, 0, 7, 41, 0, 7, 82, 0, 5, 13, 0, 5, 7, 0, 7, 92, 0, 7, 91, 0, 7, 74, 0, 7, 90, 0, 5, 9, 0, 7, 73, 0, 5, 15, 0, 7, 34, 0] \ No newline at end of file diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java index 831be58254d6e..98760b8595c32 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java @@ -214,7 +214,7 @@ public EsqlBaseLexer(CharStream input) { public ATN getATN() { return _ATN; } public static final String _serializedATN = - "\u0004\u0000~\u05bc\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ + "\u0004\u0000~\u05bf\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ @@ -335,349 +335,349 @@ public EsqlBaseLexer(CharStream input) { "F\u0001F\u0001G\u0001G\u0001G\u0001H\u0001H\u0001H\u0001I\u0001I\u0001"+ "J\u0001J\u0001J\u0001K\u0001K\u0001L\u0001L\u0001L\u0001M\u0001M\u0001"+ "N\u0001N\u0001O\u0001O\u0001P\u0001P\u0001Q\u0001Q\u0001R\u0001R\u0001"+ - "R\u0005R\u038c\bR\nR\fR\u038f\tR\u0001R\u0001R\u0004R\u0393\bR\u000bR"+ - "\fR\u0394\u0003R\u0397\bR\u0001S\u0001S\u0001S\u0001S\u0001S\u0001T\u0001"+ - "T\u0001T\u0001T\u0001T\u0001U\u0001U\u0005U\u03a5\bU\nU\fU\u03a8\tU\u0001"+ - "U\u0001U\u0003U\u03ac\bU\u0001U\u0004U\u03af\bU\u000bU\fU\u03b0\u0003"+ - "U\u03b3\bU\u0001V\u0001V\u0004V\u03b7\bV\u000bV\fV\u03b8\u0001V\u0001"+ - "V\u0001W\u0001W\u0001X\u0001X\u0001X\u0001X\u0001Y\u0001Y\u0001Y\u0001"+ - "Y\u0001Z\u0001Z\u0001Z\u0001Z\u0001[\u0001[\u0001[\u0001[\u0001[\u0001"+ - "\\\u0001\\\u0001\\\u0001\\\u0001]\u0001]\u0001]\u0001]\u0001^\u0001^\u0001"+ - "^\u0001^\u0001_\u0001_\u0001_\u0001_\u0001`\u0001`\u0001`\u0001`\u0001"+ - "a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001b\u0001"+ - "b\u0001b\u0001b\u0001c\u0001c\u0001c\u0001c\u0001d\u0001d\u0001d\u0001"+ - "d\u0001e\u0001e\u0001e\u0001e\u0001f\u0001f\u0001f\u0001f\u0001g\u0001"+ - "g\u0001g\u0001g\u0001g\u0001h\u0001h\u0001h\u0001h\u0001i\u0001i\u0001"+ - 
"i\u0001i\u0001j\u0001j\u0001j\u0001j\u0003j\u0412\bj\u0001k\u0001k\u0003"+ - "k\u0416\bk\u0001k\u0005k\u0419\bk\nk\fk\u041c\tk\u0001k\u0001k\u0003k"+ - "\u0420\bk\u0001k\u0004k\u0423\bk\u000bk\fk\u0424\u0003k\u0427\bk\u0001"+ - "l\u0001l\u0004l\u042b\bl\u000bl\fl\u042c\u0001m\u0001m\u0001m\u0001m\u0001"+ - "n\u0001n\u0001n\u0001n\u0001o\u0001o\u0001o\u0001o\u0001p\u0001p\u0001"+ - "p\u0001p\u0001p\u0001q\u0001q\u0001q\u0001q\u0001r\u0001r\u0001r\u0001"+ - "r\u0001s\u0001s\u0001s\u0001s\u0001t\u0001t\u0001t\u0001u\u0001u\u0001"+ - "u\u0001u\u0001v\u0001v\u0001v\u0001v\u0001w\u0001w\u0001w\u0001w\u0001"+ - "x\u0001x\u0001x\u0001x\u0001y\u0001y\u0001y\u0001y\u0001y\u0001z\u0001"+ - "z\u0001z\u0001z\u0001z\u0001{\u0001{\u0001{\u0001{\u0001{\u0001|\u0001"+ - "|\u0001|\u0001|\u0001|\u0001|\u0001|\u0001}\u0001}\u0001~\u0004~\u0478"+ - "\b~\u000b~\f~\u0479\u0001~\u0001~\u0003~\u047e\b~\u0001~\u0004~\u0481"+ - "\b~\u000b~\f~\u0482\u0001\u007f\u0001\u007f\u0001\u007f\u0001\u007f\u0001"+ - "\u0080\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0081\u0001\u0081\u0001"+ - "\u0081\u0001\u0081\u0001\u0082\u0001\u0082\u0001\u0082\u0001\u0082\u0001"+ - "\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083\u0001"+ - "\u0084\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0085\u0001\u0085\u0001"+ - "\u0085\u0001\u0085\u0001\u0086\u0001\u0086\u0001\u0086\u0001\u0086\u0001"+ - "\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0088\u0001\u0088\u0001"+ - "\u0088\u0001\u0088\u0001\u0089\u0001\u0089\u0001\u0089\u0001\u0089\u0001"+ - "\u008a\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008b\u0001\u008b\u0001"+ - "\u008b\u0001\u008b\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008c\u0001"+ - "\u008d\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008e\u0001"+ - "\u008e\u0001\u008e\u0001\u008e\u0001\u008f\u0001\u008f\u0001\u008f\u0001"+ - "\u008f\u0001\u0090\u0001\u0090\u0001\u0090\u0001\u0090\u0001\u0091\u0001"+ - "\u0091\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0092\u0001\u0092\u0001"+ - "\u0092\u0001\u0092\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001"+ - "\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0095\u0001\u0095\u0001"+ - "\u0095\u0001\u0095\u0001\u0096\u0001\u0096\u0001\u0096\u0001\u0096\u0001"+ - "\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001"+ - "\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0099\u0001\u0099\u0001"+ - "\u0099\u0001\u0099\u0001\u009a\u0001\u009a\u0001\u009a\u0001\u009a\u0001"+ - "\u009b\u0001\u009b\u0001\u009b\u0001\u009b\u0001\u009c\u0001\u009c\u0001"+ - "\u009c\u0001\u009c\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009d\u0001"+ - "\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009f\u0001"+ - "\u009f\u0001\u009f\u0001\u009f\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001"+ - "\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a2\u0001"+ - "\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001"+ - "\u00a3\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a5\u0001"+ - "\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a6\u0001\u00a6\u0001"+ - "\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001"+ - "\u00a7\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a9\u0001"+ - "\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001"+ - "\u00aa\u0001\u00aa\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001"+ - "\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001"+ - 
"\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ad\u0001\u00ad\u0001"+ - "\u00ad\u0001\u00ad\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001"+ - "\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00b0\u0001"+ - "\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0004"+ - "\u00b1\u0561\b\u00b1\u000b\u00b1\f\u00b1\u0562\u0001\u00b2\u0001\u00b2"+ - "\u0001\u00b2\u0001\u00b2\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3"+ - "\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b5\u0001\u00b5"+ - "\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b6\u0001\u00b6\u0001\u00b6"+ - "\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b7\u0001\u00b7\u0001\u00b7"+ - "\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b8\u0001\u00b8\u0001\u00b8"+ - "\u0001\u00b8\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00ba"+ - "\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00bb\u0001\u00bb\u0001\u00bb"+ - "\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bc\u0001\u00bc\u0001\u00bc"+ - "\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bd\u0001\u00bd\u0001\u00bd"+ - "\u0001\u00bd\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00bf"+ - "\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00c0\u0001\u00c0\u0001\u00c0"+ - "\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c1\u0001\u00c1\u0001\u00c1"+ - "\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c2\u0001\u00c2\u0001\u00c2"+ - "\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c3\u0001\u00c3\u0001\u00c3"+ - "\u0001\u00c3\u0001\u00c3\u0002\u0268\u02cd\u0000\u00c4\u0010\u0001\u0012"+ - "\u0002\u0014\u0003\u0016\u0004\u0018\u0005\u001a\u0006\u001c\u0007\u001e"+ - "\b \t\"\n$\u000b&\f(\r*\u000e,\u000f.\u00100\u00112\u00124\u00136\u0014"+ - "8\u0015:\u0016<\u0017>\u0018@\u0019B\u0000D\u001aF\u0000H\u0000J\u001b"+ - "L\u001cN\u001dP\u001eR\u0000T\u0000V\u0000X\u0000Z\u0000\\\u0000^\u0000"+ - "`\u0000b\u0000d\u0000f\u001fh j!l\"n#p$r%t&v\'x(z)|*~+\u0080,\u0082-\u0084"+ - ".\u0086/\u00880\u008a1\u008c2\u008e3\u00904\u00925\u00946\u00967\u0098"+ - "8\u009a9\u009c:\u009e;\u00a0<\u00a2=\u00a4>\u00a6?\u00a8@\u00aaA\u00ac"+ - "B\u00aeC\u00b0D\u00b2E\u00b4F\u00b6G\u00b8H\u00baI\u00bc\u0000\u00beJ"+ - "\u00c0K\u00c2L\u00c4M\u00c6\u0000\u00c8\u0000\u00ca\u0000\u00cc\u0000"+ - "\u00ce\u0000\u00d0\u0000\u00d2N\u00d4\u0000\u00d6\u0000\u00d8O\u00daP"+ - "\u00dcQ\u00de\u0000\u00e0\u0000\u00e2\u0000\u00e4\u0000\u00e6\u0000\u00e8"+ - "R\u00eaS\u00ecT\u00eeU\u00f0\u0000\u00f2\u0000\u00f4\u0000\u00f6\u0000"+ - "\u00f8V\u00fa\u0000\u00fcW\u00feX\u0100Y\u0102\u0000\u0104\u0000\u0106"+ - "Z\u0108[\u010a\u0000\u010c\\\u010e\u0000\u0110]\u0112^\u0114_\u0116\u0000"+ - "\u0118\u0000\u011a\u0000\u011c\u0000\u011e\u0000\u0120\u0000\u0122\u0000"+ - "\u0124`\u0126a\u0128b\u012a\u0000\u012c\u0000\u012e\u0000\u0130\u0000"+ - "\u0132\u0000\u0134\u0000\u0136\u0000\u0138c\u013ad\u013ce\u013e\u0000"+ - "\u0140\u0000\u0142\u0000\u0144\u0000\u0146f\u0148g\u014ah\u014c\u0000"+ - "\u014e\u0000\u0150\u0000\u0152\u0000\u0154i\u0156j\u0158k\u015a\u0000"+ - "\u015cl\u015em\u0160n\u0162o\u0164\u0000\u0166p\u0168q\u016ar\u016cs\u016e"+ - "\u0000\u0170t\u0172u\u0174v\u0176w\u0178x\u017a\u0000\u017c\u0000\u017e"+ - "\u0000\u0180y\u0182z\u0184{\u0186\u0000\u0188\u0000\u018a|\u018c}\u018e"+ - "~\u0190\u0000\u0192\u0000\u0194\u0000\u0196\u0000\u0010\u0000\u0001\u0002"+ - "\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\r\u0006\u0000"+ - "\t\n\r\r //[[]]\u0002\u0000\n\n\r\r\u0003\u0000\t\n\r\r \u000b\u0000"+ - "\t\n\r\r \"\",,//::==[[]]||\u0002\u0000**//\u0001\u000009\u0002\u0000"+ - 
"AZaz\u0005\u0000\"\"\\\\nnrrtt\u0004\u0000\n\n\r\r\"\"\\\\\u0002\u0000"+ - "EEee\u0002\u0000++--\u0001\u0000``\u000b\u0000\t\n\r\r \"#,,//::<<>?"+ - "\\\\||\u05d6\u0000\u0010\u0001\u0000\u0000\u0000\u0000\u0012\u0001\u0000"+ - "\u0000\u0000\u0000\u0014\u0001\u0000\u0000\u0000\u0000\u0016\u0001\u0000"+ - "\u0000\u0000\u0000\u0018\u0001\u0000\u0000\u0000\u0000\u001a\u0001\u0000"+ - "\u0000\u0000\u0000\u001c\u0001\u0000\u0000\u0000\u0000\u001e\u0001\u0000"+ - "\u0000\u0000\u0000 \u0001\u0000\u0000\u0000\u0000\"\u0001\u0000\u0000"+ - "\u0000\u0000$\u0001\u0000\u0000\u0000\u0000&\u0001\u0000\u0000\u0000\u0000"+ - "(\u0001\u0000\u0000\u0000\u0000*\u0001\u0000\u0000\u0000\u0000,\u0001"+ - "\u0000\u0000\u0000\u0000.\u0001\u0000\u0000\u0000\u00000\u0001\u0000\u0000"+ - "\u0000\u00002\u0001\u0000\u0000\u0000\u00004\u0001\u0000\u0000\u0000\u0000"+ - "6\u0001\u0000\u0000\u0000\u00008\u0001\u0000\u0000\u0000\u0000:\u0001"+ - "\u0000\u0000\u0000\u0000<\u0001\u0000\u0000\u0000\u0000>\u0001\u0000\u0000"+ - "\u0000\u0000@\u0001\u0000\u0000\u0000\u0000D\u0001\u0000\u0000\u0000\u0001"+ - "F\u0001\u0000\u0000\u0000\u0001H\u0001\u0000\u0000\u0000\u0001J\u0001"+ - "\u0000\u0000\u0000\u0001L\u0001\u0000\u0000\u0000\u0001N\u0001\u0000\u0000"+ - "\u0000\u0002P\u0001\u0000\u0000\u0000\u0002f\u0001\u0000\u0000\u0000\u0002"+ - "h\u0001\u0000\u0000\u0000\u0002j\u0001\u0000\u0000\u0000\u0002l\u0001"+ - "\u0000\u0000\u0000\u0002n\u0001\u0000\u0000\u0000\u0002p\u0001\u0000\u0000"+ - "\u0000\u0002r\u0001\u0000\u0000\u0000\u0002t\u0001\u0000\u0000\u0000\u0002"+ - "v\u0001\u0000\u0000\u0000\u0002x\u0001\u0000\u0000\u0000\u0002z\u0001"+ - "\u0000\u0000\u0000\u0002|\u0001\u0000\u0000\u0000\u0002~\u0001\u0000\u0000"+ - "\u0000\u0002\u0080\u0001\u0000\u0000\u0000\u0002\u0082\u0001\u0000\u0000"+ - "\u0000\u0002\u0084\u0001\u0000\u0000\u0000\u0002\u0086\u0001\u0000\u0000"+ - "\u0000\u0002\u0088\u0001\u0000\u0000\u0000\u0002\u008a\u0001\u0000\u0000"+ - "\u0000\u0002\u008c\u0001\u0000\u0000\u0000\u0002\u008e\u0001\u0000\u0000"+ - "\u0000\u0002\u0090\u0001\u0000\u0000\u0000\u0002\u0092\u0001\u0000\u0000"+ - "\u0000\u0002\u0094\u0001\u0000\u0000\u0000\u0002\u0096\u0001\u0000\u0000"+ - "\u0000\u0002\u0098\u0001\u0000\u0000\u0000\u0002\u009a\u0001\u0000\u0000"+ - "\u0000\u0002\u009c\u0001\u0000\u0000\u0000\u0002\u009e\u0001\u0000\u0000"+ - "\u0000\u0002\u00a0\u0001\u0000\u0000\u0000\u0002\u00a2\u0001\u0000\u0000"+ - "\u0000\u0002\u00a4\u0001\u0000\u0000\u0000\u0002\u00a6\u0001\u0000\u0000"+ - "\u0000\u0002\u00a8\u0001\u0000\u0000\u0000\u0002\u00aa\u0001\u0000\u0000"+ - "\u0000\u0002\u00ac\u0001\u0000\u0000\u0000\u0002\u00ae\u0001\u0000\u0000"+ - "\u0000\u0002\u00b0\u0001\u0000\u0000\u0000\u0002\u00b2\u0001\u0000\u0000"+ - "\u0000\u0002\u00b4\u0001\u0000\u0000\u0000\u0002\u00b6\u0001\u0000\u0000"+ - "\u0000\u0002\u00b8\u0001\u0000\u0000\u0000\u0002\u00ba\u0001\u0000\u0000"+ - "\u0000\u0002\u00be\u0001\u0000\u0000\u0000\u0002\u00c0\u0001\u0000\u0000"+ - "\u0000\u0002\u00c2\u0001\u0000\u0000\u0000\u0002\u00c4\u0001\u0000\u0000"+ - "\u0000\u0003\u00c6\u0001\u0000\u0000\u0000\u0003\u00c8\u0001\u0000\u0000"+ - "\u0000\u0003\u00ca\u0001\u0000\u0000\u0000\u0003\u00cc\u0001\u0000\u0000"+ - "\u0000\u0003\u00ce\u0001\u0000\u0000\u0000\u0003\u00d0\u0001\u0000\u0000"+ - "\u0000\u0003\u00d2\u0001\u0000\u0000\u0000\u0003\u00d4\u0001\u0000\u0000"+ - "\u0000\u0003\u00d6\u0001\u0000\u0000\u0000\u0003\u00d8\u0001\u0000\u0000"+ - "\u0000\u0003\u00da\u0001\u0000\u0000\u0000\u0003\u00dc\u0001\u0000\u0000"+ - 
"\u0000\u0004\u00de\u0001\u0000\u0000\u0000\u0004\u00e0\u0001\u0000\u0000"+ - "\u0000\u0004\u00e2\u0001\u0000\u0000\u0000\u0004\u00e8\u0001\u0000\u0000"+ - "\u0000\u0004\u00ea\u0001\u0000\u0000\u0000\u0004\u00ec\u0001\u0000\u0000"+ - "\u0000\u0004\u00ee\u0001\u0000\u0000\u0000\u0005\u00f0\u0001\u0000\u0000"+ - "\u0000\u0005\u00f2\u0001\u0000\u0000\u0000\u0005\u00f4\u0001\u0000\u0000"+ - "\u0000\u0005\u00f6\u0001\u0000\u0000\u0000\u0005\u00f8\u0001\u0000\u0000"+ - "\u0000\u0005\u00fa\u0001\u0000\u0000\u0000\u0005\u00fc\u0001\u0000\u0000"+ - "\u0000\u0005\u00fe\u0001\u0000\u0000\u0000\u0005\u0100\u0001\u0000\u0000"+ - "\u0000\u0006\u0102\u0001\u0000\u0000\u0000\u0006\u0104\u0001\u0000\u0000"+ - "\u0000\u0006\u0106\u0001\u0000\u0000\u0000\u0006\u0108\u0001\u0000\u0000"+ - "\u0000\u0006\u010c\u0001\u0000\u0000\u0000\u0006\u010e\u0001\u0000\u0000"+ - "\u0000\u0006\u0110\u0001\u0000\u0000\u0000\u0006\u0112\u0001\u0000\u0000"+ - "\u0000\u0006\u0114\u0001\u0000\u0000\u0000\u0007\u0116\u0001\u0000\u0000"+ - "\u0000\u0007\u0118\u0001\u0000\u0000\u0000\u0007\u011a\u0001\u0000\u0000"+ - "\u0000\u0007\u011c\u0001\u0000\u0000\u0000\u0007\u011e\u0001\u0000\u0000"+ - "\u0000\u0007\u0120\u0001\u0000\u0000\u0000\u0007\u0122\u0001\u0000\u0000"+ - "\u0000\u0007\u0124\u0001\u0000\u0000\u0000\u0007\u0126\u0001\u0000\u0000"+ - "\u0000\u0007\u0128\u0001\u0000\u0000\u0000\b\u012a\u0001\u0000\u0000\u0000"+ - "\b\u012c\u0001\u0000\u0000\u0000\b\u012e\u0001\u0000\u0000\u0000\b\u0130"+ - "\u0001\u0000\u0000\u0000\b\u0132\u0001\u0000\u0000\u0000\b\u0134\u0001"+ - "\u0000\u0000\u0000\b\u0136\u0001\u0000\u0000\u0000\b\u0138\u0001\u0000"+ - "\u0000\u0000\b\u013a\u0001\u0000\u0000\u0000\b\u013c\u0001\u0000\u0000"+ - "\u0000\t\u013e\u0001\u0000\u0000\u0000\t\u0140\u0001\u0000\u0000\u0000"+ - "\t\u0142\u0001\u0000\u0000\u0000\t\u0144\u0001\u0000\u0000\u0000\t\u0146"+ - "\u0001\u0000\u0000\u0000\t\u0148\u0001\u0000\u0000\u0000\t\u014a\u0001"+ - "\u0000\u0000\u0000\n\u014c\u0001\u0000\u0000\u0000\n\u014e\u0001\u0000"+ - "\u0000\u0000\n\u0150\u0001\u0000\u0000\u0000\n\u0152\u0001\u0000\u0000"+ - "\u0000\n\u0154\u0001\u0000\u0000\u0000\n\u0156\u0001\u0000\u0000\u0000"+ - "\n\u0158\u0001\u0000\u0000\u0000\u000b\u015a\u0001\u0000\u0000\u0000\u000b"+ - "\u015c\u0001\u0000\u0000\u0000\u000b\u015e\u0001\u0000\u0000\u0000\u000b"+ - "\u0160\u0001\u0000\u0000\u0000\u000b\u0162\u0001\u0000\u0000\u0000\f\u0164"+ - "\u0001\u0000\u0000\u0000\f\u0166\u0001\u0000\u0000\u0000\f\u0168\u0001"+ - "\u0000\u0000\u0000\f\u016a\u0001\u0000\u0000\u0000\f\u016c\u0001\u0000"+ - "\u0000\u0000\r\u016e\u0001\u0000\u0000\u0000\r\u0170\u0001\u0000\u0000"+ - "\u0000\r\u0172\u0001\u0000\u0000\u0000\r\u0174\u0001\u0000\u0000\u0000"+ - "\r\u0176\u0001\u0000\u0000\u0000\r\u0178\u0001\u0000\u0000\u0000\u000e"+ - "\u017a\u0001\u0000\u0000\u0000\u000e\u017c\u0001\u0000\u0000\u0000\u000e"+ - "\u017e\u0001\u0000\u0000\u0000\u000e\u0180\u0001\u0000\u0000\u0000\u000e"+ - "\u0182\u0001\u0000\u0000\u0000\u000e\u0184\u0001\u0000\u0000\u0000\u000f"+ - "\u0186\u0001\u0000\u0000\u0000\u000f\u0188\u0001\u0000\u0000\u0000\u000f"+ - "\u018a\u0001\u0000\u0000\u0000\u000f\u018c\u0001\u0000\u0000\u0000\u000f"+ - "\u018e\u0001\u0000\u0000\u0000\u000f\u0190\u0001\u0000\u0000\u0000\u000f"+ - "\u0192\u0001\u0000\u0000\u0000\u000f\u0194\u0001\u0000\u0000\u0000\u000f"+ - "\u0196\u0001\u0000\u0000\u0000\u0010\u0198\u0001\u0000\u0000\u0000\u0012"+ - "\u01a2\u0001\u0000\u0000\u0000\u0014\u01a9\u0001\u0000\u0000\u0000\u0016"+ - 
"\u01b2\u0001\u0000\u0000\u0000\u0018\u01b9\u0001\u0000\u0000\u0000\u001a"+ - "\u01c3\u0001\u0000\u0000\u0000\u001c\u01ca\u0001\u0000\u0000\u0000\u001e"+ - "\u01d1\u0001\u0000\u0000\u0000 \u01df\u0001\u0000\u0000\u0000\"\u01e6"+ - "\u0001\u0000\u0000\u0000$\u01ee\u0001\u0000\u0000\u0000&\u01f7\u0001\u0000"+ - "\u0000\u0000(\u01fe\u0001\u0000\u0000\u0000*\u0208\u0001\u0000\u0000\u0000"+ - ",\u0214\u0001\u0000\u0000\u0000.\u021d\u0001\u0000\u0000\u00000\u0223"+ - "\u0001\u0000\u0000\u00002\u022a\u0001\u0000\u0000\u00004\u0231\u0001\u0000"+ - "\u0000\u00006\u0239\u0001\u0000\u0000\u00008\u0241\u0001\u0000\u0000\u0000"+ - ":\u024a\u0001\u0000\u0000\u0000<\u0250\u0001\u0000\u0000\u0000>\u0261"+ - "\u0001\u0000\u0000\u0000@\u0271\u0001\u0000\u0000\u0000B\u027a\u0001\u0000"+ - "\u0000\u0000D\u027d\u0001\u0000\u0000\u0000F\u0281\u0001\u0000\u0000\u0000"+ - "H\u0286\u0001\u0000\u0000\u0000J\u028b\u0001\u0000\u0000\u0000L\u028f"+ - "\u0001\u0000\u0000\u0000N\u0293\u0001\u0000\u0000\u0000P\u0297\u0001\u0000"+ - "\u0000\u0000R\u029b\u0001\u0000\u0000\u0000T\u029d\u0001\u0000\u0000\u0000"+ - "V\u029f\u0001\u0000\u0000\u0000X\u02a2\u0001\u0000\u0000\u0000Z\u02a4"+ - "\u0001\u0000\u0000\u0000\\\u02ad\u0001\u0000\u0000\u0000^\u02af\u0001"+ - "\u0000\u0000\u0000`\u02b4\u0001\u0000\u0000\u0000b\u02b6\u0001\u0000\u0000"+ - "\u0000d\u02bb\u0001\u0000\u0000\u0000f\u02da\u0001\u0000\u0000\u0000h"+ - "\u02dd\u0001\u0000\u0000\u0000j\u030b\u0001\u0000\u0000\u0000l\u030d\u0001"+ - "\u0000\u0000\u0000n\u0310\u0001\u0000\u0000\u0000p\u0314\u0001\u0000\u0000"+ - "\u0000r\u0318\u0001\u0000\u0000\u0000t\u031a\u0001\u0000\u0000\u0000v"+ - "\u031d\u0001\u0000\u0000\u0000x\u031f\u0001\u0000\u0000\u0000z\u0324\u0001"+ - "\u0000\u0000\u0000|\u0326\u0001\u0000\u0000\u0000~\u032c\u0001\u0000\u0000"+ - "\u0000\u0080\u0332\u0001\u0000\u0000\u0000\u0082\u0335\u0001\u0000\u0000"+ - "\u0000\u0084\u0338\u0001\u0000\u0000\u0000\u0086\u033d\u0001\u0000\u0000"+ - "\u0000\u0088\u0342\u0001\u0000\u0000\u0000\u008a\u0344\u0001\u0000\u0000"+ - "\u0000\u008c\u034a\u0001\u0000\u0000\u0000\u008e\u034e\u0001\u0000\u0000"+ - "\u0000\u0090\u0353\u0001\u0000\u0000\u0000\u0092\u0359\u0001\u0000\u0000"+ - "\u0000\u0094\u035c\u0001\u0000\u0000\u0000\u0096\u035e\u0001\u0000\u0000"+ - "\u0000\u0098\u0364\u0001\u0000\u0000\u0000\u009a\u0366\u0001\u0000\u0000"+ - "\u0000\u009c\u036b\u0001\u0000\u0000\u0000\u009e\u036e\u0001\u0000\u0000"+ - "\u0000\u00a0\u0371\u0001\u0000\u0000\u0000\u00a2\u0374\u0001\u0000\u0000"+ - "\u0000\u00a4\u0376\u0001\u0000\u0000\u0000\u00a6\u0379\u0001\u0000\u0000"+ - "\u0000\u00a8\u037b\u0001\u0000\u0000\u0000\u00aa\u037e\u0001\u0000\u0000"+ - "\u0000\u00ac\u0380\u0001\u0000\u0000\u0000\u00ae\u0382\u0001\u0000\u0000"+ - "\u0000\u00b0\u0384\u0001\u0000\u0000\u0000\u00b2\u0386\u0001\u0000\u0000"+ - "\u0000\u00b4\u0396\u0001\u0000\u0000\u0000\u00b6\u0398\u0001\u0000\u0000"+ - "\u0000\u00b8\u039d\u0001\u0000\u0000\u0000\u00ba\u03b2\u0001\u0000\u0000"+ - "\u0000\u00bc\u03b4\u0001\u0000\u0000\u0000\u00be\u03bc\u0001\u0000\u0000"+ - "\u0000\u00c0\u03be\u0001\u0000\u0000\u0000\u00c2\u03c2\u0001\u0000\u0000"+ - "\u0000\u00c4\u03c6\u0001\u0000\u0000\u0000\u00c6\u03ca\u0001\u0000\u0000"+ - "\u0000\u00c8\u03cf\u0001\u0000\u0000\u0000\u00ca\u03d3\u0001\u0000\u0000"+ - "\u0000\u00cc\u03d7\u0001\u0000\u0000\u0000\u00ce\u03db\u0001\u0000\u0000"+ - "\u0000\u00d0\u03df\u0001\u0000\u0000\u0000\u00d2\u03e3\u0001\u0000\u0000"+ - "\u0000\u00d4\u03ec\u0001\u0000\u0000\u0000\u00d6\u03f0\u0001\u0000\u0000"+ - 
"\u0000\u00d8\u03f4\u0001\u0000\u0000\u0000\u00da\u03f8\u0001\u0000\u0000"+ - "\u0000\u00dc\u03fc\u0001\u0000\u0000\u0000\u00de\u0400\u0001\u0000\u0000"+ - "\u0000\u00e0\u0405\u0001\u0000\u0000\u0000\u00e2\u0409\u0001\u0000\u0000"+ - "\u0000\u00e4\u0411\u0001\u0000\u0000\u0000\u00e6\u0426\u0001\u0000\u0000"+ - "\u0000\u00e8\u042a\u0001\u0000\u0000\u0000\u00ea\u042e\u0001\u0000\u0000"+ - "\u0000\u00ec\u0432\u0001\u0000\u0000\u0000\u00ee\u0436\u0001\u0000\u0000"+ - "\u0000\u00f0\u043a\u0001\u0000\u0000\u0000\u00f2\u043f\u0001\u0000\u0000"+ - "\u0000\u00f4\u0443\u0001\u0000\u0000\u0000\u00f6\u0447\u0001\u0000\u0000"+ - "\u0000\u00f8\u044b\u0001\u0000\u0000\u0000\u00fa\u044e\u0001\u0000\u0000"+ - "\u0000\u00fc\u0452\u0001\u0000\u0000\u0000\u00fe\u0456\u0001\u0000\u0000"+ - "\u0000\u0100\u045a\u0001\u0000\u0000\u0000\u0102\u045e\u0001\u0000\u0000"+ - "\u0000\u0104\u0463\u0001\u0000\u0000\u0000\u0106\u0468\u0001\u0000\u0000"+ - "\u0000\u0108\u046d\u0001\u0000\u0000\u0000\u010a\u0474\u0001\u0000\u0000"+ - "\u0000\u010c\u047d\u0001\u0000\u0000\u0000\u010e\u0484\u0001\u0000\u0000"+ - "\u0000\u0110\u0488\u0001\u0000\u0000\u0000\u0112\u048c\u0001\u0000\u0000"+ - "\u0000\u0114\u0490\u0001\u0000\u0000\u0000\u0116\u0494\u0001\u0000\u0000"+ - "\u0000\u0118\u049a\u0001\u0000\u0000\u0000\u011a\u049e\u0001\u0000\u0000"+ - "\u0000\u011c\u04a2\u0001\u0000\u0000\u0000\u011e\u04a6\u0001\u0000\u0000"+ - "\u0000\u0120\u04aa\u0001\u0000\u0000\u0000\u0122\u04ae\u0001\u0000\u0000"+ - "\u0000\u0124\u04b2\u0001\u0000\u0000\u0000\u0126\u04b6\u0001\u0000\u0000"+ - "\u0000\u0128\u04ba\u0001\u0000\u0000\u0000\u012a\u04be\u0001\u0000\u0000"+ - "\u0000\u012c\u04c3\u0001\u0000\u0000\u0000\u012e\u04c7\u0001\u0000\u0000"+ - "\u0000\u0130\u04cb\u0001\u0000\u0000\u0000\u0132\u04cf\u0001\u0000\u0000"+ - "\u0000\u0134\u04d4\u0001\u0000\u0000\u0000\u0136\u04d8\u0001\u0000\u0000"+ - "\u0000\u0138\u04dc\u0001\u0000\u0000\u0000\u013a\u04e0\u0001\u0000\u0000"+ - "\u0000\u013c\u04e4\u0001\u0000\u0000\u0000\u013e\u04e8\u0001\u0000\u0000"+ - "\u0000\u0140\u04ee\u0001\u0000\u0000\u0000\u0142\u04f2\u0001\u0000\u0000"+ - "\u0000\u0144\u04f6\u0001\u0000\u0000\u0000\u0146\u04fa\u0001\u0000\u0000"+ - "\u0000\u0148\u04fe\u0001\u0000\u0000\u0000\u014a\u0502\u0001\u0000\u0000"+ - "\u0000\u014c\u0506\u0001\u0000\u0000\u0000\u014e\u050b\u0001\u0000\u0000"+ - "\u0000\u0150\u050f\u0001\u0000\u0000\u0000\u0152\u0513\u0001\u0000\u0000"+ - "\u0000\u0154\u0517\u0001\u0000\u0000\u0000\u0156\u051b\u0001\u0000\u0000"+ - "\u0000\u0158\u051f\u0001\u0000\u0000\u0000\u015a\u0523\u0001\u0000\u0000"+ - "\u0000\u015c\u0528\u0001\u0000\u0000\u0000\u015e\u052d\u0001\u0000\u0000"+ - "\u0000\u0160\u0531\u0001\u0000\u0000\u0000\u0162\u0535\u0001\u0000\u0000"+ - "\u0000\u0164\u0539\u0001\u0000\u0000\u0000\u0166\u053e\u0001\u0000\u0000"+ - "\u0000\u0168\u0548\u0001\u0000\u0000\u0000\u016a\u054c\u0001\u0000\u0000"+ - "\u0000\u016c\u0550\u0001\u0000\u0000\u0000\u016e\u0554\u0001\u0000\u0000"+ - "\u0000\u0170\u0559\u0001\u0000\u0000\u0000\u0172\u0560\u0001\u0000\u0000"+ - "\u0000\u0174\u0564\u0001\u0000\u0000\u0000\u0176\u0568\u0001\u0000\u0000"+ - "\u0000\u0178\u056c\u0001\u0000\u0000\u0000\u017a\u0570\u0001\u0000\u0000"+ - "\u0000\u017c\u0575\u0001\u0000\u0000\u0000\u017e\u057b\u0001\u0000\u0000"+ - "\u0000\u0180\u0581\u0001\u0000\u0000\u0000\u0182\u0585\u0001\u0000\u0000"+ - "\u0000\u0184\u0589\u0001\u0000\u0000\u0000\u0186\u058d\u0001\u0000\u0000"+ - "\u0000\u0188\u0593\u0001\u0000\u0000\u0000\u018a\u0599\u0001\u0000\u0000"+ - 
"\u0000\u018c\u059d\u0001\u0000\u0000\u0000\u018e\u05a1\u0001\u0000\u0000"+ - "\u0000\u0190\u05a5\u0001\u0000\u0000\u0000\u0192\u05ab\u0001\u0000\u0000"+ - "\u0000\u0194\u05b1\u0001\u0000\u0000\u0000\u0196\u05b7\u0001\u0000\u0000"+ - "\u0000\u0198\u0199\u0005d\u0000\u0000\u0199\u019a\u0005i\u0000\u0000\u019a"+ - "\u019b\u0005s\u0000\u0000\u019b\u019c\u0005s\u0000\u0000\u019c\u019d\u0005"+ - "e\u0000\u0000\u019d\u019e\u0005c\u0000\u0000\u019e\u019f\u0005t\u0000"+ - "\u0000\u019f\u01a0\u0001\u0000\u0000\u0000\u01a0\u01a1\u0006\u0000\u0000"+ - "\u0000\u01a1\u0011\u0001\u0000\u0000\u0000\u01a2\u01a3\u0005d\u0000\u0000"+ - "\u01a3\u01a4\u0005r\u0000\u0000\u01a4\u01a5\u0005o\u0000\u0000\u01a5\u01a6"+ - "\u0005p\u0000\u0000\u01a6\u01a7\u0001\u0000\u0000\u0000\u01a7\u01a8\u0006"+ - "\u0001\u0001\u0000\u01a8\u0013\u0001\u0000\u0000\u0000\u01a9\u01aa\u0005"+ - "e\u0000\u0000\u01aa\u01ab\u0005n\u0000\u0000\u01ab\u01ac\u0005r\u0000"+ - "\u0000\u01ac\u01ad\u0005i\u0000\u0000\u01ad\u01ae\u0005c\u0000\u0000\u01ae"+ - "\u01af\u0005h\u0000\u0000\u01af\u01b0\u0001\u0000\u0000\u0000\u01b0\u01b1"+ - "\u0006\u0002\u0002\u0000\u01b1\u0015\u0001\u0000\u0000\u0000\u01b2\u01b3"+ - "\u0005e\u0000\u0000\u01b3\u01b4\u0005v\u0000\u0000\u01b4\u01b5\u0005a"+ - "\u0000\u0000\u01b5\u01b6\u0005l\u0000\u0000\u01b6\u01b7\u0001\u0000\u0000"+ - "\u0000\u01b7\u01b8\u0006\u0003\u0000\u0000\u01b8\u0017\u0001\u0000\u0000"+ - "\u0000\u01b9\u01ba\u0005e\u0000\u0000\u01ba\u01bb\u0005x\u0000\u0000\u01bb"+ - "\u01bc\u0005p\u0000\u0000\u01bc\u01bd\u0005l\u0000\u0000\u01bd\u01be\u0005"+ - "a\u0000\u0000\u01be\u01bf\u0005i\u0000\u0000\u01bf\u01c0\u0005n\u0000"+ - "\u0000\u01c0\u01c1\u0001\u0000\u0000\u0000\u01c1\u01c2\u0006\u0004\u0003"+ - "\u0000\u01c2\u0019\u0001\u0000\u0000\u0000\u01c3\u01c4\u0005f\u0000\u0000"+ - "\u01c4\u01c5\u0005r\u0000\u0000\u01c5\u01c6\u0005o\u0000\u0000\u01c6\u01c7"+ - "\u0005m\u0000\u0000\u01c7\u01c8\u0001\u0000\u0000\u0000\u01c8\u01c9\u0006"+ - "\u0005\u0004\u0000\u01c9\u001b\u0001\u0000\u0000\u0000\u01ca\u01cb\u0005"+ - "g\u0000\u0000\u01cb\u01cc\u0005r\u0000\u0000\u01cc\u01cd\u0005o\u0000"+ - "\u0000\u01cd\u01ce\u0005k\u0000\u0000\u01ce\u01cf\u0001\u0000\u0000\u0000"+ - "\u01cf\u01d0\u0006\u0006\u0000\u0000\u01d0\u001d\u0001\u0000\u0000\u0000"+ - "\u01d1\u01d2\u0005i\u0000\u0000\u01d2\u01d3\u0005n\u0000\u0000\u01d3\u01d4"+ - "\u0005l\u0000\u0000\u01d4\u01d5\u0005i\u0000\u0000\u01d5\u01d6\u0005n"+ - "\u0000\u0000\u01d6\u01d7\u0005e\u0000\u0000\u01d7\u01d8\u0005s\u0000\u0000"+ - "\u01d8\u01d9\u0005t\u0000\u0000\u01d9\u01da\u0005a\u0000\u0000\u01da\u01db"+ - "\u0005t\u0000\u0000\u01db\u01dc\u0005s\u0000\u0000\u01dc\u01dd\u0001\u0000"+ - "\u0000\u0000\u01dd\u01de\u0006\u0007\u0000\u0000\u01de\u001f\u0001\u0000"+ - "\u0000\u0000\u01df\u01e0\u0005k\u0000\u0000\u01e0\u01e1\u0005e\u0000\u0000"+ - "\u01e1\u01e2\u0005e\u0000\u0000\u01e2\u01e3\u0005p\u0000\u0000\u01e3\u01e4"+ - "\u0001\u0000\u0000\u0000\u01e4\u01e5\u0006\b\u0001\u0000\u01e5!\u0001"+ - "\u0000\u0000\u0000\u01e6\u01e7\u0005l\u0000\u0000\u01e7\u01e8\u0005i\u0000"+ - "\u0000\u01e8\u01e9\u0005m\u0000\u0000\u01e9\u01ea\u0005i\u0000\u0000\u01ea"+ - "\u01eb\u0005t\u0000\u0000\u01eb\u01ec\u0001\u0000\u0000\u0000\u01ec\u01ed"+ - "\u0006\t\u0000\u0000\u01ed#\u0001\u0000\u0000\u0000\u01ee\u01ef\u0005"+ - "l\u0000\u0000\u01ef\u01f0\u0005o\u0000\u0000\u01f0\u01f1\u0005o\u0000"+ - "\u0000\u01f1\u01f2\u0005k\u0000\u0000\u01f2\u01f3\u0005u\u0000\u0000\u01f3"+ - "\u01f4\u0005p\u0000\u0000\u01f4\u01f5\u0001\u0000\u0000\u0000\u01f5\u01f6"+ - 
"\u0006\n\u0005\u0000\u01f6%\u0001\u0000\u0000\u0000\u01f7\u01f8\u0005"+ - "m\u0000\u0000\u01f8\u01f9\u0005e\u0000\u0000\u01f9\u01fa\u0005t\u0000"+ - "\u0000\u01fa\u01fb\u0005a\u0000\u0000\u01fb\u01fc\u0001\u0000\u0000\u0000"+ - "\u01fc\u01fd\u0006\u000b\u0006\u0000\u01fd\'\u0001\u0000\u0000\u0000\u01fe"+ - "\u01ff\u0005m\u0000\u0000\u01ff\u0200\u0005e\u0000\u0000\u0200\u0201\u0005"+ - "t\u0000\u0000\u0201\u0202\u0005r\u0000\u0000\u0202\u0203\u0005i\u0000"+ - "\u0000\u0203\u0204\u0005c\u0000\u0000\u0204\u0205\u0005s\u0000\u0000\u0205"+ - "\u0206\u0001\u0000\u0000\u0000\u0206\u0207\u0006\f\u0007\u0000\u0207)"+ - "\u0001\u0000\u0000\u0000\u0208\u0209\u0005m\u0000\u0000\u0209\u020a\u0005"+ - "v\u0000\u0000\u020a\u020b\u0005_\u0000\u0000\u020b\u020c\u0005e\u0000"+ - "\u0000\u020c\u020d\u0005x\u0000\u0000\u020d\u020e\u0005p\u0000\u0000\u020e"+ - "\u020f\u0005a\u0000\u0000\u020f\u0210\u0005n\u0000\u0000\u0210\u0211\u0005"+ - "d\u0000\u0000\u0211\u0212\u0001\u0000\u0000\u0000\u0212\u0213\u0006\r"+ - "\b\u0000\u0213+\u0001\u0000\u0000\u0000\u0214\u0215\u0005r\u0000\u0000"+ + "R\u0003R\u038c\bR\u0001R\u0005R\u038f\bR\nR\fR\u0392\tR\u0001R\u0001R"+ + "\u0004R\u0396\bR\u000bR\fR\u0397\u0003R\u039a\bR\u0001S\u0001S\u0001S"+ + "\u0001S\u0001S\u0001T\u0001T\u0001T\u0001T\u0001T\u0001U\u0001U\u0005"+ + "U\u03a8\bU\nU\fU\u03ab\tU\u0001U\u0001U\u0003U\u03af\bU\u0001U\u0004U"+ + "\u03b2\bU\u000bU\fU\u03b3\u0003U\u03b6\bU\u0001V\u0001V\u0004V\u03ba\b"+ + "V\u000bV\fV\u03bb\u0001V\u0001V\u0001W\u0001W\u0001X\u0001X\u0001X\u0001"+ + "X\u0001Y\u0001Y\u0001Y\u0001Y\u0001Z\u0001Z\u0001Z\u0001Z\u0001[\u0001"+ + "[\u0001[\u0001[\u0001[\u0001\\\u0001\\\u0001\\\u0001\\\u0001]\u0001]\u0001"+ + "]\u0001]\u0001^\u0001^\u0001^\u0001^\u0001_\u0001_\u0001_\u0001_\u0001"+ + "`\u0001`\u0001`\u0001`\u0001a\u0001a\u0001a\u0001a\u0001a\u0001a\u0001"+ + "a\u0001a\u0001a\u0001b\u0001b\u0001b\u0001b\u0001c\u0001c\u0001c\u0001"+ + "c\u0001d\u0001d\u0001d\u0001d\u0001e\u0001e\u0001e\u0001e\u0001f\u0001"+ + "f\u0001f\u0001f\u0001g\u0001g\u0001g\u0001g\u0001g\u0001h\u0001h\u0001"+ + "h\u0001h\u0001i\u0001i\u0001i\u0001i\u0001j\u0001j\u0001j\u0001j\u0003"+ + "j\u0415\bj\u0001k\u0001k\u0003k\u0419\bk\u0001k\u0005k\u041c\bk\nk\fk"+ + "\u041f\tk\u0001k\u0001k\u0003k\u0423\bk\u0001k\u0004k\u0426\bk\u000bk"+ + "\fk\u0427\u0003k\u042a\bk\u0001l\u0001l\u0004l\u042e\bl\u000bl\fl\u042f"+ + "\u0001m\u0001m\u0001m\u0001m\u0001n\u0001n\u0001n\u0001n\u0001o\u0001"+ + "o\u0001o\u0001o\u0001p\u0001p\u0001p\u0001p\u0001p\u0001q\u0001q\u0001"+ + "q\u0001q\u0001r\u0001r\u0001r\u0001r\u0001s\u0001s\u0001s\u0001s\u0001"+ + "t\u0001t\u0001t\u0001u\u0001u\u0001u\u0001u\u0001v\u0001v\u0001v\u0001"+ + "v\u0001w\u0001w\u0001w\u0001w\u0001x\u0001x\u0001x\u0001x\u0001y\u0001"+ + "y\u0001y\u0001y\u0001y\u0001z\u0001z\u0001z\u0001z\u0001z\u0001{\u0001"+ + "{\u0001{\u0001{\u0001{\u0001|\u0001|\u0001|\u0001|\u0001|\u0001|\u0001"+ + "|\u0001}\u0001}\u0001~\u0004~\u047b\b~\u000b~\f~\u047c\u0001~\u0001~\u0003"+ + "~\u0481\b~\u0001~\u0004~\u0484\b~\u000b~\f~\u0485\u0001\u007f\u0001\u007f"+ + "\u0001\u007f\u0001\u007f\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0080"+ + "\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0082\u0001\u0082"+ + "\u0001\u0082\u0001\u0082\u0001\u0083\u0001\u0083\u0001\u0083\u0001\u0083"+ + "\u0001\u0083\u0001\u0083\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0084"+ + "\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0086\u0001\u0086"+ + "\u0001\u0086\u0001\u0086\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087"+ + 
"\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0089\u0001\u0089"+ + "\u0001\u0089\u0001\u0089\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a"+ + "\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008c\u0001\u008c"+ + "\u0001\u008c\u0001\u008c\u0001\u008d\u0001\u008d\u0001\u008d\u0001\u008d"+ + "\u0001\u008d\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008f"+ + "\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u0090\u0001\u0090\u0001\u0090"+ + "\u0001\u0090\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0091"+ + "\u0001\u0092\u0001\u0092\u0001\u0092\u0001\u0092\u0001\u0093\u0001\u0093"+ + "\u0001\u0093\u0001\u0093\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0094"+ + "\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0096\u0001\u0096"+ + "\u0001\u0096\u0001\u0096\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097"+ + "\u0001\u0097\u0001\u0097\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0098"+ + "\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u009a\u0001\u009a"+ + "\u0001\u009a\u0001\u009a\u0001\u009b\u0001\u009b\u0001\u009b\u0001\u009b"+ + "\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009c\u0001\u009d\u0001\u009d"+ + "\u0001\u009d\u0001\u009d\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e"+ + "\u0001\u009e\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u00a0"+ + "\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1"+ + "\u0001\u00a1\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3"+ + "\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a4\u0001\u00a4\u0001\u00a4"+ + "\u0001\u00a4\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5"+ + "\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a6\u0001\u00a7"+ + "\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a8\u0001\u00a8\u0001\u00a8"+ + "\u0001\u00a8\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00aa"+ + "\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00ab\u0001\u00ab"+ + "\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab"+ + "\u0001\u00ab\u0001\u00ab\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac"+ + "\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ae\u0001\u00ae"+ + "\u0001\u00ae\u0001\u00ae\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00af"+ + "\u0001\u00af\u0001\u00b0\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1"+ + "\u0001\u00b1\u0001\u00b1\u0004\u00b1\u0564\b\u00b1\u000b\u00b1\f\u00b1"+ + "\u0565\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b3\u0001"+ + "\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001"+ + "\u00b4\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001"+ + "\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001"+ + "\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001\u00b7\u0001"+ + "\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b9\u0001\u00b9\u0001"+ + "\u00b9\u0001\u00b9\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001"+ + "\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001"+ + "\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001"+ + "\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001\u00be\u0001\u00be\u0001"+ + "\u00be\u0001\u00be\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001"+ + "\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001"+ + "\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c1\u0001"+ + "\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001"+ + 
"\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0002\u0268\u02cd"+ + "\u0000\u00c4\u0010\u0001\u0012\u0002\u0014\u0003\u0016\u0004\u0018\u0005"+ + "\u001a\u0006\u001c\u0007\u001e\b \t\"\n$\u000b&\f(\r*\u000e,\u000f.\u0010"+ + "0\u00112\u00124\u00136\u00148\u0015:\u0016<\u0017>\u0018@\u0019B\u0000"+ + "D\u001aF\u0000H\u0000J\u001bL\u001cN\u001dP\u001eR\u0000T\u0000V\u0000"+ + "X\u0000Z\u0000\\\u0000^\u0000`\u0000b\u0000d\u0000f\u001fh j!l\"n#p$r"+ + "%t&v\'x(z)|*~+\u0080,\u0082-\u0084.\u0086/\u00880\u008a1\u008c2\u008e"+ + "3\u00904\u00925\u00946\u00967\u00988\u009a9\u009c:\u009e;\u00a0<\u00a2"+ + "=\u00a4>\u00a6?\u00a8@\u00aaA\u00acB\u00aeC\u00b0D\u00b2E\u00b4F\u00b6"+ + "G\u00b8H\u00baI\u00bc\u0000\u00beJ\u00c0K\u00c2L\u00c4M\u00c6\u0000\u00c8"+ + "\u0000\u00ca\u0000\u00cc\u0000\u00ce\u0000\u00d0\u0000\u00d2N\u00d4\u0000"+ + "\u00d6\u0000\u00d8O\u00daP\u00dcQ\u00de\u0000\u00e0\u0000\u00e2\u0000"+ + "\u00e4\u0000\u00e6\u0000\u00e8R\u00eaS\u00ecT\u00eeU\u00f0\u0000\u00f2"+ + "\u0000\u00f4\u0000\u00f6\u0000\u00f8V\u00fa\u0000\u00fcW\u00feX\u0100"+ + "Y\u0102\u0000\u0104\u0000\u0106Z\u0108[\u010a\u0000\u010c\\\u010e\u0000"+ + "\u0110]\u0112^\u0114_\u0116\u0000\u0118\u0000\u011a\u0000\u011c\u0000"+ + "\u011e\u0000\u0120\u0000\u0122\u0000\u0124`\u0126a\u0128b\u012a\u0000"+ + "\u012c\u0000\u012e\u0000\u0130\u0000\u0132\u0000\u0134\u0000\u0136\u0000"+ + "\u0138c\u013ad\u013ce\u013e\u0000\u0140\u0000\u0142\u0000\u0144\u0000"+ + "\u0146f\u0148g\u014ah\u014c\u0000\u014e\u0000\u0150\u0000\u0152\u0000"+ + "\u0154i\u0156j\u0158k\u015a\u0000\u015cl\u015em\u0160n\u0162o\u0164\u0000"+ + "\u0166p\u0168q\u016ar\u016cs\u016e\u0000\u0170t\u0172u\u0174v\u0176w\u0178"+ + "x\u017a\u0000\u017c\u0000\u017e\u0000\u0180y\u0182z\u0184{\u0186\u0000"+ + "\u0188\u0000\u018a|\u018c}\u018e~\u0190\u0000\u0192\u0000\u0194\u0000"+ + "\u0196\u0000\u0010\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t"+ + "\n\u000b\f\r\u000e\u000f\r\u0006\u0000\t\n\r\r //[[]]\u0002\u0000\n\n"+ + "\r\r\u0003\u0000\t\n\r\r \u000b\u0000\t\n\r\r \"\",,//::==[[]]||\u0002"+ + "\u0000**//\u0001\u000009\u0002\u0000AZaz\u0005\u0000\"\"\\\\nnrrtt\u0004"+ + "\u0000\n\n\r\r\"\"\\\\\u0002\u0000EEee\u0002\u0000++--\u0001\u0000``\u000b"+ + "\u0000\t\n\r\r \"#,,//::<<>?\\\\||\u05da\u0000\u0010\u0001\u0000\u0000"+ + "\u0000\u0000\u0012\u0001\u0000\u0000\u0000\u0000\u0014\u0001\u0000\u0000"+ + "\u0000\u0000\u0016\u0001\u0000\u0000\u0000\u0000\u0018\u0001\u0000\u0000"+ + "\u0000\u0000\u001a\u0001\u0000\u0000\u0000\u0000\u001c\u0001\u0000\u0000"+ + "\u0000\u0000\u001e\u0001\u0000\u0000\u0000\u0000 \u0001\u0000\u0000\u0000"+ + "\u0000\"\u0001\u0000\u0000\u0000\u0000$\u0001\u0000\u0000\u0000\u0000"+ + "&\u0001\u0000\u0000\u0000\u0000(\u0001\u0000\u0000\u0000\u0000*\u0001"+ + "\u0000\u0000\u0000\u0000,\u0001\u0000\u0000\u0000\u0000.\u0001\u0000\u0000"+ + "\u0000\u00000\u0001\u0000\u0000\u0000\u00002\u0001\u0000\u0000\u0000\u0000"+ + "4\u0001\u0000\u0000\u0000\u00006\u0001\u0000\u0000\u0000\u00008\u0001"+ + "\u0000\u0000\u0000\u0000:\u0001\u0000\u0000\u0000\u0000<\u0001\u0000\u0000"+ + "\u0000\u0000>\u0001\u0000\u0000\u0000\u0000@\u0001\u0000\u0000\u0000\u0000"+ + "D\u0001\u0000\u0000\u0000\u0001F\u0001\u0000\u0000\u0000\u0001H\u0001"+ + "\u0000\u0000\u0000\u0001J\u0001\u0000\u0000\u0000\u0001L\u0001\u0000\u0000"+ + "\u0000\u0001N\u0001\u0000\u0000\u0000\u0002P\u0001\u0000\u0000\u0000\u0002"+ + "f\u0001\u0000\u0000\u0000\u0002h\u0001\u0000\u0000\u0000\u0002j\u0001"+ + "\u0000\u0000\u0000\u0002l\u0001\u0000\u0000\u0000\u0002n\u0001\u0000\u0000"+ + 
"\u0000\u0002p\u0001\u0000\u0000\u0000\u0002r\u0001\u0000\u0000\u0000\u0002"+ + "t\u0001\u0000\u0000\u0000\u0002v\u0001\u0000\u0000\u0000\u0002x\u0001"+ + "\u0000\u0000\u0000\u0002z\u0001\u0000\u0000\u0000\u0002|\u0001\u0000\u0000"+ + "\u0000\u0002~\u0001\u0000\u0000\u0000\u0002\u0080\u0001\u0000\u0000\u0000"+ + "\u0002\u0082\u0001\u0000\u0000\u0000\u0002\u0084\u0001\u0000\u0000\u0000"+ + "\u0002\u0086\u0001\u0000\u0000\u0000\u0002\u0088\u0001\u0000\u0000\u0000"+ + "\u0002\u008a\u0001\u0000\u0000\u0000\u0002\u008c\u0001\u0000\u0000\u0000"+ + "\u0002\u008e\u0001\u0000\u0000\u0000\u0002\u0090\u0001\u0000\u0000\u0000"+ + "\u0002\u0092\u0001\u0000\u0000\u0000\u0002\u0094\u0001\u0000\u0000\u0000"+ + "\u0002\u0096\u0001\u0000\u0000\u0000\u0002\u0098\u0001\u0000\u0000\u0000"+ + "\u0002\u009a\u0001\u0000\u0000\u0000\u0002\u009c\u0001\u0000\u0000\u0000"+ + "\u0002\u009e\u0001\u0000\u0000\u0000\u0002\u00a0\u0001\u0000\u0000\u0000"+ + "\u0002\u00a2\u0001\u0000\u0000\u0000\u0002\u00a4\u0001\u0000\u0000\u0000"+ + "\u0002\u00a6\u0001\u0000\u0000\u0000\u0002\u00a8\u0001\u0000\u0000\u0000"+ + "\u0002\u00aa\u0001\u0000\u0000\u0000\u0002\u00ac\u0001\u0000\u0000\u0000"+ + "\u0002\u00ae\u0001\u0000\u0000\u0000\u0002\u00b0\u0001\u0000\u0000\u0000"+ + "\u0002\u00b2\u0001\u0000\u0000\u0000\u0002\u00b4\u0001\u0000\u0000\u0000"+ + "\u0002\u00b6\u0001\u0000\u0000\u0000\u0002\u00b8\u0001\u0000\u0000\u0000"+ + "\u0002\u00ba\u0001\u0000\u0000\u0000\u0002\u00be\u0001\u0000\u0000\u0000"+ + "\u0002\u00c0\u0001\u0000\u0000\u0000\u0002\u00c2\u0001\u0000\u0000\u0000"+ + "\u0002\u00c4\u0001\u0000\u0000\u0000\u0003\u00c6\u0001\u0000\u0000\u0000"+ + "\u0003\u00c8\u0001\u0000\u0000\u0000\u0003\u00ca\u0001\u0000\u0000\u0000"+ + "\u0003\u00cc\u0001\u0000\u0000\u0000\u0003\u00ce\u0001\u0000\u0000\u0000"+ + "\u0003\u00d0\u0001\u0000\u0000\u0000\u0003\u00d2\u0001\u0000\u0000\u0000"+ + "\u0003\u00d4\u0001\u0000\u0000\u0000\u0003\u00d6\u0001\u0000\u0000\u0000"+ + "\u0003\u00d8\u0001\u0000\u0000\u0000\u0003\u00da\u0001\u0000\u0000\u0000"+ + "\u0003\u00dc\u0001\u0000\u0000\u0000\u0004\u00de\u0001\u0000\u0000\u0000"+ + "\u0004\u00e0\u0001\u0000\u0000\u0000\u0004\u00e2\u0001\u0000\u0000\u0000"+ + "\u0004\u00e8\u0001\u0000\u0000\u0000\u0004\u00ea\u0001\u0000\u0000\u0000"+ + "\u0004\u00ec\u0001\u0000\u0000\u0000\u0004\u00ee\u0001\u0000\u0000\u0000"+ + "\u0005\u00f0\u0001\u0000\u0000\u0000\u0005\u00f2\u0001\u0000\u0000\u0000"+ + "\u0005\u00f4\u0001\u0000\u0000\u0000\u0005\u00f6\u0001\u0000\u0000\u0000"+ + "\u0005\u00f8\u0001\u0000\u0000\u0000\u0005\u00fa\u0001\u0000\u0000\u0000"+ + "\u0005\u00fc\u0001\u0000\u0000\u0000\u0005\u00fe\u0001\u0000\u0000\u0000"+ + "\u0005\u0100\u0001\u0000\u0000\u0000\u0006\u0102\u0001\u0000\u0000\u0000"+ + "\u0006\u0104\u0001\u0000\u0000\u0000\u0006\u0106\u0001\u0000\u0000\u0000"+ + "\u0006\u0108\u0001\u0000\u0000\u0000\u0006\u010c\u0001\u0000\u0000\u0000"+ + "\u0006\u010e\u0001\u0000\u0000\u0000\u0006\u0110\u0001\u0000\u0000\u0000"+ + "\u0006\u0112\u0001\u0000\u0000\u0000\u0006\u0114\u0001\u0000\u0000\u0000"+ + "\u0007\u0116\u0001\u0000\u0000\u0000\u0007\u0118\u0001\u0000\u0000\u0000"+ + "\u0007\u011a\u0001\u0000\u0000\u0000\u0007\u011c\u0001\u0000\u0000\u0000"+ + "\u0007\u011e\u0001\u0000\u0000\u0000\u0007\u0120\u0001\u0000\u0000\u0000"+ + "\u0007\u0122\u0001\u0000\u0000\u0000\u0007\u0124\u0001\u0000\u0000\u0000"+ + "\u0007\u0126\u0001\u0000\u0000\u0000\u0007\u0128\u0001\u0000\u0000\u0000"+ + "\b\u012a\u0001\u0000\u0000\u0000\b\u012c\u0001\u0000\u0000\u0000\b\u012e"+ + 
"\u0001\u0000\u0000\u0000\b\u0130\u0001\u0000\u0000\u0000\b\u0132\u0001"+ + "\u0000\u0000\u0000\b\u0134\u0001\u0000\u0000\u0000\b\u0136\u0001\u0000"+ + "\u0000\u0000\b\u0138\u0001\u0000\u0000\u0000\b\u013a\u0001\u0000\u0000"+ + "\u0000\b\u013c\u0001\u0000\u0000\u0000\t\u013e\u0001\u0000\u0000\u0000"+ + "\t\u0140\u0001\u0000\u0000\u0000\t\u0142\u0001\u0000\u0000\u0000\t\u0144"+ + "\u0001\u0000\u0000\u0000\t\u0146\u0001\u0000\u0000\u0000\t\u0148\u0001"+ + "\u0000\u0000\u0000\t\u014a\u0001\u0000\u0000\u0000\n\u014c\u0001\u0000"+ + "\u0000\u0000\n\u014e\u0001\u0000\u0000\u0000\n\u0150\u0001\u0000\u0000"+ + "\u0000\n\u0152\u0001\u0000\u0000\u0000\n\u0154\u0001\u0000\u0000\u0000"+ + "\n\u0156\u0001\u0000\u0000\u0000\n\u0158\u0001\u0000\u0000\u0000\u000b"+ + "\u015a\u0001\u0000\u0000\u0000\u000b\u015c\u0001\u0000\u0000\u0000\u000b"+ + "\u015e\u0001\u0000\u0000\u0000\u000b\u0160\u0001\u0000\u0000\u0000\u000b"+ + "\u0162\u0001\u0000\u0000\u0000\f\u0164\u0001\u0000\u0000\u0000\f\u0166"+ + "\u0001\u0000\u0000\u0000\f\u0168\u0001\u0000\u0000\u0000\f\u016a\u0001"+ + "\u0000\u0000\u0000\f\u016c\u0001\u0000\u0000\u0000\r\u016e\u0001\u0000"+ + "\u0000\u0000\r\u0170\u0001\u0000\u0000\u0000\r\u0172\u0001\u0000\u0000"+ + "\u0000\r\u0174\u0001\u0000\u0000\u0000\r\u0176\u0001\u0000\u0000\u0000"+ + "\r\u0178\u0001\u0000\u0000\u0000\u000e\u017a\u0001\u0000\u0000\u0000\u000e"+ + "\u017c\u0001\u0000\u0000\u0000\u000e\u017e\u0001\u0000\u0000\u0000\u000e"+ + "\u0180\u0001\u0000\u0000\u0000\u000e\u0182\u0001\u0000\u0000\u0000\u000e"+ + "\u0184\u0001\u0000\u0000\u0000\u000f\u0186\u0001\u0000\u0000\u0000\u000f"+ + "\u0188\u0001\u0000\u0000\u0000\u000f\u018a\u0001\u0000\u0000\u0000\u000f"+ + "\u018c\u0001\u0000\u0000\u0000\u000f\u018e\u0001\u0000\u0000\u0000\u000f"+ + "\u0190\u0001\u0000\u0000\u0000\u000f\u0192\u0001\u0000\u0000\u0000\u000f"+ + "\u0194\u0001\u0000\u0000\u0000\u000f\u0196\u0001\u0000\u0000\u0000\u0010"+ + "\u0198\u0001\u0000\u0000\u0000\u0012\u01a2\u0001\u0000\u0000\u0000\u0014"+ + "\u01a9\u0001\u0000\u0000\u0000\u0016\u01b2\u0001\u0000\u0000\u0000\u0018"+ + "\u01b9\u0001\u0000\u0000\u0000\u001a\u01c3\u0001\u0000\u0000\u0000\u001c"+ + "\u01ca\u0001\u0000\u0000\u0000\u001e\u01d1\u0001\u0000\u0000\u0000 \u01df"+ + "\u0001\u0000\u0000\u0000\"\u01e6\u0001\u0000\u0000\u0000$\u01ee\u0001"+ + "\u0000\u0000\u0000&\u01f7\u0001\u0000\u0000\u0000(\u01fe\u0001\u0000\u0000"+ + "\u0000*\u0208\u0001\u0000\u0000\u0000,\u0214\u0001\u0000\u0000\u0000."+ + "\u021d\u0001\u0000\u0000\u00000\u0223\u0001\u0000\u0000\u00002\u022a\u0001"+ + "\u0000\u0000\u00004\u0231\u0001\u0000\u0000\u00006\u0239\u0001\u0000\u0000"+ + "\u00008\u0241\u0001\u0000\u0000\u0000:\u024a\u0001\u0000\u0000\u0000<"+ + "\u0250\u0001\u0000\u0000\u0000>\u0261\u0001\u0000\u0000\u0000@\u0271\u0001"+ + "\u0000\u0000\u0000B\u027a\u0001\u0000\u0000\u0000D\u027d\u0001\u0000\u0000"+ + "\u0000F\u0281\u0001\u0000\u0000\u0000H\u0286\u0001\u0000\u0000\u0000J"+ + "\u028b\u0001\u0000\u0000\u0000L\u028f\u0001\u0000\u0000\u0000N\u0293\u0001"+ + "\u0000\u0000\u0000P\u0297\u0001\u0000\u0000\u0000R\u029b\u0001\u0000\u0000"+ + "\u0000T\u029d\u0001\u0000\u0000\u0000V\u029f\u0001\u0000\u0000\u0000X"+ + "\u02a2\u0001\u0000\u0000\u0000Z\u02a4\u0001\u0000\u0000\u0000\\\u02ad"+ + "\u0001\u0000\u0000\u0000^\u02af\u0001\u0000\u0000\u0000`\u02b4\u0001\u0000"+ + "\u0000\u0000b\u02b6\u0001\u0000\u0000\u0000d\u02bb\u0001\u0000\u0000\u0000"+ + "f\u02da\u0001\u0000\u0000\u0000h\u02dd\u0001\u0000\u0000\u0000j\u030b"+ + 
"\u0001\u0000\u0000\u0000l\u030d\u0001\u0000\u0000\u0000n\u0310\u0001\u0000"+ + "\u0000\u0000p\u0314\u0001\u0000\u0000\u0000r\u0318\u0001\u0000\u0000\u0000"+ + "t\u031a\u0001\u0000\u0000\u0000v\u031d\u0001\u0000\u0000\u0000x\u031f"+ + "\u0001\u0000\u0000\u0000z\u0324\u0001\u0000\u0000\u0000|\u0326\u0001\u0000"+ + "\u0000\u0000~\u032c\u0001\u0000\u0000\u0000\u0080\u0332\u0001\u0000\u0000"+ + "\u0000\u0082\u0335\u0001\u0000\u0000\u0000\u0084\u0338\u0001\u0000\u0000"+ + "\u0000\u0086\u033d\u0001\u0000\u0000\u0000\u0088\u0342\u0001\u0000\u0000"+ + "\u0000\u008a\u0344\u0001\u0000\u0000\u0000\u008c\u034a\u0001\u0000\u0000"+ + "\u0000\u008e\u034e\u0001\u0000\u0000\u0000\u0090\u0353\u0001\u0000\u0000"+ + "\u0000\u0092\u0359\u0001\u0000\u0000\u0000\u0094\u035c\u0001\u0000\u0000"+ + "\u0000\u0096\u035e\u0001\u0000\u0000\u0000\u0098\u0364\u0001\u0000\u0000"+ + "\u0000\u009a\u0366\u0001\u0000\u0000\u0000\u009c\u036b\u0001\u0000\u0000"+ + "\u0000\u009e\u036e\u0001\u0000\u0000\u0000\u00a0\u0371\u0001\u0000\u0000"+ + "\u0000\u00a2\u0374\u0001\u0000\u0000\u0000\u00a4\u0376\u0001\u0000\u0000"+ + "\u0000\u00a6\u0379\u0001\u0000\u0000\u0000\u00a8\u037b\u0001\u0000\u0000"+ + "\u0000\u00aa\u037e\u0001\u0000\u0000\u0000\u00ac\u0380\u0001\u0000\u0000"+ + "\u0000\u00ae\u0382\u0001\u0000\u0000\u0000\u00b0\u0384\u0001\u0000\u0000"+ + "\u0000\u00b2\u0386\u0001\u0000\u0000\u0000\u00b4\u0399\u0001\u0000\u0000"+ + "\u0000\u00b6\u039b\u0001\u0000\u0000\u0000\u00b8\u03a0\u0001\u0000\u0000"+ + "\u0000\u00ba\u03b5\u0001\u0000\u0000\u0000\u00bc\u03b7\u0001\u0000\u0000"+ + "\u0000\u00be\u03bf\u0001\u0000\u0000\u0000\u00c0\u03c1\u0001\u0000\u0000"+ + "\u0000\u00c2\u03c5\u0001\u0000\u0000\u0000\u00c4\u03c9\u0001\u0000\u0000"+ + "\u0000\u00c6\u03cd\u0001\u0000\u0000\u0000\u00c8\u03d2\u0001\u0000\u0000"+ + "\u0000\u00ca\u03d6\u0001\u0000\u0000\u0000\u00cc\u03da\u0001\u0000\u0000"+ + "\u0000\u00ce\u03de\u0001\u0000\u0000\u0000\u00d0\u03e2\u0001\u0000\u0000"+ + "\u0000\u00d2\u03e6\u0001\u0000\u0000\u0000\u00d4\u03ef\u0001\u0000\u0000"+ + "\u0000\u00d6\u03f3\u0001\u0000\u0000\u0000\u00d8\u03f7\u0001\u0000\u0000"+ + "\u0000\u00da\u03fb\u0001\u0000\u0000\u0000\u00dc\u03ff\u0001\u0000\u0000"+ + "\u0000\u00de\u0403\u0001\u0000\u0000\u0000\u00e0\u0408\u0001\u0000\u0000"+ + "\u0000\u00e2\u040c\u0001\u0000\u0000\u0000\u00e4\u0414\u0001\u0000\u0000"+ + "\u0000\u00e6\u0429\u0001\u0000\u0000\u0000\u00e8\u042d\u0001\u0000\u0000"+ + "\u0000\u00ea\u0431\u0001\u0000\u0000\u0000\u00ec\u0435\u0001\u0000\u0000"+ + "\u0000\u00ee\u0439\u0001\u0000\u0000\u0000\u00f0\u043d\u0001\u0000\u0000"+ + "\u0000\u00f2\u0442\u0001\u0000\u0000\u0000\u00f4\u0446\u0001\u0000\u0000"+ + "\u0000\u00f6\u044a\u0001\u0000\u0000\u0000\u00f8\u044e\u0001\u0000\u0000"+ + "\u0000\u00fa\u0451\u0001\u0000\u0000\u0000\u00fc\u0455\u0001\u0000\u0000"+ + "\u0000\u00fe\u0459\u0001\u0000\u0000\u0000\u0100\u045d\u0001\u0000\u0000"+ + "\u0000\u0102\u0461\u0001\u0000\u0000\u0000\u0104\u0466\u0001\u0000\u0000"+ + "\u0000\u0106\u046b\u0001\u0000\u0000\u0000\u0108\u0470\u0001\u0000\u0000"+ + "\u0000\u010a\u0477\u0001\u0000\u0000\u0000\u010c\u0480\u0001\u0000\u0000"+ + "\u0000\u010e\u0487\u0001\u0000\u0000\u0000\u0110\u048b\u0001\u0000\u0000"+ + "\u0000\u0112\u048f\u0001\u0000\u0000\u0000\u0114\u0493\u0001\u0000\u0000"+ + "\u0000\u0116\u0497\u0001\u0000\u0000\u0000\u0118\u049d\u0001\u0000\u0000"+ + "\u0000\u011a\u04a1\u0001\u0000\u0000\u0000\u011c\u04a5\u0001\u0000\u0000"+ + "\u0000\u011e\u04a9\u0001\u0000\u0000\u0000\u0120\u04ad\u0001\u0000\u0000"+ + 
"\u0000\u0122\u04b1\u0001\u0000\u0000\u0000\u0124\u04b5\u0001\u0000\u0000"+ + "\u0000\u0126\u04b9\u0001\u0000\u0000\u0000\u0128\u04bd\u0001\u0000\u0000"+ + "\u0000\u012a\u04c1\u0001\u0000\u0000\u0000\u012c\u04c6\u0001\u0000\u0000"+ + "\u0000\u012e\u04ca\u0001\u0000\u0000\u0000\u0130\u04ce\u0001\u0000\u0000"+ + "\u0000\u0132\u04d2\u0001\u0000\u0000\u0000\u0134\u04d7\u0001\u0000\u0000"+ + "\u0000\u0136\u04db\u0001\u0000\u0000\u0000\u0138\u04df\u0001\u0000\u0000"+ + "\u0000\u013a\u04e3\u0001\u0000\u0000\u0000\u013c\u04e7\u0001\u0000\u0000"+ + "\u0000\u013e\u04eb\u0001\u0000\u0000\u0000\u0140\u04f1\u0001\u0000\u0000"+ + "\u0000\u0142\u04f5\u0001\u0000\u0000\u0000\u0144\u04f9\u0001\u0000\u0000"+ + "\u0000\u0146\u04fd\u0001\u0000\u0000\u0000\u0148\u0501\u0001\u0000\u0000"+ + "\u0000\u014a\u0505\u0001\u0000\u0000\u0000\u014c\u0509\u0001\u0000\u0000"+ + "\u0000\u014e\u050e\u0001\u0000\u0000\u0000\u0150\u0512\u0001\u0000\u0000"+ + "\u0000\u0152\u0516\u0001\u0000\u0000\u0000\u0154\u051a\u0001\u0000\u0000"+ + "\u0000\u0156\u051e\u0001\u0000\u0000\u0000\u0158\u0522\u0001\u0000\u0000"+ + "\u0000\u015a\u0526\u0001\u0000\u0000\u0000\u015c\u052b\u0001\u0000\u0000"+ + "\u0000\u015e\u0530\u0001\u0000\u0000\u0000\u0160\u0534\u0001\u0000\u0000"+ + "\u0000\u0162\u0538\u0001\u0000\u0000\u0000\u0164\u053c\u0001\u0000\u0000"+ + "\u0000\u0166\u0541\u0001\u0000\u0000\u0000\u0168\u054b\u0001\u0000\u0000"+ + "\u0000\u016a\u054f\u0001\u0000\u0000\u0000\u016c\u0553\u0001\u0000\u0000"+ + "\u0000\u016e\u0557\u0001\u0000\u0000\u0000\u0170\u055c\u0001\u0000\u0000"+ + "\u0000\u0172\u0563\u0001\u0000\u0000\u0000\u0174\u0567\u0001\u0000\u0000"+ + "\u0000\u0176\u056b\u0001\u0000\u0000\u0000\u0178\u056f\u0001\u0000\u0000"+ + "\u0000\u017a\u0573\u0001\u0000\u0000\u0000\u017c\u0578\u0001\u0000\u0000"+ + "\u0000\u017e\u057e\u0001\u0000\u0000\u0000\u0180\u0584\u0001\u0000\u0000"+ + "\u0000\u0182\u0588\u0001\u0000\u0000\u0000\u0184\u058c\u0001\u0000\u0000"+ + "\u0000\u0186\u0590\u0001\u0000\u0000\u0000\u0188\u0596\u0001\u0000\u0000"+ + "\u0000\u018a\u059c\u0001\u0000\u0000\u0000\u018c\u05a0\u0001\u0000\u0000"+ + "\u0000\u018e\u05a4\u0001\u0000\u0000\u0000\u0190\u05a8\u0001\u0000\u0000"+ + "\u0000\u0192\u05ae\u0001\u0000\u0000\u0000\u0194\u05b4\u0001\u0000\u0000"+ + "\u0000\u0196\u05ba\u0001\u0000\u0000\u0000\u0198\u0199\u0005d\u0000\u0000"+ + "\u0199\u019a\u0005i\u0000\u0000\u019a\u019b\u0005s\u0000\u0000\u019b\u019c"+ + "\u0005s\u0000\u0000\u019c\u019d\u0005e\u0000\u0000\u019d\u019e\u0005c"+ + "\u0000\u0000\u019e\u019f\u0005t\u0000\u0000\u019f\u01a0\u0001\u0000\u0000"+ + "\u0000\u01a0\u01a1\u0006\u0000\u0000\u0000\u01a1\u0011\u0001\u0000\u0000"+ + "\u0000\u01a2\u01a3\u0005d\u0000\u0000\u01a3\u01a4\u0005r\u0000\u0000\u01a4"+ + "\u01a5\u0005o\u0000\u0000\u01a5\u01a6\u0005p\u0000\u0000\u01a6\u01a7\u0001"+ + "\u0000\u0000\u0000\u01a7\u01a8\u0006\u0001\u0001\u0000\u01a8\u0013\u0001"+ + "\u0000\u0000\u0000\u01a9\u01aa\u0005e\u0000\u0000\u01aa\u01ab\u0005n\u0000"+ + "\u0000\u01ab\u01ac\u0005r\u0000\u0000\u01ac\u01ad\u0005i\u0000\u0000\u01ad"+ + "\u01ae\u0005c\u0000\u0000\u01ae\u01af\u0005h\u0000\u0000\u01af\u01b0\u0001"+ + "\u0000\u0000\u0000\u01b0\u01b1\u0006\u0002\u0002\u0000\u01b1\u0015\u0001"+ + "\u0000\u0000\u0000\u01b2\u01b3\u0005e\u0000\u0000\u01b3\u01b4\u0005v\u0000"+ + "\u0000\u01b4\u01b5\u0005a\u0000\u0000\u01b5\u01b6\u0005l\u0000\u0000\u01b6"+ + "\u01b7\u0001\u0000\u0000\u0000\u01b7\u01b8\u0006\u0003\u0000\u0000\u01b8"+ + "\u0017\u0001\u0000\u0000\u0000\u01b9\u01ba\u0005e\u0000\u0000\u01ba\u01bb"+ + 
"\u0005x\u0000\u0000\u01bb\u01bc\u0005p\u0000\u0000\u01bc\u01bd\u0005l"+ + "\u0000\u0000\u01bd\u01be\u0005a\u0000\u0000\u01be\u01bf\u0005i\u0000\u0000"+ + "\u01bf\u01c0\u0005n\u0000\u0000\u01c0\u01c1\u0001\u0000\u0000\u0000\u01c1"+ + "\u01c2\u0006\u0004\u0003\u0000\u01c2\u0019\u0001\u0000\u0000\u0000\u01c3"+ + "\u01c4\u0005f\u0000\u0000\u01c4\u01c5\u0005r\u0000\u0000\u01c5\u01c6\u0005"+ + "o\u0000\u0000\u01c6\u01c7\u0005m\u0000\u0000\u01c7\u01c8\u0001\u0000\u0000"+ + "\u0000\u01c8\u01c9\u0006\u0005\u0004\u0000\u01c9\u001b\u0001\u0000\u0000"+ + "\u0000\u01ca\u01cb\u0005g\u0000\u0000\u01cb\u01cc\u0005r\u0000\u0000\u01cc"+ + "\u01cd\u0005o\u0000\u0000\u01cd\u01ce\u0005k\u0000\u0000\u01ce\u01cf\u0001"+ + "\u0000\u0000\u0000\u01cf\u01d0\u0006\u0006\u0000\u0000\u01d0\u001d\u0001"+ + "\u0000\u0000\u0000\u01d1\u01d2\u0005i\u0000\u0000\u01d2\u01d3\u0005n\u0000"+ + "\u0000\u01d3\u01d4\u0005l\u0000\u0000\u01d4\u01d5\u0005i\u0000\u0000\u01d5"+ + "\u01d6\u0005n\u0000\u0000\u01d6\u01d7\u0005e\u0000\u0000\u01d7\u01d8\u0005"+ + "s\u0000\u0000\u01d8\u01d9\u0005t\u0000\u0000\u01d9\u01da\u0005a\u0000"+ + "\u0000\u01da\u01db\u0005t\u0000\u0000\u01db\u01dc\u0005s\u0000\u0000\u01dc"+ + "\u01dd\u0001\u0000\u0000\u0000\u01dd\u01de\u0006\u0007\u0000\u0000\u01de"+ + "\u001f\u0001\u0000\u0000\u0000\u01df\u01e0\u0005k\u0000\u0000\u01e0\u01e1"+ + "\u0005e\u0000\u0000\u01e1\u01e2\u0005e\u0000\u0000\u01e2\u01e3\u0005p"+ + "\u0000\u0000\u01e3\u01e4\u0001\u0000\u0000\u0000\u01e4\u01e5\u0006\b\u0001"+ + "\u0000\u01e5!\u0001\u0000\u0000\u0000\u01e6\u01e7\u0005l\u0000\u0000\u01e7"+ + "\u01e8\u0005i\u0000\u0000\u01e8\u01e9\u0005m\u0000\u0000\u01e9\u01ea\u0005"+ + "i\u0000\u0000\u01ea\u01eb\u0005t\u0000\u0000\u01eb\u01ec\u0001\u0000\u0000"+ + "\u0000\u01ec\u01ed\u0006\t\u0000\u0000\u01ed#\u0001\u0000\u0000\u0000"+ + "\u01ee\u01ef\u0005l\u0000\u0000\u01ef\u01f0\u0005o\u0000\u0000\u01f0\u01f1"+ + "\u0005o\u0000\u0000\u01f1\u01f2\u0005k\u0000\u0000\u01f2\u01f3\u0005u"+ + "\u0000\u0000\u01f3\u01f4\u0005p\u0000\u0000\u01f4\u01f5\u0001\u0000\u0000"+ + "\u0000\u01f5\u01f6\u0006\n\u0005\u0000\u01f6%\u0001\u0000\u0000\u0000"+ + "\u01f7\u01f8\u0005m\u0000\u0000\u01f8\u01f9\u0005e\u0000\u0000\u01f9\u01fa"+ + "\u0005t\u0000\u0000\u01fa\u01fb\u0005a\u0000\u0000\u01fb\u01fc\u0001\u0000"+ + "\u0000\u0000\u01fc\u01fd\u0006\u000b\u0006\u0000\u01fd\'\u0001\u0000\u0000"+ + "\u0000\u01fe\u01ff\u0005m\u0000\u0000\u01ff\u0200\u0005e\u0000\u0000\u0200"+ + "\u0201\u0005t\u0000\u0000\u0201\u0202\u0005r\u0000\u0000\u0202\u0203\u0005"+ + "i\u0000\u0000\u0203\u0204\u0005c\u0000\u0000\u0204\u0205\u0005s\u0000"+ + "\u0000\u0205\u0206\u0001\u0000\u0000\u0000\u0206\u0207\u0006\f\u0007\u0000"+ + "\u0207)\u0001\u0000\u0000\u0000\u0208\u0209\u0005m\u0000\u0000\u0209\u020a"+ + "\u0005v\u0000\u0000\u020a\u020b\u0005_\u0000\u0000\u020b\u020c\u0005e"+ + "\u0000\u0000\u020c\u020d\u0005x\u0000\u0000\u020d\u020e\u0005p\u0000\u0000"+ + "\u020e\u020f\u0005a\u0000\u0000\u020f\u0210\u0005n\u0000\u0000\u0210\u0211"+ + "\u0005d\u0000\u0000\u0211\u0212\u0001\u0000\u0000\u0000\u0212\u0213\u0006"+ + "\r\b\u0000\u0213+\u0001\u0000\u0000\u0000\u0214\u0215\u0005r\u0000\u0000"+ "\u0215\u0216\u0005e\u0000\u0000\u0216\u0217\u0005n\u0000\u0000\u0217\u0218"+ "\u0005a\u0000\u0000\u0218\u0219\u0005m\u0000\u0000\u0219\u021a\u0005e"+ "\u0000\u0000\u021a\u021b\u0001\u0000\u0000\u0000\u021b\u021c\u0006\u000e"+ @@ -861,294 +861,296 @@ public EsqlBaseLexer(CharStream input) { "\u0001\u0000\u0000\u0000\u0382\u0383\u0005*\u0000\u0000\u0383\u00af\u0001"+ 
"\u0000\u0000\u0000\u0384\u0385\u0005/\u0000\u0000\u0385\u00b1\u0001\u0000"+ "\u0000\u0000\u0386\u0387\u0005%\u0000\u0000\u0387\u00b3\u0001\u0000\u0000"+ - "\u0000\u0388\u0389\u0003\u0094B\u0000\u0389\u038d\u0003T\"\u0000\u038a"+ - "\u038c\u0003d*\u0000\u038b\u038a\u0001\u0000\u0000\u0000\u038c\u038f\u0001"+ - "\u0000\u0000\u0000\u038d\u038b\u0001\u0000\u0000\u0000\u038d\u038e\u0001"+ - "\u0000\u0000\u0000\u038e\u0397\u0001\u0000\u0000\u0000\u038f\u038d\u0001"+ - "\u0000\u0000\u0000\u0390\u0392\u0003\u0094B\u0000\u0391\u0393\u0003R!"+ - "\u0000\u0392\u0391\u0001\u0000\u0000\u0000\u0393\u0394\u0001\u0000\u0000"+ - "\u0000\u0394\u0392\u0001\u0000\u0000\u0000\u0394\u0395\u0001\u0000\u0000"+ - "\u0000\u0395\u0397\u0001\u0000\u0000\u0000\u0396\u0388\u0001\u0000\u0000"+ - "\u0000\u0396\u0390\u0001\u0000\u0000\u0000\u0397\u00b5\u0001\u0000\u0000"+ - "\u0000\u0398\u0399\u0005[\u0000\u0000\u0399\u039a\u0001\u0000\u0000\u0000"+ - "\u039a\u039b\u0006S\u0000\u0000\u039b\u039c\u0006S\u0000\u0000\u039c\u00b7"+ - "\u0001\u0000\u0000\u0000\u039d\u039e\u0005]\u0000\u0000\u039e\u039f\u0001"+ - "\u0000\u0000\u0000\u039f\u03a0\u0006T\u000f\u0000\u03a0\u03a1\u0006T\u000f"+ - "\u0000\u03a1\u00b9\u0001\u0000\u0000\u0000\u03a2\u03a6\u0003T\"\u0000"+ - "\u03a3\u03a5\u0003d*\u0000\u03a4\u03a3\u0001\u0000\u0000\u0000\u03a5\u03a8"+ - "\u0001\u0000\u0000\u0000\u03a6\u03a4\u0001\u0000\u0000\u0000\u03a6\u03a7"+ - "\u0001\u0000\u0000\u0000\u03a7\u03b3\u0001\u0000\u0000\u0000\u03a8\u03a6"+ - "\u0001\u0000\u0000\u0000\u03a9\u03ac\u0003b)\u0000\u03aa\u03ac\u0003\\"+ - "&\u0000\u03ab\u03a9\u0001\u0000\u0000\u0000\u03ab\u03aa\u0001\u0000\u0000"+ - "\u0000\u03ac\u03ae\u0001\u0000\u0000\u0000\u03ad\u03af\u0003d*\u0000\u03ae"+ - "\u03ad\u0001\u0000\u0000\u0000\u03af\u03b0\u0001\u0000\u0000\u0000\u03b0"+ - "\u03ae\u0001\u0000\u0000\u0000\u03b0\u03b1\u0001\u0000\u0000\u0000\u03b1"+ - "\u03b3\u0001\u0000\u0000\u0000\u03b2\u03a2\u0001\u0000\u0000\u0000\u03b2"+ - "\u03ab\u0001\u0000\u0000\u0000\u03b3\u00bb\u0001\u0000\u0000\u0000\u03b4"+ - "\u03b6\u0003^\'\u0000\u03b5\u03b7\u0003`(\u0000\u03b6\u03b5\u0001\u0000"+ - "\u0000\u0000\u03b7\u03b8\u0001\u0000\u0000\u0000\u03b8\u03b6\u0001\u0000"+ - "\u0000\u0000\u03b8\u03b9\u0001\u0000\u0000\u0000\u03b9\u03ba\u0001\u0000"+ - "\u0000\u0000\u03ba\u03bb\u0003^\'\u0000\u03bb\u00bd\u0001\u0000\u0000"+ - "\u0000\u03bc\u03bd\u0003\u00bcV\u0000\u03bd\u00bf\u0001\u0000\u0000\u0000"+ - "\u03be\u03bf\u0003<\u0016\u0000\u03bf\u03c0\u0001\u0000\u0000\u0000\u03c0"+ - "\u03c1\u0006X\u000b\u0000\u03c1\u00c1\u0001\u0000\u0000\u0000\u03c2\u03c3"+ - "\u0003>\u0017\u0000\u03c3\u03c4\u0001\u0000\u0000\u0000\u03c4\u03c5\u0006"+ - "Y\u000b\u0000\u03c5\u00c3\u0001\u0000\u0000\u0000\u03c6\u03c7\u0003@\u0018"+ - "\u0000\u03c7\u03c8\u0001\u0000\u0000\u0000\u03c8\u03c9\u0006Z\u000b\u0000"+ - "\u03c9\u00c5\u0001\u0000\u0000\u0000\u03ca\u03cb\u0003P \u0000\u03cb\u03cc"+ - "\u0001\u0000\u0000\u0000\u03cc\u03cd\u0006[\u000e\u0000\u03cd\u03ce\u0006"+ - "[\u000f\u0000\u03ce\u00c7\u0001\u0000\u0000\u0000\u03cf\u03d0\u0003\u00b6"+ - "S\u0000\u03d0\u03d1\u0001\u0000\u0000\u0000\u03d1\u03d2\u0006\\\f\u0000"+ - "\u03d2\u00c9\u0001\u0000\u0000\u0000\u03d3\u03d4\u0003\u00b8T\u0000\u03d4"+ - "\u03d5\u0001\u0000\u0000\u0000\u03d5\u03d6\u0006]\u0010\u0000\u03d6\u00cb"+ - "\u0001\u0000\u0000\u0000\u03d7\u03d8\u0003\u0170\u00b0\u0000\u03d8\u03d9"+ - "\u0001\u0000\u0000\u0000\u03d9\u03da\u0006^\u0011\u0000\u03da\u00cd\u0001"+ - "\u0000\u0000\u0000\u03db\u03dc\u0003v3\u0000\u03dc\u03dd\u0001\u0000\u0000"+ - 
"\u0000\u03dd\u03de\u0006_\u0012\u0000\u03de\u00cf\u0001\u0000\u0000\u0000"+ - "\u03df\u03e0\u0003r1\u0000\u03e0\u03e1\u0001\u0000\u0000\u0000\u03e1\u03e2"+ - "\u0006`\u0013\u0000\u03e2\u00d1\u0001\u0000\u0000\u0000\u03e3\u03e4\u0005"+ - "m\u0000\u0000\u03e4\u03e5\u0005e\u0000\u0000\u03e5\u03e6\u0005t\u0000"+ - "\u0000\u03e6\u03e7\u0005a\u0000\u0000\u03e7\u03e8\u0005d\u0000\u0000\u03e8"+ - "\u03e9\u0005a\u0000\u0000\u03e9\u03ea\u0005t\u0000\u0000\u03ea\u03eb\u0005"+ - "a\u0000\u0000\u03eb\u00d3\u0001\u0000\u0000\u0000\u03ec\u03ed\u0003D\u001a"+ - "\u0000\u03ed\u03ee\u0001\u0000\u0000\u0000\u03ee\u03ef\u0006b\u0014\u0000"+ - "\u03ef\u00d5\u0001\u0000\u0000\u0000\u03f0\u03f1\u0003f+\u0000\u03f1\u03f2"+ - "\u0001\u0000\u0000\u0000\u03f2\u03f3\u0006c\u0015\u0000\u03f3\u00d7\u0001"+ - "\u0000\u0000\u0000\u03f4\u03f5\u0003<\u0016\u0000\u03f5\u03f6\u0001\u0000"+ - "\u0000\u0000\u03f6\u03f7\u0006d\u000b\u0000\u03f7\u00d9\u0001\u0000\u0000"+ - "\u0000\u03f8\u03f9\u0003>\u0017\u0000\u03f9\u03fa\u0001\u0000\u0000\u0000"+ - "\u03fa\u03fb\u0006e\u000b\u0000\u03fb\u00db\u0001\u0000\u0000\u0000\u03fc"+ - "\u03fd\u0003@\u0018\u0000\u03fd\u03fe\u0001\u0000\u0000\u0000\u03fe\u03ff"+ - "\u0006f\u000b\u0000\u03ff\u00dd\u0001\u0000\u0000\u0000\u0400\u0401\u0003"+ - "P \u0000\u0401\u0402\u0001\u0000\u0000\u0000\u0402\u0403\u0006g\u000e"+ - "\u0000\u0403\u0404\u0006g\u000f\u0000\u0404\u00df\u0001\u0000\u0000\u0000"+ - "\u0405\u0406\u0003z5\u0000\u0406\u0407\u0001\u0000\u0000\u0000\u0407\u0408"+ - "\u0006h\u0016\u0000\u0408\u00e1\u0001\u0000\u0000\u0000\u0409\u040a\u0003"+ - "v3\u0000\u040a\u040b\u0001\u0000\u0000\u0000\u040b\u040c\u0006i\u0012"+ - "\u0000\u040c\u00e3\u0001\u0000\u0000\u0000\u040d\u0412\u0003T\"\u0000"+ - "\u040e\u0412\u0003R!\u0000\u040f\u0412\u0003b)\u0000\u0410\u0412\u0003"+ - "\u00aeO\u0000\u0411\u040d\u0001\u0000\u0000\u0000\u0411\u040e\u0001\u0000"+ - "\u0000\u0000\u0411\u040f\u0001\u0000\u0000\u0000\u0411\u0410\u0001\u0000"+ - "\u0000\u0000\u0412\u00e5\u0001\u0000\u0000\u0000\u0413\u0416\u0003T\""+ - "\u0000\u0414\u0416\u0003\u00aeO\u0000\u0415\u0413\u0001\u0000\u0000\u0000"+ - "\u0415\u0414\u0001\u0000\u0000\u0000\u0416\u041a\u0001\u0000\u0000\u0000"+ - "\u0417\u0419\u0003\u00e4j\u0000\u0418\u0417\u0001\u0000\u0000\u0000\u0419"+ - "\u041c\u0001\u0000\u0000\u0000\u041a\u0418\u0001\u0000\u0000\u0000\u041a"+ - "\u041b\u0001\u0000\u0000\u0000\u041b\u0427\u0001\u0000\u0000\u0000\u041c"+ - "\u041a\u0001\u0000\u0000\u0000\u041d\u0420\u0003b)\u0000\u041e\u0420\u0003"+ - "\\&\u0000\u041f\u041d\u0001\u0000\u0000\u0000\u041f\u041e\u0001\u0000"+ - "\u0000\u0000\u0420\u0422\u0001\u0000\u0000\u0000\u0421\u0423\u0003\u00e4"+ - "j\u0000\u0422\u0421\u0001\u0000\u0000\u0000\u0423\u0424\u0001\u0000\u0000"+ - "\u0000\u0424\u0422\u0001\u0000\u0000\u0000\u0424\u0425\u0001\u0000\u0000"+ - "\u0000\u0425\u0427\u0001\u0000\u0000\u0000\u0426\u0415\u0001\u0000\u0000"+ - "\u0000\u0426\u041f\u0001\u0000\u0000\u0000\u0427\u00e7\u0001\u0000\u0000"+ - "\u0000\u0428\u042b\u0003\u00e6k\u0000\u0429\u042b\u0003\u00bcV\u0000\u042a"+ - "\u0428\u0001\u0000\u0000\u0000\u042a\u0429\u0001\u0000\u0000\u0000\u042b"+ - "\u042c\u0001\u0000\u0000\u0000\u042c\u042a\u0001\u0000\u0000\u0000\u042c"+ - "\u042d\u0001\u0000\u0000\u0000\u042d\u00e9\u0001\u0000\u0000\u0000\u042e"+ - "\u042f\u0003<\u0016\u0000\u042f\u0430\u0001\u0000\u0000\u0000\u0430\u0431"+ - "\u0006m\u000b\u0000\u0431\u00eb\u0001\u0000\u0000\u0000\u0432\u0433\u0003"+ - ">\u0017\u0000\u0433\u0434\u0001\u0000\u0000\u0000\u0434\u0435\u0006n\u000b"+ - 
"\u0000\u0435\u00ed\u0001\u0000\u0000\u0000\u0436\u0437\u0003@\u0018\u0000"+ - "\u0437\u0438\u0001\u0000\u0000\u0000\u0438\u0439\u0006o\u000b\u0000\u0439"+ - "\u00ef\u0001\u0000\u0000\u0000\u043a\u043b\u0003P \u0000\u043b\u043c\u0001"+ - "\u0000\u0000\u0000\u043c\u043d\u0006p\u000e\u0000\u043d\u043e\u0006p\u000f"+ - "\u0000\u043e\u00f1\u0001\u0000\u0000\u0000\u043f\u0440\u0003r1\u0000\u0440"+ - "\u0441\u0001\u0000\u0000\u0000\u0441\u0442\u0006q\u0013\u0000\u0442\u00f3"+ - "\u0001\u0000\u0000\u0000\u0443\u0444\u0003v3\u0000\u0444\u0445\u0001\u0000"+ - "\u0000\u0000\u0445\u0446\u0006r\u0012\u0000\u0446\u00f5\u0001\u0000\u0000"+ - "\u0000\u0447\u0448\u0003z5\u0000\u0448\u0449\u0001\u0000\u0000\u0000\u0449"+ - "\u044a\u0006s\u0016\u0000\u044a\u00f7\u0001\u0000\u0000\u0000\u044b\u044c"+ - "\u0005a\u0000\u0000\u044c\u044d\u0005s\u0000\u0000\u044d\u00f9\u0001\u0000"+ - "\u0000\u0000\u044e\u044f\u0003\u00e8l\u0000\u044f\u0450\u0001\u0000\u0000"+ - "\u0000\u0450\u0451\u0006u\u0017\u0000\u0451\u00fb\u0001\u0000\u0000\u0000"+ - "\u0452\u0453\u0003<\u0016\u0000\u0453\u0454\u0001\u0000\u0000\u0000\u0454"+ - "\u0455\u0006v\u000b\u0000\u0455\u00fd\u0001\u0000\u0000\u0000\u0456\u0457"+ - "\u0003>\u0017\u0000\u0457\u0458\u0001\u0000\u0000\u0000\u0458\u0459\u0006"+ - "w\u000b\u0000\u0459\u00ff\u0001\u0000\u0000\u0000\u045a\u045b\u0003@\u0018"+ - "\u0000\u045b\u045c\u0001\u0000\u0000\u0000\u045c\u045d\u0006x\u000b\u0000"+ - "\u045d\u0101\u0001\u0000\u0000\u0000\u045e\u045f\u0003P \u0000\u045f\u0460"+ - "\u0001\u0000\u0000\u0000\u0460\u0461\u0006y\u000e\u0000\u0461\u0462\u0006"+ - "y\u000f\u0000\u0462\u0103\u0001\u0000\u0000\u0000\u0463\u0464\u0003\u00b6"+ - "S\u0000\u0464\u0465\u0001\u0000\u0000\u0000\u0465\u0466\u0006z\f\u0000"+ - "\u0466\u0467\u0006z\u0018\u0000\u0467\u0105\u0001\u0000\u0000\u0000\u0468"+ - "\u0469\u0005o\u0000\u0000\u0469\u046a\u0005n\u0000\u0000\u046a\u046b\u0001"+ - "\u0000\u0000\u0000\u046b\u046c\u0006{\u0019\u0000\u046c\u0107\u0001\u0000"+ - "\u0000\u0000\u046d\u046e\u0005w\u0000\u0000\u046e\u046f\u0005i\u0000\u0000"+ - "\u046f\u0470\u0005t\u0000\u0000\u0470\u0471\u0005h\u0000\u0000\u0471\u0472"+ - "\u0001\u0000\u0000\u0000\u0472\u0473\u0006|\u0019\u0000\u0473\u0109\u0001"+ - "\u0000\u0000\u0000\u0474\u0475\b\f\u0000\u0000\u0475\u010b\u0001\u0000"+ - "\u0000\u0000\u0476\u0478\u0003\u010a}\u0000\u0477\u0476\u0001\u0000\u0000"+ - "\u0000\u0478\u0479\u0001\u0000\u0000\u0000\u0479\u0477\u0001\u0000\u0000"+ - "\u0000\u0479\u047a\u0001\u0000\u0000\u0000\u047a\u047b\u0001\u0000\u0000"+ - "\u0000\u047b\u047c\u0003\u0170\u00b0\u0000\u047c\u047e\u0001\u0000\u0000"+ - "\u0000\u047d\u0477\u0001\u0000\u0000\u0000\u047d\u047e\u0001\u0000\u0000"+ - "\u0000\u047e\u0480\u0001\u0000\u0000\u0000\u047f\u0481\u0003\u010a}\u0000"+ - "\u0480\u047f\u0001\u0000\u0000\u0000\u0481\u0482\u0001\u0000\u0000\u0000"+ - "\u0482\u0480\u0001\u0000\u0000\u0000\u0482\u0483\u0001\u0000\u0000\u0000"+ - "\u0483\u010d\u0001\u0000\u0000\u0000\u0484\u0485\u0003\u010c~\u0000\u0485"+ - "\u0486\u0001\u0000\u0000\u0000\u0486\u0487\u0006\u007f\u001a\u0000\u0487"+ - "\u010f\u0001\u0000\u0000\u0000\u0488\u0489\u0003<\u0016\u0000\u0489\u048a"+ - "\u0001\u0000\u0000\u0000\u048a\u048b\u0006\u0080\u000b\u0000\u048b\u0111"+ - "\u0001\u0000\u0000\u0000\u048c\u048d\u0003>\u0017\u0000\u048d\u048e\u0001"+ - "\u0000\u0000\u0000\u048e\u048f\u0006\u0081\u000b\u0000\u048f\u0113\u0001"+ - "\u0000\u0000\u0000\u0490\u0491\u0003@\u0018\u0000\u0491\u0492\u0001\u0000"+ - "\u0000\u0000\u0492\u0493\u0006\u0082\u000b\u0000\u0493\u0115\u0001\u0000"+ - 
"\u0000\u0000\u0494\u0495\u0003P \u0000\u0495\u0496\u0001\u0000\u0000\u0000"+ - "\u0496\u0497\u0006\u0083\u000e\u0000\u0497\u0498\u0006\u0083\u000f\u0000"+ - "\u0498\u0499\u0006\u0083\u000f\u0000\u0499\u0117\u0001\u0000\u0000\u0000"+ - "\u049a\u049b\u0003r1\u0000\u049b\u049c\u0001\u0000\u0000\u0000\u049c\u049d"+ - "\u0006\u0084\u0013\u0000\u049d\u0119\u0001\u0000\u0000\u0000\u049e\u049f"+ - "\u0003v3\u0000\u049f\u04a0\u0001\u0000\u0000\u0000\u04a0\u04a1\u0006\u0085"+ - "\u0012\u0000\u04a1\u011b\u0001\u0000\u0000\u0000\u04a2\u04a3\u0003z5\u0000"+ - "\u04a3\u04a4\u0001\u0000\u0000\u0000\u04a4\u04a5\u0006\u0086\u0016\u0000"+ - "\u04a5\u011d\u0001\u0000\u0000\u0000\u04a6\u04a7\u0003\u0108|\u0000\u04a7"+ - "\u04a8\u0001\u0000\u0000\u0000\u04a8\u04a9\u0006\u0087\u001b\u0000\u04a9"+ - "\u011f\u0001\u0000\u0000\u0000\u04aa\u04ab\u0003\u00e8l\u0000\u04ab\u04ac"+ - "\u0001\u0000\u0000\u0000\u04ac\u04ad\u0006\u0088\u0017\u0000\u04ad\u0121"+ - "\u0001\u0000\u0000\u0000\u04ae\u04af\u0003\u00beW\u0000\u04af\u04b0\u0001"+ - "\u0000\u0000\u0000\u04b0\u04b1\u0006\u0089\u001c\u0000\u04b1\u0123\u0001"+ - "\u0000\u0000\u0000\u04b2\u04b3\u0003<\u0016\u0000\u04b3\u04b4\u0001\u0000"+ - "\u0000\u0000\u04b4\u04b5\u0006\u008a\u000b\u0000\u04b5\u0125\u0001\u0000"+ - "\u0000\u0000\u04b6\u04b7\u0003>\u0017\u0000\u04b7\u04b8\u0001\u0000\u0000"+ - "\u0000\u04b8\u04b9\u0006\u008b\u000b\u0000\u04b9\u0127\u0001\u0000\u0000"+ - "\u0000\u04ba\u04bb\u0003@\u0018\u0000\u04bb\u04bc\u0001\u0000\u0000\u0000"+ - "\u04bc\u04bd\u0006\u008c\u000b\u0000\u04bd\u0129\u0001\u0000\u0000\u0000"+ - "\u04be\u04bf\u0003P \u0000\u04bf\u04c0\u0001\u0000\u0000\u0000\u04c0\u04c1"+ - "\u0006\u008d\u000e\u0000\u04c1\u04c2\u0006\u008d\u000f\u0000\u04c2\u012b"+ - "\u0001\u0000\u0000\u0000\u04c3\u04c4\u0003\u0170\u00b0\u0000\u04c4\u04c5"+ - "\u0001\u0000\u0000\u0000\u04c5\u04c6\u0006\u008e\u0011\u0000\u04c6\u012d"+ - "\u0001\u0000\u0000\u0000\u04c7\u04c8\u0003v3\u0000\u04c8\u04c9\u0001\u0000"+ - "\u0000\u0000\u04c9\u04ca\u0006\u008f\u0012\u0000\u04ca\u012f\u0001\u0000"+ - "\u0000\u0000\u04cb\u04cc\u0003z5\u0000\u04cc\u04cd\u0001\u0000\u0000\u0000"+ - "\u04cd\u04ce\u0006\u0090\u0016\u0000\u04ce\u0131\u0001\u0000\u0000\u0000"+ - "\u04cf\u04d0\u0003\u0106{\u0000\u04d0\u04d1\u0001\u0000\u0000\u0000\u04d1"+ - "\u04d2\u0006\u0091\u001d\u0000\u04d2\u04d3\u0006\u0091\u001e\u0000\u04d3"+ - "\u0133\u0001\u0000\u0000\u0000\u04d4\u04d5\u0003D\u001a\u0000\u04d5\u04d6"+ - "\u0001\u0000\u0000\u0000\u04d6\u04d7\u0006\u0092\u0014\u0000\u04d7\u0135"+ - "\u0001\u0000\u0000\u0000\u04d8\u04d9\u0003f+\u0000\u04d9\u04da\u0001\u0000"+ - "\u0000\u0000\u04da\u04db\u0006\u0093\u0015\u0000\u04db\u0137\u0001\u0000"+ - "\u0000\u0000\u04dc\u04dd\u0003<\u0016\u0000\u04dd\u04de\u0001\u0000\u0000"+ - "\u0000\u04de\u04df\u0006\u0094\u000b\u0000\u04df\u0139\u0001\u0000\u0000"+ - "\u0000\u04e0\u04e1\u0003>\u0017\u0000\u04e1\u04e2\u0001\u0000\u0000\u0000"+ - "\u04e2\u04e3\u0006\u0095\u000b\u0000\u04e3\u013b\u0001\u0000\u0000\u0000"+ - "\u04e4\u04e5\u0003@\u0018\u0000\u04e5\u04e6\u0001\u0000\u0000\u0000\u04e6"+ - "\u04e7\u0006\u0096\u000b\u0000\u04e7\u013d\u0001\u0000\u0000\u0000\u04e8"+ - "\u04e9\u0003P \u0000\u04e9\u04ea\u0001\u0000\u0000\u0000\u04ea\u04eb\u0006"+ - "\u0097\u000e\u0000\u04eb\u04ec\u0006\u0097\u000f\u0000\u04ec\u04ed\u0006"+ - "\u0097\u000f\u0000\u04ed\u013f\u0001\u0000\u0000\u0000\u04ee\u04ef\u0003"+ - "v3\u0000\u04ef\u04f0\u0001\u0000\u0000\u0000\u04f0\u04f1\u0006\u0098\u0012"+ - "\u0000\u04f1\u0141\u0001\u0000\u0000\u0000\u04f2\u04f3\u0003z5\u0000\u04f3"+ - 
"\u04f4\u0001\u0000\u0000\u0000\u04f4\u04f5\u0006\u0099\u0016\u0000\u04f5"+ - "\u0143\u0001\u0000\u0000\u0000\u04f6\u04f7\u0003\u00e8l\u0000\u04f7\u04f8"+ - "\u0001\u0000\u0000\u0000\u04f8\u04f9\u0006\u009a\u0017\u0000\u04f9\u0145"+ - "\u0001\u0000\u0000\u0000\u04fa\u04fb\u0003<\u0016\u0000\u04fb\u04fc\u0001"+ - "\u0000\u0000\u0000\u04fc\u04fd\u0006\u009b\u000b\u0000\u04fd\u0147\u0001"+ - "\u0000\u0000\u0000\u04fe\u04ff\u0003>\u0017\u0000\u04ff\u0500\u0001\u0000"+ - "\u0000\u0000\u0500\u0501\u0006\u009c\u000b\u0000\u0501\u0149\u0001\u0000"+ - "\u0000\u0000\u0502\u0503\u0003@\u0018\u0000\u0503\u0504\u0001\u0000\u0000"+ - "\u0000\u0504\u0505\u0006\u009d\u000b\u0000\u0505\u014b\u0001\u0000\u0000"+ - "\u0000\u0506\u0507\u0003P \u0000\u0507\u0508\u0001\u0000\u0000\u0000\u0508"+ - "\u0509\u0006\u009e\u000e\u0000\u0509\u050a\u0006\u009e\u000f\u0000\u050a"+ - "\u014d\u0001\u0000\u0000\u0000\u050b\u050c\u0003z5\u0000\u050c\u050d\u0001"+ - "\u0000\u0000\u0000\u050d\u050e\u0006\u009f\u0016\u0000\u050e\u014f\u0001"+ - "\u0000\u0000\u0000\u050f\u0510\u0003\u00beW\u0000\u0510\u0511\u0001\u0000"+ - "\u0000\u0000\u0511\u0512\u0006\u00a0\u001c\u0000\u0512\u0151\u0001\u0000"+ - "\u0000\u0000\u0513\u0514\u0003\u00baU\u0000\u0514\u0515\u0001\u0000\u0000"+ - "\u0000\u0515\u0516\u0006\u00a1\u001f\u0000\u0516\u0153\u0001\u0000\u0000"+ - "\u0000\u0517\u0518\u0003<\u0016\u0000\u0518\u0519\u0001\u0000\u0000\u0000"+ - "\u0519\u051a\u0006\u00a2\u000b\u0000\u051a\u0155\u0001\u0000\u0000\u0000"+ - "\u051b\u051c\u0003>\u0017\u0000\u051c\u051d\u0001\u0000\u0000\u0000\u051d"+ - "\u051e\u0006\u00a3\u000b\u0000\u051e\u0157\u0001\u0000\u0000\u0000\u051f"+ - "\u0520\u0003@\u0018\u0000\u0520\u0521\u0001\u0000\u0000\u0000\u0521\u0522"+ - "\u0006\u00a4\u000b\u0000\u0522\u0159\u0001\u0000\u0000\u0000\u0523\u0524"+ - "\u0003P \u0000\u0524\u0525\u0001\u0000\u0000\u0000\u0525\u0526\u0006\u00a5"+ - "\u000e\u0000\u0526\u0527\u0006\u00a5\u000f\u0000\u0527\u015b\u0001\u0000"+ - "\u0000\u0000\u0528\u0529\u0005i\u0000\u0000\u0529\u052a\u0005n\u0000\u0000"+ - "\u052a\u052b\u0005f\u0000\u0000\u052b\u052c\u0005o\u0000\u0000\u052c\u015d"+ - "\u0001\u0000\u0000\u0000\u052d\u052e\u0003<\u0016\u0000\u052e\u052f\u0001"+ - "\u0000\u0000\u0000\u052f\u0530\u0006\u00a7\u000b\u0000\u0530\u015f\u0001"+ - "\u0000\u0000\u0000\u0531\u0532\u0003>\u0017\u0000\u0532\u0533\u0001\u0000"+ - "\u0000\u0000\u0533\u0534\u0006\u00a8\u000b\u0000\u0534\u0161\u0001\u0000"+ - "\u0000\u0000\u0535\u0536\u0003@\u0018\u0000\u0536\u0537\u0001\u0000\u0000"+ - "\u0000\u0537\u0538\u0006\u00a9\u000b\u0000\u0538\u0163\u0001\u0000\u0000"+ - "\u0000\u0539\u053a\u0003P \u0000\u053a\u053b\u0001\u0000\u0000\u0000\u053b"+ - "\u053c\u0006\u00aa\u000e\u0000\u053c\u053d\u0006\u00aa\u000f\u0000\u053d"+ - "\u0165\u0001\u0000\u0000\u0000\u053e\u053f\u0005f\u0000\u0000\u053f\u0540"+ - "\u0005u\u0000\u0000\u0540\u0541\u0005n\u0000\u0000\u0541\u0542\u0005c"+ - "\u0000\u0000\u0542\u0543\u0005t\u0000\u0000\u0543\u0544\u0005i\u0000\u0000"+ - "\u0544\u0545\u0005o\u0000\u0000\u0545\u0546\u0005n\u0000\u0000\u0546\u0547"+ - "\u0005s\u0000\u0000\u0547\u0167\u0001\u0000\u0000\u0000\u0548\u0549\u0003"+ - "<\u0016\u0000\u0549\u054a\u0001\u0000\u0000\u0000\u054a\u054b\u0006\u00ac"+ - "\u000b\u0000\u054b\u0169\u0001\u0000\u0000\u0000\u054c\u054d\u0003>\u0017"+ - "\u0000\u054d\u054e\u0001\u0000\u0000\u0000\u054e\u054f\u0006\u00ad\u000b"+ - "\u0000\u054f\u016b\u0001\u0000\u0000\u0000\u0550\u0551\u0003@\u0018\u0000"+ - "\u0551\u0552\u0001\u0000\u0000\u0000\u0552\u0553\u0006\u00ae\u000b\u0000"+ - 
"\u0553\u016d\u0001\u0000\u0000\u0000\u0554\u0555\u0003\u00b8T\u0000\u0555"+ - "\u0556\u0001\u0000\u0000\u0000\u0556\u0557\u0006\u00af\u0010\u0000\u0557"+ - "\u0558\u0006\u00af\u000f\u0000\u0558\u016f\u0001\u0000\u0000\u0000\u0559"+ - "\u055a\u0005:\u0000\u0000\u055a\u0171\u0001\u0000\u0000\u0000\u055b\u0561"+ - "\u0003\\&\u0000\u055c\u0561\u0003R!\u0000\u055d\u0561\u0003z5\u0000\u055e"+ - "\u0561\u0003T\"\u0000\u055f\u0561\u0003b)\u0000\u0560\u055b\u0001\u0000"+ - "\u0000\u0000\u0560\u055c\u0001\u0000\u0000\u0000\u0560\u055d\u0001\u0000"+ - "\u0000\u0000\u0560\u055e\u0001\u0000\u0000\u0000\u0560\u055f\u0001\u0000"+ - "\u0000\u0000\u0561\u0562\u0001\u0000\u0000\u0000\u0562\u0560\u0001\u0000"+ - "\u0000\u0000\u0562\u0563\u0001\u0000\u0000\u0000\u0563\u0173\u0001\u0000"+ - "\u0000\u0000\u0564\u0565\u0003<\u0016\u0000\u0565\u0566\u0001\u0000\u0000"+ - "\u0000\u0566\u0567\u0006\u00b2\u000b\u0000\u0567\u0175\u0001\u0000\u0000"+ - "\u0000\u0568\u0569\u0003>\u0017\u0000\u0569\u056a\u0001\u0000\u0000\u0000"+ - "\u056a\u056b\u0006\u00b3\u000b\u0000\u056b\u0177\u0001\u0000\u0000\u0000"+ - "\u056c\u056d\u0003@\u0018\u0000\u056d\u056e\u0001\u0000\u0000\u0000\u056e"+ - "\u056f\u0006\u00b4\u000b\u0000\u056f\u0179\u0001\u0000\u0000\u0000\u0570"+ - "\u0571\u0003P \u0000\u0571\u0572\u0001\u0000\u0000\u0000\u0572\u0573\u0006"+ - "\u00b5\u000e\u0000\u0573\u0574\u0006\u00b5\u000f\u0000\u0574\u017b\u0001"+ - "\u0000\u0000\u0000\u0575\u0576\u0003D\u001a\u0000\u0576\u0577\u0001\u0000"+ - "\u0000\u0000\u0577\u0578\u0006\u00b6\u0014\u0000\u0578\u0579\u0006\u00b6"+ - "\u000f\u0000\u0579\u057a\u0006\u00b6 \u0000\u057a\u017d\u0001\u0000\u0000"+ - "\u0000\u057b\u057c\u0003f+\u0000\u057c\u057d\u0001\u0000\u0000\u0000\u057d"+ - "\u057e\u0006\u00b7\u0015\u0000\u057e\u057f\u0006\u00b7\u000f\u0000\u057f"+ - "\u0580\u0006\u00b7 \u0000\u0580\u017f\u0001\u0000\u0000\u0000\u0581\u0582"+ - "\u0003<\u0016\u0000\u0582\u0583\u0001\u0000\u0000\u0000\u0583\u0584\u0006"+ - "\u00b8\u000b\u0000\u0584\u0181\u0001\u0000\u0000\u0000\u0585\u0586\u0003"+ - ">\u0017\u0000\u0586\u0587\u0001\u0000\u0000\u0000\u0587\u0588\u0006\u00b9"+ - "\u000b\u0000\u0588\u0183\u0001\u0000\u0000\u0000\u0589\u058a\u0003@\u0018"+ - "\u0000\u058a\u058b\u0001\u0000\u0000\u0000\u058b\u058c\u0006\u00ba\u000b"+ - "\u0000\u058c\u0185\u0001\u0000\u0000\u0000\u058d\u058e\u0003\u0170\u00b0"+ - "\u0000\u058e\u058f\u0001\u0000\u0000\u0000\u058f\u0590\u0006\u00bb\u0011"+ - "\u0000\u0590\u0591\u0006\u00bb\u000f\u0000\u0591\u0592\u0006\u00bb\u0007"+ - "\u0000\u0592\u0187\u0001\u0000\u0000\u0000\u0593\u0594\u0003v3\u0000\u0594"+ - "\u0595\u0001\u0000\u0000\u0000\u0595\u0596\u0006\u00bc\u0012\u0000\u0596"+ - "\u0597\u0006\u00bc\u000f\u0000\u0597\u0598\u0006\u00bc\u0007\u0000\u0598"+ - "\u0189\u0001\u0000\u0000\u0000\u0599\u059a\u0003<\u0016\u0000\u059a\u059b"+ - "\u0001\u0000\u0000\u0000\u059b\u059c\u0006\u00bd\u000b\u0000\u059c\u018b"+ - "\u0001\u0000\u0000\u0000\u059d\u059e\u0003>\u0017\u0000\u059e\u059f\u0001"+ - "\u0000\u0000\u0000\u059f\u05a0\u0006\u00be\u000b\u0000\u05a0\u018d\u0001"+ - "\u0000\u0000\u0000\u05a1\u05a2\u0003@\u0018\u0000\u05a2\u05a3\u0001\u0000"+ - "\u0000\u0000\u05a3\u05a4\u0006\u00bf\u000b\u0000\u05a4\u018f\u0001\u0000"+ - "\u0000\u0000\u05a5\u05a6\u0003\u00beW\u0000\u05a6\u05a7\u0001\u0000\u0000"+ - "\u0000\u05a7\u05a8\u0006\u00c0\u000f\u0000\u05a8\u05a9\u0006\u00c0\u0000"+ - "\u0000\u05a9\u05aa\u0006\u00c0\u001c\u0000\u05aa\u0191\u0001\u0000\u0000"+ - "\u0000\u05ab\u05ac\u0003\u00baU\u0000\u05ac\u05ad\u0001\u0000\u0000\u0000"+ - 
"\u05ad\u05ae\u0006\u00c1\u000f\u0000\u05ae\u05af\u0006\u00c1\u0000\u0000"+ - "\u05af\u05b0\u0006\u00c1\u001f\u0000\u05b0\u0193\u0001\u0000\u0000\u0000"+ - "\u05b1\u05b2\u0003l.\u0000\u05b2\u05b3\u0001\u0000\u0000\u0000\u05b3\u05b4"+ - "\u0006\u00c2\u000f\u0000\u05b4\u05b5\u0006\u00c2\u0000\u0000\u05b5\u05b6"+ - "\u0006\u00c2!\u0000\u05b6\u0195\u0001\u0000\u0000\u0000\u05b7\u05b8\u0003"+ - "P \u0000\u05b8\u05b9\u0001\u0000\u0000\u0000\u05b9\u05ba\u0006\u00c3\u000e"+ - "\u0000\u05ba\u05bb\u0006\u00c3\u000f\u0000\u05bb\u0197\u0001\u0000\u0000"+ - "\u0000A\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f"+ + "\u0000\u0388\u038b\u0003\u0094B\u0000\u0389\u038c\u0003T\"\u0000\u038a"+ + "\u038c\u0003b)\u0000\u038b\u0389\u0001\u0000\u0000\u0000\u038b\u038a\u0001"+ + "\u0000\u0000\u0000\u038c\u0390\u0001\u0000\u0000\u0000\u038d\u038f\u0003"+ + "d*\u0000\u038e\u038d\u0001\u0000\u0000\u0000\u038f\u0392\u0001\u0000\u0000"+ + "\u0000\u0390\u038e\u0001\u0000\u0000\u0000\u0390\u0391\u0001\u0000\u0000"+ + "\u0000\u0391\u039a\u0001\u0000\u0000\u0000\u0392\u0390\u0001\u0000\u0000"+ + "\u0000\u0393\u0395\u0003\u0094B\u0000\u0394\u0396\u0003R!\u0000\u0395"+ + "\u0394\u0001\u0000\u0000\u0000\u0396\u0397\u0001\u0000\u0000\u0000\u0397"+ + "\u0395\u0001\u0000\u0000\u0000\u0397\u0398\u0001\u0000\u0000\u0000\u0398"+ + "\u039a\u0001\u0000\u0000\u0000\u0399\u0388\u0001\u0000\u0000\u0000\u0399"+ + "\u0393\u0001\u0000\u0000\u0000\u039a\u00b5\u0001\u0000\u0000\u0000\u039b"+ + "\u039c\u0005[\u0000\u0000\u039c\u039d\u0001\u0000\u0000\u0000\u039d\u039e"+ + "\u0006S\u0000\u0000\u039e\u039f\u0006S\u0000\u0000\u039f\u00b7\u0001\u0000"+ + "\u0000\u0000\u03a0\u03a1\u0005]\u0000\u0000\u03a1\u03a2\u0001\u0000\u0000"+ + "\u0000\u03a2\u03a3\u0006T\u000f\u0000\u03a3\u03a4\u0006T\u000f\u0000\u03a4"+ + "\u00b9\u0001\u0000\u0000\u0000\u03a5\u03a9\u0003T\"\u0000\u03a6\u03a8"+ + "\u0003d*\u0000\u03a7\u03a6\u0001\u0000\u0000\u0000\u03a8\u03ab\u0001\u0000"+ + "\u0000\u0000\u03a9\u03a7\u0001\u0000\u0000\u0000\u03a9\u03aa\u0001\u0000"+ + "\u0000\u0000\u03aa\u03b6\u0001\u0000\u0000\u0000\u03ab\u03a9\u0001\u0000"+ + "\u0000\u0000\u03ac\u03af\u0003b)\u0000\u03ad\u03af\u0003\\&\u0000\u03ae"+ + "\u03ac\u0001\u0000\u0000\u0000\u03ae\u03ad\u0001\u0000\u0000\u0000\u03af"+ + "\u03b1\u0001\u0000\u0000\u0000\u03b0\u03b2\u0003d*\u0000\u03b1\u03b0\u0001"+ + "\u0000\u0000\u0000\u03b2\u03b3\u0001\u0000\u0000\u0000\u03b3\u03b1\u0001"+ + "\u0000\u0000\u0000\u03b3\u03b4\u0001\u0000\u0000\u0000\u03b4\u03b6\u0001"+ + "\u0000\u0000\u0000\u03b5\u03a5\u0001\u0000\u0000\u0000\u03b5\u03ae\u0001"+ + "\u0000\u0000\u0000\u03b6\u00bb\u0001\u0000\u0000\u0000\u03b7\u03b9\u0003"+ + "^\'\u0000\u03b8\u03ba\u0003`(\u0000\u03b9\u03b8\u0001\u0000\u0000\u0000"+ + "\u03ba\u03bb\u0001\u0000\u0000\u0000\u03bb\u03b9\u0001\u0000\u0000\u0000"+ + "\u03bb\u03bc\u0001\u0000\u0000\u0000\u03bc\u03bd\u0001\u0000\u0000\u0000"+ + "\u03bd\u03be\u0003^\'\u0000\u03be\u00bd\u0001\u0000\u0000\u0000\u03bf"+ + "\u03c0\u0003\u00bcV\u0000\u03c0\u00bf\u0001\u0000\u0000\u0000\u03c1\u03c2"+ + "\u0003<\u0016\u0000\u03c2\u03c3\u0001\u0000\u0000\u0000\u03c3\u03c4\u0006"+ + "X\u000b\u0000\u03c4\u00c1\u0001\u0000\u0000\u0000\u03c5\u03c6\u0003>\u0017"+ + "\u0000\u03c6\u03c7\u0001\u0000\u0000\u0000\u03c7\u03c8\u0006Y\u000b\u0000"+ + "\u03c8\u00c3\u0001\u0000\u0000\u0000\u03c9\u03ca\u0003@\u0018\u0000\u03ca"+ + "\u03cb\u0001\u0000\u0000\u0000\u03cb\u03cc\u0006Z\u000b\u0000\u03cc\u00c5"+ + "\u0001\u0000\u0000\u0000\u03cd\u03ce\u0003P \u0000\u03ce\u03cf\u0001\u0000"+ + 
"\u0000\u0000\u03cf\u03d0\u0006[\u000e\u0000\u03d0\u03d1\u0006[\u000f\u0000"+ + "\u03d1\u00c7\u0001\u0000\u0000\u0000\u03d2\u03d3\u0003\u00b6S\u0000\u03d3"+ + "\u03d4\u0001\u0000\u0000\u0000\u03d4\u03d5\u0006\\\f\u0000\u03d5\u00c9"+ + "\u0001\u0000\u0000\u0000\u03d6\u03d7\u0003\u00b8T\u0000\u03d7\u03d8\u0001"+ + "\u0000\u0000\u0000\u03d8\u03d9\u0006]\u0010\u0000\u03d9\u00cb\u0001\u0000"+ + "\u0000\u0000\u03da\u03db\u0003\u0170\u00b0\u0000\u03db\u03dc\u0001\u0000"+ + "\u0000\u0000\u03dc\u03dd\u0006^\u0011\u0000\u03dd\u00cd\u0001\u0000\u0000"+ + "\u0000\u03de\u03df\u0003v3\u0000\u03df\u03e0\u0001\u0000\u0000\u0000\u03e0"+ + "\u03e1\u0006_\u0012\u0000\u03e1\u00cf\u0001\u0000\u0000\u0000\u03e2\u03e3"+ + "\u0003r1\u0000\u03e3\u03e4\u0001\u0000\u0000\u0000\u03e4\u03e5\u0006`"+ + "\u0013\u0000\u03e5\u00d1\u0001\u0000\u0000\u0000\u03e6\u03e7\u0005m\u0000"+ + "\u0000\u03e7\u03e8\u0005e\u0000\u0000\u03e8\u03e9\u0005t\u0000\u0000\u03e9"+ + "\u03ea\u0005a\u0000\u0000\u03ea\u03eb\u0005d\u0000\u0000\u03eb\u03ec\u0005"+ + "a\u0000\u0000\u03ec\u03ed\u0005t\u0000\u0000\u03ed\u03ee\u0005a\u0000"+ + "\u0000\u03ee\u00d3\u0001\u0000\u0000\u0000\u03ef\u03f0\u0003D\u001a\u0000"+ + "\u03f0\u03f1\u0001\u0000\u0000\u0000\u03f1\u03f2\u0006b\u0014\u0000\u03f2"+ + "\u00d5\u0001\u0000\u0000\u0000\u03f3\u03f4\u0003f+\u0000\u03f4\u03f5\u0001"+ + "\u0000\u0000\u0000\u03f5\u03f6\u0006c\u0015\u0000\u03f6\u00d7\u0001\u0000"+ + "\u0000\u0000\u03f7\u03f8\u0003<\u0016\u0000\u03f8\u03f9\u0001\u0000\u0000"+ + "\u0000\u03f9\u03fa\u0006d\u000b\u0000\u03fa\u00d9\u0001\u0000\u0000\u0000"+ + "\u03fb\u03fc\u0003>\u0017\u0000\u03fc\u03fd\u0001\u0000\u0000\u0000\u03fd"+ + "\u03fe\u0006e\u000b\u0000\u03fe\u00db\u0001\u0000\u0000\u0000\u03ff\u0400"+ + "\u0003@\u0018\u0000\u0400\u0401\u0001\u0000\u0000\u0000\u0401\u0402\u0006"+ + "f\u000b\u0000\u0402\u00dd\u0001\u0000\u0000\u0000\u0403\u0404\u0003P "+ + "\u0000\u0404\u0405\u0001\u0000\u0000\u0000\u0405\u0406\u0006g\u000e\u0000"+ + "\u0406\u0407\u0006g\u000f\u0000\u0407\u00df\u0001\u0000\u0000\u0000\u0408"+ + "\u0409\u0003z5\u0000\u0409\u040a\u0001\u0000\u0000\u0000\u040a\u040b\u0006"+ + "h\u0016\u0000\u040b\u00e1\u0001\u0000\u0000\u0000\u040c\u040d\u0003v3"+ + "\u0000\u040d\u040e\u0001\u0000\u0000\u0000\u040e\u040f\u0006i\u0012\u0000"+ + "\u040f\u00e3\u0001\u0000\u0000\u0000\u0410\u0415\u0003T\"\u0000\u0411"+ + "\u0415\u0003R!\u0000\u0412\u0415\u0003b)\u0000\u0413\u0415\u0003\u00ae"+ + "O\u0000\u0414\u0410\u0001\u0000\u0000\u0000\u0414\u0411\u0001\u0000\u0000"+ + "\u0000\u0414\u0412\u0001\u0000\u0000\u0000\u0414\u0413\u0001\u0000\u0000"+ + "\u0000\u0415\u00e5\u0001\u0000\u0000\u0000\u0416\u0419\u0003T\"\u0000"+ + "\u0417\u0419\u0003\u00aeO\u0000\u0418\u0416\u0001\u0000\u0000\u0000\u0418"+ + "\u0417\u0001\u0000\u0000\u0000\u0419\u041d\u0001\u0000\u0000\u0000\u041a"+ + "\u041c\u0003\u00e4j\u0000\u041b\u041a\u0001\u0000\u0000\u0000\u041c\u041f"+ + "\u0001\u0000\u0000\u0000\u041d\u041b\u0001\u0000\u0000\u0000\u041d\u041e"+ + "\u0001\u0000\u0000\u0000\u041e\u042a\u0001\u0000\u0000\u0000\u041f\u041d"+ + "\u0001\u0000\u0000\u0000\u0420\u0423\u0003b)\u0000\u0421\u0423\u0003\\"+ + "&\u0000\u0422\u0420\u0001\u0000\u0000\u0000\u0422\u0421\u0001\u0000\u0000"+ + "\u0000\u0423\u0425\u0001\u0000\u0000\u0000\u0424\u0426\u0003\u00e4j\u0000"+ + "\u0425\u0424\u0001\u0000\u0000\u0000\u0426\u0427\u0001\u0000\u0000\u0000"+ + "\u0427\u0425\u0001\u0000\u0000\u0000\u0427\u0428\u0001\u0000\u0000\u0000"+ + "\u0428\u042a\u0001\u0000\u0000\u0000\u0429\u0418\u0001\u0000\u0000\u0000"+ + 
"\u0429\u0422\u0001\u0000\u0000\u0000\u042a\u00e7\u0001\u0000\u0000\u0000"+ + "\u042b\u042e\u0003\u00e6k\u0000\u042c\u042e\u0003\u00bcV\u0000\u042d\u042b"+ + "\u0001\u0000\u0000\u0000\u042d\u042c\u0001\u0000\u0000\u0000\u042e\u042f"+ + "\u0001\u0000\u0000\u0000\u042f\u042d\u0001\u0000\u0000\u0000\u042f\u0430"+ + "\u0001\u0000\u0000\u0000\u0430\u00e9\u0001\u0000\u0000\u0000\u0431\u0432"+ + "\u0003<\u0016\u0000\u0432\u0433\u0001\u0000\u0000\u0000\u0433\u0434\u0006"+ + "m\u000b\u0000\u0434\u00eb\u0001\u0000\u0000\u0000\u0435\u0436\u0003>\u0017"+ + "\u0000\u0436\u0437\u0001\u0000\u0000\u0000\u0437\u0438\u0006n\u000b\u0000"+ + "\u0438\u00ed\u0001\u0000\u0000\u0000\u0439\u043a\u0003@\u0018\u0000\u043a"+ + "\u043b\u0001\u0000\u0000\u0000\u043b\u043c\u0006o\u000b\u0000\u043c\u00ef"+ + "\u0001\u0000\u0000\u0000\u043d\u043e\u0003P \u0000\u043e\u043f\u0001\u0000"+ + "\u0000\u0000\u043f\u0440\u0006p\u000e\u0000\u0440\u0441\u0006p\u000f\u0000"+ + "\u0441\u00f1\u0001\u0000\u0000\u0000\u0442\u0443\u0003r1\u0000\u0443\u0444"+ + "\u0001\u0000\u0000\u0000\u0444\u0445\u0006q\u0013\u0000\u0445\u00f3\u0001"+ + "\u0000\u0000\u0000\u0446\u0447\u0003v3\u0000\u0447\u0448\u0001\u0000\u0000"+ + "\u0000\u0448\u0449\u0006r\u0012\u0000\u0449\u00f5\u0001\u0000\u0000\u0000"+ + "\u044a\u044b\u0003z5\u0000\u044b\u044c\u0001\u0000\u0000\u0000\u044c\u044d"+ + "\u0006s\u0016\u0000\u044d\u00f7\u0001\u0000\u0000\u0000\u044e\u044f\u0005"+ + "a\u0000\u0000\u044f\u0450\u0005s\u0000\u0000\u0450\u00f9\u0001\u0000\u0000"+ + "\u0000\u0451\u0452\u0003\u00e8l\u0000\u0452\u0453\u0001\u0000\u0000\u0000"+ + "\u0453\u0454\u0006u\u0017\u0000\u0454\u00fb\u0001\u0000\u0000\u0000\u0455"+ + "\u0456\u0003<\u0016\u0000\u0456\u0457\u0001\u0000\u0000\u0000\u0457\u0458"+ + "\u0006v\u000b\u0000\u0458\u00fd\u0001\u0000\u0000\u0000\u0459\u045a\u0003"+ + ">\u0017\u0000\u045a\u045b\u0001\u0000\u0000\u0000\u045b\u045c\u0006w\u000b"+ + "\u0000\u045c\u00ff\u0001\u0000\u0000\u0000\u045d\u045e\u0003@\u0018\u0000"+ + "\u045e\u045f\u0001\u0000\u0000\u0000\u045f\u0460\u0006x\u000b\u0000\u0460"+ + "\u0101\u0001\u0000\u0000\u0000\u0461\u0462\u0003P \u0000\u0462\u0463\u0001"+ + "\u0000\u0000\u0000\u0463\u0464\u0006y\u000e\u0000\u0464\u0465\u0006y\u000f"+ + "\u0000\u0465\u0103\u0001\u0000\u0000\u0000\u0466\u0467\u0003\u00b6S\u0000"+ + "\u0467\u0468\u0001\u0000\u0000\u0000\u0468\u0469\u0006z\f\u0000\u0469"+ + "\u046a\u0006z\u0018\u0000\u046a\u0105\u0001\u0000\u0000\u0000\u046b\u046c"+ + "\u0005o\u0000\u0000\u046c\u046d\u0005n\u0000\u0000\u046d\u046e\u0001\u0000"+ + "\u0000\u0000\u046e\u046f\u0006{\u0019\u0000\u046f\u0107\u0001\u0000\u0000"+ + "\u0000\u0470\u0471\u0005w\u0000\u0000\u0471\u0472\u0005i\u0000\u0000\u0472"+ + "\u0473\u0005t\u0000\u0000\u0473\u0474\u0005h\u0000\u0000\u0474\u0475\u0001"+ + "\u0000\u0000\u0000\u0475\u0476\u0006|\u0019\u0000\u0476\u0109\u0001\u0000"+ + "\u0000\u0000\u0477\u0478\b\f\u0000\u0000\u0478\u010b\u0001\u0000\u0000"+ + "\u0000\u0479\u047b\u0003\u010a}\u0000\u047a\u0479\u0001\u0000\u0000\u0000"+ + "\u047b\u047c\u0001\u0000\u0000\u0000\u047c\u047a\u0001\u0000\u0000\u0000"+ + "\u047c\u047d\u0001\u0000\u0000\u0000\u047d\u047e\u0001\u0000\u0000\u0000"+ + "\u047e\u047f\u0003\u0170\u00b0\u0000\u047f\u0481\u0001\u0000\u0000\u0000"+ + "\u0480\u047a\u0001\u0000\u0000\u0000\u0480\u0481\u0001\u0000\u0000\u0000"+ + "\u0481\u0483\u0001\u0000\u0000\u0000\u0482\u0484\u0003\u010a}\u0000\u0483"+ + "\u0482\u0001\u0000\u0000\u0000\u0484\u0485\u0001\u0000\u0000\u0000\u0485"+ + "\u0483\u0001\u0000\u0000\u0000\u0485\u0486\u0001\u0000\u0000\u0000\u0486"+ + 
"\u010d\u0001\u0000\u0000\u0000\u0487\u0488\u0003\u010c~\u0000\u0488\u0489"+ + "\u0001\u0000\u0000\u0000\u0489\u048a\u0006\u007f\u001a\u0000\u048a\u010f"+ + "\u0001\u0000\u0000\u0000\u048b\u048c\u0003<\u0016\u0000\u048c\u048d\u0001"+ + "\u0000\u0000\u0000\u048d\u048e\u0006\u0080\u000b\u0000\u048e\u0111\u0001"+ + "\u0000\u0000\u0000\u048f\u0490\u0003>\u0017\u0000\u0490\u0491\u0001\u0000"+ + "\u0000\u0000\u0491\u0492\u0006\u0081\u000b\u0000\u0492\u0113\u0001\u0000"+ + "\u0000\u0000\u0493\u0494\u0003@\u0018\u0000\u0494\u0495\u0001\u0000\u0000"+ + "\u0000\u0495\u0496\u0006\u0082\u000b\u0000\u0496\u0115\u0001\u0000\u0000"+ + "\u0000\u0497\u0498\u0003P \u0000\u0498\u0499\u0001\u0000\u0000\u0000\u0499"+ + "\u049a\u0006\u0083\u000e\u0000\u049a\u049b\u0006\u0083\u000f\u0000\u049b"+ + "\u049c\u0006\u0083\u000f\u0000\u049c\u0117\u0001\u0000\u0000\u0000\u049d"+ + "\u049e\u0003r1\u0000\u049e\u049f\u0001\u0000\u0000\u0000\u049f\u04a0\u0006"+ + "\u0084\u0013\u0000\u04a0\u0119\u0001\u0000\u0000\u0000\u04a1\u04a2\u0003"+ + "v3\u0000\u04a2\u04a3\u0001\u0000\u0000\u0000\u04a3\u04a4\u0006\u0085\u0012"+ + "\u0000\u04a4\u011b\u0001\u0000\u0000\u0000\u04a5\u04a6\u0003z5\u0000\u04a6"+ + "\u04a7\u0001\u0000\u0000\u0000\u04a7\u04a8\u0006\u0086\u0016\u0000\u04a8"+ + "\u011d\u0001\u0000\u0000\u0000\u04a9\u04aa\u0003\u0108|\u0000\u04aa\u04ab"+ + "\u0001\u0000\u0000\u0000\u04ab\u04ac\u0006\u0087\u001b\u0000\u04ac\u011f"+ + "\u0001\u0000\u0000\u0000\u04ad\u04ae\u0003\u00e8l\u0000\u04ae\u04af\u0001"+ + "\u0000\u0000\u0000\u04af\u04b0\u0006\u0088\u0017\u0000\u04b0\u0121\u0001"+ + "\u0000\u0000\u0000\u04b1\u04b2\u0003\u00beW\u0000\u04b2\u04b3\u0001\u0000"+ + "\u0000\u0000\u04b3\u04b4\u0006\u0089\u001c\u0000\u04b4\u0123\u0001\u0000"+ + "\u0000\u0000\u04b5\u04b6\u0003<\u0016\u0000\u04b6\u04b7\u0001\u0000\u0000"+ + "\u0000\u04b7\u04b8\u0006\u008a\u000b\u0000\u04b8\u0125\u0001\u0000\u0000"+ + "\u0000\u04b9\u04ba\u0003>\u0017\u0000\u04ba\u04bb\u0001\u0000\u0000\u0000"+ + "\u04bb\u04bc\u0006\u008b\u000b\u0000\u04bc\u0127\u0001\u0000\u0000\u0000"+ + "\u04bd\u04be\u0003@\u0018\u0000\u04be\u04bf\u0001\u0000\u0000\u0000\u04bf"+ + "\u04c0\u0006\u008c\u000b\u0000\u04c0\u0129\u0001\u0000\u0000\u0000\u04c1"+ + "\u04c2\u0003P \u0000\u04c2\u04c3\u0001\u0000\u0000\u0000\u04c3\u04c4\u0006"+ + "\u008d\u000e\u0000\u04c4\u04c5\u0006\u008d\u000f\u0000\u04c5\u012b\u0001"+ + "\u0000\u0000\u0000\u04c6\u04c7\u0003\u0170\u00b0\u0000\u04c7\u04c8\u0001"+ + "\u0000\u0000\u0000\u04c8\u04c9\u0006\u008e\u0011\u0000\u04c9\u012d\u0001"+ + "\u0000\u0000\u0000\u04ca\u04cb\u0003v3\u0000\u04cb\u04cc\u0001\u0000\u0000"+ + "\u0000\u04cc\u04cd\u0006\u008f\u0012\u0000\u04cd\u012f\u0001\u0000\u0000"+ + "\u0000\u04ce\u04cf\u0003z5\u0000\u04cf\u04d0\u0001\u0000\u0000\u0000\u04d0"+ + "\u04d1\u0006\u0090\u0016\u0000\u04d1\u0131\u0001\u0000\u0000\u0000\u04d2"+ + "\u04d3\u0003\u0106{\u0000\u04d3\u04d4\u0001\u0000\u0000\u0000\u04d4\u04d5"+ + "\u0006\u0091\u001d\u0000\u04d5\u04d6\u0006\u0091\u001e\u0000\u04d6\u0133"+ + "\u0001\u0000\u0000\u0000\u04d7\u04d8\u0003D\u001a\u0000\u04d8\u04d9\u0001"+ + "\u0000\u0000\u0000\u04d9\u04da\u0006\u0092\u0014\u0000\u04da\u0135\u0001"+ + "\u0000\u0000\u0000\u04db\u04dc\u0003f+\u0000\u04dc\u04dd\u0001\u0000\u0000"+ + "\u0000\u04dd\u04de\u0006\u0093\u0015\u0000\u04de\u0137\u0001\u0000\u0000"+ + "\u0000\u04df\u04e0\u0003<\u0016\u0000\u04e0\u04e1\u0001\u0000\u0000\u0000"+ + "\u04e1\u04e2\u0006\u0094\u000b\u0000\u04e2\u0139\u0001\u0000\u0000\u0000"+ + "\u04e3\u04e4\u0003>\u0017\u0000\u04e4\u04e5\u0001\u0000\u0000\u0000\u04e5"+ + 
"\u04e6\u0006\u0095\u000b\u0000\u04e6\u013b\u0001\u0000\u0000\u0000\u04e7"+ + "\u04e8\u0003@\u0018\u0000\u04e8\u04e9\u0001\u0000\u0000\u0000\u04e9\u04ea"+ + "\u0006\u0096\u000b\u0000\u04ea\u013d\u0001\u0000\u0000\u0000\u04eb\u04ec"+ + "\u0003P \u0000\u04ec\u04ed\u0001\u0000\u0000\u0000\u04ed\u04ee\u0006\u0097"+ + "\u000e\u0000\u04ee\u04ef\u0006\u0097\u000f\u0000\u04ef\u04f0\u0006\u0097"+ + "\u000f\u0000\u04f0\u013f\u0001\u0000\u0000\u0000\u04f1\u04f2\u0003v3\u0000"+ + "\u04f2\u04f3\u0001\u0000\u0000\u0000\u04f3\u04f4\u0006\u0098\u0012\u0000"+ + "\u04f4\u0141\u0001\u0000\u0000\u0000\u04f5\u04f6\u0003z5\u0000\u04f6\u04f7"+ + "\u0001\u0000\u0000\u0000\u04f7\u04f8\u0006\u0099\u0016\u0000\u04f8\u0143"+ + "\u0001\u0000\u0000\u0000\u04f9\u04fa\u0003\u00e8l\u0000\u04fa\u04fb\u0001"+ + "\u0000\u0000\u0000\u04fb\u04fc\u0006\u009a\u0017\u0000\u04fc\u0145\u0001"+ + "\u0000\u0000\u0000\u04fd\u04fe\u0003<\u0016\u0000\u04fe\u04ff\u0001\u0000"+ + "\u0000\u0000\u04ff\u0500\u0006\u009b\u000b\u0000\u0500\u0147\u0001\u0000"+ + "\u0000\u0000\u0501\u0502\u0003>\u0017\u0000\u0502\u0503\u0001\u0000\u0000"+ + "\u0000\u0503\u0504\u0006\u009c\u000b\u0000\u0504\u0149\u0001\u0000\u0000"+ + "\u0000\u0505\u0506\u0003@\u0018\u0000\u0506\u0507\u0001\u0000\u0000\u0000"+ + "\u0507\u0508\u0006\u009d\u000b\u0000\u0508\u014b\u0001\u0000\u0000\u0000"+ + "\u0509\u050a\u0003P \u0000\u050a\u050b\u0001\u0000\u0000\u0000\u050b\u050c"+ + "\u0006\u009e\u000e\u0000\u050c\u050d\u0006\u009e\u000f\u0000\u050d\u014d"+ + "\u0001\u0000\u0000\u0000\u050e\u050f\u0003z5\u0000\u050f\u0510\u0001\u0000"+ + "\u0000\u0000\u0510\u0511\u0006\u009f\u0016\u0000\u0511\u014f\u0001\u0000"+ + "\u0000\u0000\u0512\u0513\u0003\u00beW\u0000\u0513\u0514\u0001\u0000\u0000"+ + "\u0000\u0514\u0515\u0006\u00a0\u001c\u0000\u0515\u0151\u0001\u0000\u0000"+ + "\u0000\u0516\u0517\u0003\u00baU\u0000\u0517\u0518\u0001\u0000\u0000\u0000"+ + "\u0518\u0519\u0006\u00a1\u001f\u0000\u0519\u0153\u0001\u0000\u0000\u0000"+ + "\u051a\u051b\u0003<\u0016\u0000\u051b\u051c\u0001\u0000\u0000\u0000\u051c"+ + "\u051d\u0006\u00a2\u000b\u0000\u051d\u0155\u0001\u0000\u0000\u0000\u051e"+ + "\u051f\u0003>\u0017\u0000\u051f\u0520\u0001\u0000\u0000\u0000\u0520\u0521"+ + "\u0006\u00a3\u000b\u0000\u0521\u0157\u0001\u0000\u0000\u0000\u0522\u0523"+ + "\u0003@\u0018\u0000\u0523\u0524\u0001\u0000\u0000\u0000\u0524\u0525\u0006"+ + "\u00a4\u000b\u0000\u0525\u0159\u0001\u0000\u0000\u0000\u0526\u0527\u0003"+ + "P \u0000\u0527\u0528\u0001\u0000\u0000\u0000\u0528\u0529\u0006\u00a5\u000e"+ + "\u0000\u0529\u052a\u0006\u00a5\u000f\u0000\u052a\u015b\u0001\u0000\u0000"+ + "\u0000\u052b\u052c\u0005i\u0000\u0000\u052c\u052d\u0005n\u0000\u0000\u052d"+ + "\u052e\u0005f\u0000\u0000\u052e\u052f\u0005o\u0000\u0000\u052f\u015d\u0001"+ + "\u0000\u0000\u0000\u0530\u0531\u0003<\u0016\u0000\u0531\u0532\u0001\u0000"+ + "\u0000\u0000\u0532\u0533\u0006\u00a7\u000b\u0000\u0533\u015f\u0001\u0000"+ + "\u0000\u0000\u0534\u0535\u0003>\u0017\u0000\u0535\u0536\u0001\u0000\u0000"+ + "\u0000\u0536\u0537\u0006\u00a8\u000b\u0000\u0537\u0161\u0001\u0000\u0000"+ + "\u0000\u0538\u0539\u0003@\u0018\u0000\u0539\u053a\u0001\u0000\u0000\u0000"+ + "\u053a\u053b\u0006\u00a9\u000b\u0000\u053b\u0163\u0001\u0000\u0000\u0000"+ + "\u053c\u053d\u0003P \u0000\u053d\u053e\u0001\u0000\u0000\u0000\u053e\u053f"+ + "\u0006\u00aa\u000e\u0000\u053f\u0540\u0006\u00aa\u000f\u0000\u0540\u0165"+ + "\u0001\u0000\u0000\u0000\u0541\u0542\u0005f\u0000\u0000\u0542\u0543\u0005"+ + "u\u0000\u0000\u0543\u0544\u0005n\u0000\u0000\u0544\u0545\u0005c\u0000"+ + 
"\u0000\u0545\u0546\u0005t\u0000\u0000\u0546\u0547\u0005i\u0000\u0000\u0547"+ + "\u0548\u0005o\u0000\u0000\u0548\u0549\u0005n\u0000\u0000\u0549\u054a\u0005"+ + "s\u0000\u0000\u054a\u0167\u0001\u0000\u0000\u0000\u054b\u054c\u0003<\u0016"+ + "\u0000\u054c\u054d\u0001\u0000\u0000\u0000\u054d\u054e\u0006\u00ac\u000b"+ + "\u0000\u054e\u0169\u0001\u0000\u0000\u0000\u054f\u0550\u0003>\u0017\u0000"+ + "\u0550\u0551\u0001\u0000\u0000\u0000\u0551\u0552\u0006\u00ad\u000b\u0000"+ + "\u0552\u016b\u0001\u0000\u0000\u0000\u0553\u0554\u0003@\u0018\u0000\u0554"+ + "\u0555\u0001\u0000\u0000\u0000\u0555\u0556\u0006\u00ae\u000b\u0000\u0556"+ + "\u016d\u0001\u0000\u0000\u0000\u0557\u0558\u0003\u00b8T\u0000\u0558\u0559"+ + "\u0001\u0000\u0000\u0000\u0559\u055a\u0006\u00af\u0010\u0000\u055a\u055b"+ + "\u0006\u00af\u000f\u0000\u055b\u016f\u0001\u0000\u0000\u0000\u055c\u055d"+ + "\u0005:\u0000\u0000\u055d\u0171\u0001\u0000\u0000\u0000\u055e\u0564\u0003"+ + "\\&\u0000\u055f\u0564\u0003R!\u0000\u0560\u0564\u0003z5\u0000\u0561\u0564"+ + "\u0003T\"\u0000\u0562\u0564\u0003b)\u0000\u0563\u055e\u0001\u0000\u0000"+ + "\u0000\u0563\u055f\u0001\u0000\u0000\u0000\u0563\u0560\u0001\u0000\u0000"+ + "\u0000\u0563\u0561\u0001\u0000\u0000\u0000\u0563\u0562\u0001\u0000\u0000"+ + "\u0000\u0564\u0565\u0001\u0000\u0000\u0000\u0565\u0563\u0001\u0000\u0000"+ + "\u0000\u0565\u0566\u0001\u0000\u0000\u0000\u0566\u0173\u0001\u0000\u0000"+ + "\u0000\u0567\u0568\u0003<\u0016\u0000\u0568\u0569\u0001\u0000\u0000\u0000"+ + "\u0569\u056a\u0006\u00b2\u000b\u0000\u056a\u0175\u0001\u0000\u0000\u0000"+ + "\u056b\u056c\u0003>\u0017\u0000\u056c\u056d\u0001\u0000\u0000\u0000\u056d"+ + "\u056e\u0006\u00b3\u000b\u0000\u056e\u0177\u0001\u0000\u0000\u0000\u056f"+ + "\u0570\u0003@\u0018\u0000\u0570\u0571\u0001\u0000\u0000\u0000\u0571\u0572"+ + "\u0006\u00b4\u000b\u0000\u0572\u0179\u0001\u0000\u0000\u0000\u0573\u0574"+ + "\u0003P \u0000\u0574\u0575\u0001\u0000\u0000\u0000\u0575\u0576\u0006\u00b5"+ + "\u000e\u0000\u0576\u0577\u0006\u00b5\u000f\u0000\u0577\u017b\u0001\u0000"+ + "\u0000\u0000\u0578\u0579\u0003D\u001a\u0000\u0579\u057a\u0001\u0000\u0000"+ + "\u0000\u057a\u057b\u0006\u00b6\u0014\u0000\u057b\u057c\u0006\u00b6\u000f"+ + "\u0000\u057c\u057d\u0006\u00b6 \u0000\u057d\u017d\u0001\u0000\u0000\u0000"+ + "\u057e\u057f\u0003f+\u0000\u057f\u0580\u0001\u0000\u0000\u0000\u0580\u0581"+ + "\u0006\u00b7\u0015\u0000\u0581\u0582\u0006\u00b7\u000f\u0000\u0582\u0583"+ + "\u0006\u00b7 \u0000\u0583\u017f\u0001\u0000\u0000\u0000\u0584\u0585\u0003"+ + "<\u0016\u0000\u0585\u0586\u0001\u0000\u0000\u0000\u0586\u0587\u0006\u00b8"+ + "\u000b\u0000\u0587\u0181\u0001\u0000\u0000\u0000\u0588\u0589\u0003>\u0017"+ + "\u0000\u0589\u058a\u0001\u0000\u0000\u0000\u058a\u058b\u0006\u00b9\u000b"+ + "\u0000\u058b\u0183\u0001\u0000\u0000\u0000\u058c\u058d\u0003@\u0018\u0000"+ + "\u058d\u058e\u0001\u0000\u0000\u0000\u058e\u058f\u0006\u00ba\u000b\u0000"+ + "\u058f\u0185\u0001\u0000\u0000\u0000\u0590\u0591\u0003\u0170\u00b0\u0000"+ + "\u0591\u0592\u0001\u0000\u0000\u0000\u0592\u0593\u0006\u00bb\u0011\u0000"+ + "\u0593\u0594\u0006\u00bb\u000f\u0000\u0594\u0595\u0006\u00bb\u0007\u0000"+ + "\u0595\u0187\u0001\u0000\u0000\u0000\u0596\u0597\u0003v3\u0000\u0597\u0598"+ + "\u0001\u0000\u0000\u0000\u0598\u0599\u0006\u00bc\u0012\u0000\u0599\u059a"+ + "\u0006\u00bc\u000f\u0000\u059a\u059b\u0006\u00bc\u0007\u0000\u059b\u0189"+ + "\u0001\u0000\u0000\u0000\u059c\u059d\u0003<\u0016\u0000\u059d\u059e\u0001"+ + "\u0000\u0000\u0000\u059e\u059f\u0006\u00bd\u000b\u0000\u059f\u018b\u0001"+ + 
"\u0000\u0000\u0000\u05a0\u05a1\u0003>\u0017\u0000\u05a1\u05a2\u0001\u0000"+ + "\u0000\u0000\u05a2\u05a3\u0006\u00be\u000b\u0000\u05a3\u018d\u0001\u0000"+ + "\u0000\u0000\u05a4\u05a5\u0003@\u0018\u0000\u05a5\u05a6\u0001\u0000\u0000"+ + "\u0000\u05a6\u05a7\u0006\u00bf\u000b\u0000\u05a7\u018f\u0001\u0000\u0000"+ + "\u0000\u05a8\u05a9\u0003\u00beW\u0000\u05a9\u05aa\u0001\u0000\u0000\u0000"+ + "\u05aa\u05ab\u0006\u00c0\u000f\u0000\u05ab\u05ac\u0006\u00c0\u0000\u0000"+ + "\u05ac\u05ad\u0006\u00c0\u001c\u0000\u05ad\u0191\u0001\u0000\u0000\u0000"+ + "\u05ae\u05af\u0003\u00baU\u0000\u05af\u05b0\u0001\u0000\u0000\u0000\u05b0"+ + "\u05b1\u0006\u00c1\u000f\u0000\u05b1\u05b2\u0006\u00c1\u0000\u0000\u05b2"+ + "\u05b3\u0006\u00c1\u001f\u0000\u05b3\u0193\u0001\u0000\u0000\u0000\u05b4"+ + "\u05b5\u0003l.\u0000\u05b5\u05b6\u0001\u0000\u0000\u0000\u05b6\u05b7\u0006"+ + "\u00c2\u000f\u0000\u05b7\u05b8\u0006\u00c2\u0000\u0000\u05b8\u05b9\u0006"+ + "\u00c2!\u0000\u05b9\u0195\u0001\u0000\u0000\u0000\u05ba\u05bb\u0003P "+ + "\u0000\u05bb\u05bc\u0001\u0000\u0000\u0000\u05bc\u05bd\u0006\u00c3\u000e"+ + "\u0000\u05bd\u05be\u0006\u00c3\u000f\u0000\u05be\u0197\u0001\u0000\u0000"+ + "\u0000B\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f"+ "\r\u000e\u000f\u024c\u0256\u025a\u025d\u0266\u0268\u0273\u027a\u027f\u02a6"+ "\u02ab\u02b4\u02bb\u02c0\u02c2\u02cd\u02d5\u02d8\u02da\u02df\u02e4\u02ea"+ - "\u02f1\u02f6\u02fc\u02ff\u0307\u030b\u038d\u0394\u0396\u03a6\u03ab\u03b0"+ - "\u03b2\u03b8\u0411\u0415\u041a\u041f\u0424\u0426\u042a\u042c\u0479\u047d"+ - "\u0482\u0560\u0562\"\u0005\u0002\u0000\u0005\u0004\u0000\u0005\u0006\u0000"+ - "\u0005\u0001\u0000\u0005\u0003\u0000\u0005\b\u0000\u0005\f\u0000\u0005"+ - "\u000e\u0000\u0005\n\u0000\u0005\u0005\u0000\u0005\u000b\u0000\u0000\u0001"+ - "\u0000\u0007G\u0000\u0005\u0000\u0000\u0007\u001e\u0000\u0004\u0000\u0000"+ - "\u0007H\u0000\u0007t\u0000\u0007\'\u0000\u0007%\u0000\u0007\u001a\u0000"+ - "\u0007\u001f\u0000\u0007)\u0000\u0007R\u0000\u0005\r\u0000\u0005\u0007"+ - "\u0000\u0007\\\u0000\u0007[\u0000\u0007J\u0000\u0007Z\u0000\u0005\t\u0000"+ - "\u0007I\u0000\u0005\u000f\u0000\u0007\"\u0000"; + "\u02f1\u02f6\u02fc\u02ff\u0307\u030b\u038b\u0390\u0397\u0399\u03a9\u03ae"+ + "\u03b3\u03b5\u03bb\u0414\u0418\u041d\u0422\u0427\u0429\u042d\u042f\u047c"+ + "\u0480\u0485\u0563\u0565\"\u0005\u0002\u0000\u0005\u0004\u0000\u0005\u0006"+ + "\u0000\u0005\u0001\u0000\u0005\u0003\u0000\u0005\b\u0000\u0005\f\u0000"+ + "\u0005\u000e\u0000\u0005\n\u0000\u0005\u0005\u0000\u0005\u000b\u0000\u0000"+ + "\u0001\u0000\u0007G\u0000\u0005\u0000\u0000\u0007\u001e\u0000\u0004\u0000"+ + "\u0000\u0007H\u0000\u0007t\u0000\u0007\'\u0000\u0007%\u0000\u0007\u001a"+ + "\u0000\u0007\u001f\u0000\u0007)\u0000\u0007R\u0000\u0005\r\u0000\u0005"+ + "\u0007\u0000\u0007\\\u0000\u0007[\u0000\u0007J\u0000\u0007Z\u0000\u0005"+ + "\t\u0000\u0007I\u0000\u0005\u000f\u0000\u0007\"\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index b1dff5ce8c342..eef2dbbb53362 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -93,7 +93,9 @@ public void testNamedParams() throws IOException { String 
paramsString = """ ,"params":[ {"n1" : "8.15.0" }, { "n2" : 0.05 }, {"n3" : -799810013 }, - {"n4" : "127.0.0.1"}, {"n5" : "esql"}, {"n_6" : null}, {"n7_" : false}] }"""; + {"n4" : "127.0.0.1"}, {"n5" : "esql"}, {"n_6" : null}, {"n7_" : false}, + {"_n1" : "8.15.0" }, { "__n2" : 0.05 }, {"__3" : -799810013 }, + {"__4n" : "127.0.0.1"}, {"_n5" : "esql"}, {"_n6" : null}, {"_n7" : false}] }"""; List params = new ArrayList<>(4); params.add(new QueryParam("n1", "8.15.0", DataType.KEYWORD)); params.add(new QueryParam("n2", 0.05, DataType.DOUBLE)); @@ -102,6 +104,13 @@ public void testNamedParams() throws IOException { params.add(new QueryParam("n5", "esql", DataType.KEYWORD)); params.add(new QueryParam("n_6", null, DataType.NULL)); params.add(new QueryParam("n7_", false, DataType.BOOLEAN)); + params.add(new QueryParam("_n1", "8.15.0", DataType.KEYWORD)); + params.add(new QueryParam("__n2", 0.05, DataType.DOUBLE)); + params.add(new QueryParam("__3", -799810013, DataType.INTEGER)); + params.add(new QueryParam("__4n", "127.0.0.1", DataType.KEYWORD)); + params.add(new QueryParam("_n5", "esql", DataType.KEYWORD)); + params.add(new QueryParam("_n6", null, DataType.NULL)); + params.add(new QueryParam("_n7", false, DataType.BOOLEAN)); String json = String.format(Locale.ROOT, """ { "query": "%s", @@ -131,7 +140,7 @@ public void testInvalidParams() throws IOException { QueryBuilder filter = randomQueryBuilder(); String paramsString1 = """ - "params":[ {"1" : "v1" }, {"1x" : "v1" }, {"_a" : "v1" }, {"@-#" : "v1" }, 1, 2]"""; + "params":[ {"1" : "v1" }, {"1x" : "v1" }, {"@a" : "v1" }, {"@-#" : "v1" }, 1, 2, {"_1" : "v1" }, {"Å" : 0}, {"x " : 0}]"""; String json1 = String.format(Locale.ROOT, """ { %s @@ -146,16 +155,20 @@ public void testInvalidParams() throws IOException { e1.getCause().getMessage(), containsString( "Failed to parse params: [2:16] [1] is not a valid parameter name, " - + "a valid parameter name starts with a letter and contains letters, digits and underscores only" + + "a valid parameter name starts with a letter or underscore, and contains letters, digits and underscores only" ) ); assertThat(e1.getCause().getMessage(), containsString("[2:31] [1x] is not a valid parameter name")); - assertThat(e1.getCause().getMessage(), containsString("[2:47] [_a] is not a valid parameter name")); + assertThat(e1.getCause().getMessage(), containsString("[2:47] [@a] is not a valid parameter name")); assertThat(e1.getCause().getMessage(), containsString("[2:63] [@-#] is not a valid parameter name")); + assertThat(e1.getCause().getMessage(), containsString("[2:102] [Å] is not a valid parameter name")); + assertThat(e1.getCause().getMessage(), containsString("[2:113] [x ] is not a valid parameter name")); + assertThat( e1.getCause().getMessage(), containsString( - "Params cannot contain both named and unnamed parameters; got [{1:v1}, {1x:v1}, {_a:v1}, {@-#:v1}] and [{1}, {2}]" + "Params cannot contain both named and unnamed parameters; " + + "got [{1:v1}, {1x:v1}, {@a:v1}, {@-#:v1}, {_1:v1}, {Å:0}, {x :0}] and [{1}, {2}]" ) ); @@ -175,7 +188,7 @@ public void testInvalidParams() throws IOException { e2.getCause().getMessage(), containsString( "Failed to parse params: [2:22] [1] is not a valid parameter name, " - + "a valid parameter name starts with a letter and contains letters, digits and underscores only" + + "a valid parameter name starts with a letter or underscore, and contains letters, digits and underscores only" ) ); assertThat(e2.getCause().getMessage(), containsString("[2:37] [1x] is not a valid 
parameter name")); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index a5ef7900a1a78..6980171a7bcd7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -1019,7 +1019,7 @@ public void testInvalidNamedParams() { "Unknown query parameter [n2], did you mean any of [n3, n1]?" ); - expectError("from test | where x < ?_1", List.of(new QueryParam("_1", 5, INTEGER)), "extraneous input '_1' expecting "); + expectError("from test | where x < ?@1", List.of(new QueryParam("@1", 5, INTEGER)), "extraneous input '@1' expecting "); expectError("from test | where x < ?#1", List.of(new QueryParam("#1", 5, INTEGER)), "token recognition error at: '#'"); @@ -1028,6 +1028,10 @@ public void testInvalidNamedParams() { List.of(new QueryParam("n_1", 5, INTEGER), new QueryParam("n_2", 5, INTEGER)), "extraneous input '?' expecting " ); + + expectError("from test | where x < ?Å", List.of(new QueryParam("Å", 5, INTEGER)), "line 1:24: token recognition error at: 'Å'"); + + expectError("from test | eval x = ?Å", List.of(new QueryParam("Å", 5, INTEGER)), "line 1:23: token recognition error at: 'Å'"); } public void testPositionalParams() { @@ -1069,12 +1073,6 @@ public void testInvalidPositionalParams() { + "line 1:35: No parameter is defined for position 2, did you mean position 1?" ); - expectError( - "from test | where x < ?0 and y < ?2", - List.of(new QueryParam(null, 5, INTEGER)), - "No parameter is defined for position 2, did you mean position 1" - ); - expectError( "from test | where x < ?0", List.of(new QueryParam(null, 5, INTEGER), new QueryParam(null, 10, INTEGER)), @@ -1107,6 +1105,18 @@ public void testParamInWhere() { assertThat(limit.children().get(0).children().size(), equalTo(1)); assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement("from test | where x < ?_n1 | limit 10", new QueryParams(List.of(new QueryParam("_n1", 5, INTEGER)))); + assertThat(plan, instanceOf(Limit.class)); + limit = (Limit) plan; + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(((Literal) limit.limit()).value(), equalTo(10)); + assertThat(limit.children().size(), equalTo(1)); + assertThat(limit.children().get(0), instanceOf(Filter.class)); + w = (Filter) limit.children().get(0); + assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); + assertThat(limit.children().get(0).children().size(), equalTo(1)); + assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement("from test | where x < ?1 | limit 10", new QueryParams(List.of(new QueryParam(null, 5, INTEGER)))); assertThat(plan, instanceOf(Limit.class)); limit = (Limit) plan; @@ -1118,6 +1128,18 @@ public void testParamInWhere() { assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); assertThat(limit.children().get(0).children().size(), equalTo(1)); assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); + + plan = statement("from test | where x < ?__1 | limit 10", new QueryParams(List.of(new QueryParam("__1", 5, INTEGER)))); + assertThat(plan, instanceOf(Limit.class)); + limit = (Limit) plan; + assertThat(limit.limit(), instanceOf(Literal.class)); + 
assertThat(((Literal) limit.limit()).value(), equalTo(10)); + assertThat(limit.children().size(), equalTo(1)); + assertThat(limit.children().get(0), instanceOf(Filter.class)); + w = (Filter) limit.children().get(0); + assertThat(((Literal) w.condition().children().get(1)).value(), equalTo(5)); + assertThat(limit.children().get(0).children().size(), equalTo(1)); + assertThat(limit.children().get(0).children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamInEval() { @@ -1161,6 +1183,26 @@ public void testParamInEval() { assertThat(f.children().size(), equalTo(1)); assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement( + "from test | where x < ?_n1 | eval y = ?_n2 + ?_n3 | limit 10", + new QueryParams( + List.of(new QueryParam("_n1", 5, INTEGER), new QueryParam("_n2", -1, INTEGER), new QueryParam("_n3", 100, INTEGER)) + ) + ); + assertThat(plan, instanceOf(Limit.class)); + limit = (Limit) plan; + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(((Literal) limit.limit()).value(), equalTo(10)); + assertThat(limit.children().size(), equalTo(1)); + assertThat(limit.children().get(0), instanceOf(Eval.class)); + eval = (Eval) limit.children().get(0); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left()).value(), equalTo(-1)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right()).value(), equalTo(100)); + f = (Filter) eval.children().get(0); + assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); + assertThat(f.children().size(), equalTo(1)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement( "from test | where x < ?1 | eval y = ?2 + ?1 | limit 10", new QueryParams(List.of(new QueryParam(null, 5, INTEGER), new QueryParam(null, -1, INTEGER))) @@ -1178,6 +1220,24 @@ public void testParamInEval() { assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + + plan = statement( + "from test | where x < ?_1 | eval y = ?_2 + ?_1 | limit 10", + new QueryParams(List.of(new QueryParam("_1", 5, INTEGER), new QueryParam("_2", -1, INTEGER))) + ); + assertThat(plan, instanceOf(Limit.class)); + limit = (Limit) plan; + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(((Literal) limit.limit()).value(), equalTo(10)); + assertThat(limit.children().size(), equalTo(1)); + assertThat(limit.children().get(0), instanceOf(Eval.class)); + eval = (Eval) limit.children().get(0); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left()).value(), equalTo(-1)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right()).value(), equalTo(5)); + f = (Filter) eval.children().get(0); + assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); + assertThat(f.children().size(), equalTo(1)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamInAggFunction() { @@ -1231,6 +1291,31 @@ public void testParamInAggFunction() { assertThat(f.children().size(), equalTo(1)); assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement( + "from test | where x < ?_n1 | eval y = ?_n2 + ?_n3 | stats count(?_n4) by z", + new QueryParams( + List.of( + new QueryParam("_n1", 5, INTEGER), + new QueryParam("_n2", -1, INTEGER), + new QueryParam("_n3", 100, INTEGER), + new QueryParam("_n4", "*", KEYWORD) + ) + ) + ); + 
assertThat(plan, instanceOf(Aggregate.class)); + agg = (Aggregate) plan; + assertThat(((Literal) agg.aggregates().get(0).children().get(0).children().get(0)).value(), equalTo("*")); + assertThat(agg.child(), instanceOf(Eval.class)); + assertThat(agg.children().size(), equalTo(1)); + assertThat(agg.children().get(0), instanceOf(Eval.class)); + eval = (Eval) agg.children().get(0); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left()).value(), equalTo(-1)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right()).value(), equalTo(100)); + f = (Filter) eval.children().get(0); + assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); + assertThat(f.children().size(), equalTo(1)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + plan = statement( "from test | where x < ?1 | eval y = ?2 + ?1 | stats count(?3) by z", new QueryParams( @@ -1250,6 +1335,26 @@ public void testParamInAggFunction() { assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); assertThat(f.children().size(), equalTo(1)); assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); + + plan = statement( + "from test | where x < ?_1 | eval y = ?_2 + ?_1 | stats count(?_3) by z", + new QueryParams( + List.of(new QueryParam("_1", 5, INTEGER), new QueryParam("_2", -1, INTEGER), new QueryParam("_3", "*", KEYWORD)) + ) + ); + assertThat(plan, instanceOf(Aggregate.class)); + agg = (Aggregate) plan; + assertThat(((Literal) agg.aggregates().get(0).children().get(0).children().get(0)).value(), equalTo("*")); + assertThat(agg.child(), instanceOf(Eval.class)); + assertThat(agg.children().size(), equalTo(1)); + assertThat(agg.children().get(0), instanceOf(Eval.class)); + eval = (Eval) agg.children().get(0); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).left()).value(), equalTo(-1)); + assertThat(((Literal) ((Add) eval.fields().get(0).child()).right()).value(), equalTo(5)); + f = (Filter) eval.children().get(0); + assertThat(((Literal) f.condition().children().get(1)).value(), equalTo(5)); + assertThat(f.children().size(), equalTo(1)); + assertThat(f.children().get(0), instanceOf(UnresolvedRelation.class)); } public void testParamMixed() { @@ -1266,24 +1371,36 @@ public void testParamMixed() { ); expectError( - "from test | where x < ?1 | eval y = ?n2 + ?n3 | limit ?n4", + "from test | where x < ? | eval y = ?_n2 + ?n3 | limit ?_4", List.of( new QueryParam("n1", 5, INTEGER), - new QueryParam("n2", -1, INTEGER), + new QueryParam("_n2", -1, INTEGER), new QueryParam("n3", 100, INTEGER), new QueryParam("n4", 10, INTEGER) ), + "Inconsistent parameter declaration, " + + "use one of positional, named or anonymous params but not a combination of named and anonymous" + ); + + expectError( + "from test | where x < ?1 | eval y = ?n2 + ?_n3 | limit ?n4", + List.of( + new QueryParam("n1", 5, INTEGER), + new QueryParam("n2", -1, INTEGER), + new QueryParam("_n3", 100, INTEGER), + new QueryParam("n4", 10, INTEGER) + ), "Inconsistent parameter declaration, " + "use one of positional, named or anonymous params but not a combination of named and positional" ); expectError( - "from test | where x < ? | eval y = ?2 + ?n3 | limit ?n4", + "from test | where x < ? 
| eval y = ?2 + ?n3 | limit ?_n4", List.of( new QueryParam("n1", 5, INTEGER), new QueryParam("n2", -1, INTEGER), new QueryParam("n3", 100, INTEGER), - new QueryParam("n4", 10, INTEGER) + new QueryParam("_n4", 10, INTEGER) ), "Inconsistent parameter declaration, " + "use one of positional, named or anonymous params but not a combination of positional and anonymous" @@ -1536,6 +1653,22 @@ public void testSimpleMetricsWithStats() { ); } + public void testInvalidAlias() { + expectError("row Å = 1", "line 1:5: token recognition error at: 'Å'"); + expectError("from test | eval Å = 1", "line 1:18: token recognition error at: 'Å'"); + expectError("from test | where Å == 1", "line 1:19: token recognition error at: 'Å'"); + expectError("from test | keep Å", "line 1:18: token recognition error at: 'Å'"); + expectError("from test | drop Å", "line 1:18: token recognition error at: 'Å'"); + expectError("from test | sort Å", "line 1:18: token recognition error at: 'Å'"); + expectError("from test | rename Å as A", "line 1:20: token recognition error at: 'Å'"); + expectError("from test | rename A as Å", "line 1:25: token recognition error at: 'Å'"); + expectError("from test | rename Å as Å", "line 1:20: token recognition error at: 'Å'"); + expectError("from test | stats Å = count(*)", "line 1:19: token recognition error at: 'Å'"); + expectError("from test | stats count(Å)", "line 1:25: token recognition error at: 'Å'"); + expectError("from test | eval A = coalesce(Å, null)", "line 1:31: token recognition error at: 'Å'"); + expectError("from test | eval A = coalesce(\"Å\", Å)", "line 1:36: token recognition error at: 'Å'"); + } + private LogicalPlan unresolvedRelation(String index) { return new UnresolvedRelation(EMPTY, new TableIdentifier(EMPTY, null, index), false, List.of(), IndexMode.STANDARD, null); } From a02dc7165c75f12701f8d47a2bdefe5283735267 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 26 Aug 2024 13:39:19 -0500 Subject: [PATCH 080/352] Improve performance of grok pattern cycle detection (#111947) --- docs/changelog/111947.yaml | 5 + .../org/elasticsearch/grok/PatternBank.java | 144 +++++++++----- .../elasticsearch/grok/PatternBankTests.java | 179 ++++++++++++++++-- 3 files changed, 267 insertions(+), 61 deletions(-) create mode 100644 docs/changelog/111947.yaml diff --git a/docs/changelog/111947.yaml b/docs/changelog/111947.yaml new file mode 100644 index 0000000000000..0aff0b9c7b8be --- /dev/null +++ b/docs/changelog/111947.yaml @@ -0,0 +1,5 @@ +pr: 111947 +summary: Improve performance of grok pattern cycle detection +area: Ingest Node +type: bug +issues: [] diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java b/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java index bcf9253866931..3b10d58815169 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java @@ -8,12 +8,17 @@ package org.elasticsearch.grok; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; +import java.util.Deque; +import java.util.HashSet; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; public class PatternBank { @@ -57,52 +62,102 @@ public PatternBank extendWith(Map extraPatterns) { } /** - * Checks whether patterns reference each other in a circular manner and if so fail with an exception. 
+ * Checks whether patterns reference each other in a circular manner and if so fail with an IllegalArgumentException. It will also + * fail if any pattern value contains a pattern name that does not exist in the bank. *

* In a pattern, anything between %{ and } or : is considered * a reference to another named pattern. This method will navigate to all these named patterns and * check for a circular reference. */ static void forbidCircularReferences(Map bank) { - // first ensure that the pattern bank contains no simple circular references (i.e., any pattern - // containing an immediate reference to itself) as those can cause the remainder of this algorithm - // to recurse infinitely - for (Map.Entry entry : bank.entrySet()) { - if (patternReferencesItself(entry.getValue(), entry.getKey())) { - throw new IllegalArgumentException("circular reference in pattern [" + entry.getKey() + "][" + entry.getValue() + "]"); + Set allVisitedNodes = new HashSet<>(); + Set nodesVisitedMoreThanOnceInAPath = new HashSet<>(); + // Walk the full path starting at each node in the graph: + for (String traversalStartNode : bank.keySet()) { + if (nodesVisitedMoreThanOnceInAPath.contains(traversalStartNode) == false && allVisitedNodes.contains(traversalStartNode)) { + // If we have seen this node before in a path, and it only appeared once in that path, there is no need to check it again + continue; } - } - - // next, recursively check any other pattern names referenced in each pattern - for (Map.Entry entry : bank.entrySet()) { - String name = entry.getKey(); - String pattern = entry.getValue(); - innerForbidCircularReferences(bank, name, new ArrayList<>(), pattern); + Set visitedFromThisStartNode = new LinkedHashSet<>(); + /* + * This stack records where we are in the graph. Each String[] in the stack represents a collection of neighbors to the first + * non-null node in the layer below it. Null means that the path from that location has been fully traversed. Once all nodes + * at a layer have been set to null, the layer is popped. So for example say we have the graph + * ( 1 -> (2 -> (4, 5, 8), 3 -> (6, 7))) then when we are at 6 via 1 -> 3 -> 6, the stack looks like this: + * [6, 7] + * [null, 3] + * [1] + */ + Deque stack = new ArrayDeque<>(); + stack.push(new String[] { traversalStartNode }); + // This is used so that we know that we're unwinding the stack and know not to get the current node's neighbors again. + boolean unwinding = false; + while (stack.isEmpty() == false) { + String[] currentLevel = stack.peek(); + int firstNonNullIndex = findFirstNonNull(currentLevel); + String node = currentLevel[firstNonNullIndex]; + boolean endOfThisPath = false; + if (unwinding) { + // We have completed all of this node's neighbors and have popped back to the node + endOfThisPath = true; + } else if (traversalStartNode.equals(node) && stack.size() > 1) { + Deque reversedPath = new ArrayDeque<>(); + for (String[] level : stack) { + reversedPath.push(level[findFirstNonNull(level)]); + } + throw new IllegalArgumentException("circular reference detected: " + String.join("->", reversedPath)); + } else if (visitedFromThisStartNode.contains(node)) { + /* + * We are only looking for a cycle starting and ending at traversalStartNode right now. But this node has been + * visited more than once in the path rooted at traversalStartNode. This could be because it is a cycle, or could be + * because two nodes in the path both point to it. We add it to nodesVisitedMoreThanOnceInAPath so that we make sure + * to check the path rooted at this node later. 
+ */ + nodesVisitedMoreThanOnceInAPath.add(node); + endOfThisPath = true; + } else { + visitedFromThisStartNode.add(node); + String[] neighbors = getPatternNamesForPattern(bank, node); + if (neighbors.length == 0) { + endOfThisPath = true; + } else { + stack.push(neighbors); + } + } + if (endOfThisPath) { + if (firstNonNullIndex == currentLevel.length - 1) { + // We have handled all the neighbors at this level -- there are no more non-null ones + stack.pop(); + unwinding = true; + } else { + currentLevel[firstNonNullIndex] = null; + unwinding = false; + } + } else { + unwinding = false; + } + } + allVisitedNodes.addAll(visitedFromThisStartNode); } } - private static void innerForbidCircularReferences(Map bank, String patternName, List path, String pattern) { - if (patternReferencesItself(pattern, patternName)) { - String message; - if (path.isEmpty()) { - message = "circular reference in pattern [" + patternName + "][" + pattern + "]"; - } else { - message = "circular reference in pattern [" - + path.remove(path.size() - 1) - + "][" - + pattern - + "] back to pattern [" - + patternName - + "]"; - // add rest of the path: - if (path.isEmpty() == false) { - message += " via patterns [" + String.join("=>", path) + "]"; - } + private static int findFirstNonNull(String[] level) { + for (int i = 0; i < level.length; i++) { + if (level[i] != null) { + return i; } - throw new IllegalArgumentException(message); } + return -1; + } - // next check any other pattern names found in the pattern + /** + * This method returns the array of pattern names (if any) found in the bank for the pattern named patternName. If no pattern names + * are found, an empty array is returned. If any of the list of pattern names to be returned does not exist in the bank, an exception + * is thrown. 
+ */ + private static String[] getPatternNamesForPattern(Map bank, String patternName) { + String pattern = bank.get(patternName); + List patternReferences = new ArrayList<>(); for (int i = pattern.indexOf("%{"); i != -1; i = pattern.indexOf("%{", i + 1)) { int begin = i + 2; int bracketIndex = pattern.indexOf('}', begin); @@ -112,25 +167,22 @@ private static void innerForbidCircularReferences(Map bank, Stri end = bracketIndex; } else if (columnIndex != -1 && bracketIndex == -1) { end = columnIndex; - } else if (bracketIndex != -1 && columnIndex != -1) { + } else if (bracketIndex != -1) { end = Math.min(bracketIndex, columnIndex); } else { throw new IllegalArgumentException("pattern [" + pattern + "] has an invalid syntax"); } String otherPatternName = pattern.substring(begin, end); - path.add(otherPatternName); - String otherPattern = bank.get(otherPatternName); - if (otherPattern == null) { - throw new IllegalArgumentException( - "pattern [" + patternName + "] is referencing a non-existent pattern [" + otherPatternName + "]" - ); + if (patternReferences.contains(otherPatternName) == false) { + patternReferences.add(otherPatternName); + String otherPattern = bank.get(otherPatternName); + if (otherPattern == null) { + throw new IllegalArgumentException( + "pattern [" + patternName + "] is referencing a non-existent pattern [" + otherPatternName + "]" + ); + } } - - innerForbidCircularReferences(bank, patternName, path, otherPattern); } - } - - private static boolean patternReferencesItself(String pattern, String patternName) { - return pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":"); + return patternReferences.toArray(new String[0]); } } diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java index dcc7ab431611a..08a4965cdb371 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/PatternBankTests.java @@ -11,8 +11,13 @@ import org.elasticsearch.test.ESTestCase; import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.Map; -import java.util.TreeMap; +import java.util.Set; + +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.hamcrest.Matchers.containsString; public class PatternBankTests extends ESTestCase { @@ -32,7 +37,7 @@ public void testBankCannotBeNull() { public void testConstructorValidatesCircularReferences() { var e = expectThrows(IllegalArgumentException.class, () -> new PatternBank(Map.of("NAME", "!!!%{NAME}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); } public void testExtendWith() { @@ -48,36 +53,36 @@ public void testExtendWith() { public void testCircularReference() { var e = expectThrows(IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME:name}!!!"))); - assertEquals("circular reference in pattern [NAME][!!!%{NAME:name}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = 
expectThrows( IllegalArgumentException.class, () -> { PatternBank.forbidCircularReferences(Map.of("NAME", "!!!%{NAME:name:int}!!!")); } ); - assertEquals("circular reference in pattern [NAME][!!!%{NAME:name:int}!!!]", e.getMessage()); + assertEquals("circular reference detected: NAME->NAME", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals("circular reference in pattern [NAME2][!!!%{NAME1}!!!] back to pattern [NAME1]", e.getMessage()); + assertEquals("circular reference detected: NAME1->NAME2->NAME1", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME3}!!!"); bank.put("NAME3", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals("circular reference in pattern [NAME3][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2]", e.getMessage()); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME1", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> { - Map bank = new TreeMap<>(); + Map bank = new LinkedHashMap<>(); bank.put("NAME1", "!!!%{NAME2}!!!"); bank.put("NAME2", "!!!%{NAME3}!!!"); bank.put("NAME3", "!!!%{NAME4}!!!"); @@ -85,10 +90,78 @@ public void testCircularReference() { bank.put("NAME5", "!!!%{NAME1}!!!"); PatternBank.forbidCircularReferences(bank); }); - assertEquals( - "circular reference in pattern [NAME5][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2=>NAME3=>NAME4]", - e.getMessage() - ); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME4->NAME5->NAME1", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!%{NAME2}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME2->NAME3->NAME2", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME2}!!%{NAME3}!"); + bank.put("NAME3", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME2->NAME3->NAME1", e.getMessage()); + + { + Map bank = new HashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!%{NAME3}%{NAME4}"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", "!!!!!!"); + PatternBank.forbidCircularReferences(bank); + } + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!%{NAME3}%{NAME4}"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME4->NAME5->NAME1", e.getMessage()); + + { + Map bank = new HashMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", 
"!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + } + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2} %{NAME3}!!!"); + bank.put("NAME2", "!!!%{NAME4} %{NAME5}!!!"); + bank.put("NAME3", "!!!!!!"); + bank.put("NAME4", "!!!!!!"); + bank.put("NAME5", "!!!%{NAME1}!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME2->NAME5->NAME1", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map bank = new LinkedHashMap<>(); + bank.put("NAME1", "!!!%{NAME2} %{NAME3}!!!"); + bank.put("NAME2", "!!!%{NAME4} %{NAME5}!!!"); + bank.put("NAME3", "!!!%{NAME1}!!!"); + bank.put("NAME4", "!!!!!!"); + bank.put("NAME5", "!!!!!!"); + PatternBank.forbidCircularReferences(bank); + }); + assertEquals("circular reference detected: NAME1->NAME3->NAME1", e.getMessage()); } public void testCircularSelfReference() { @@ -96,7 +169,7 @@ public void testCircularSelfReference() { IllegalArgumentException.class, () -> PatternBank.forbidCircularReferences(Map.of("ANOTHER", "%{INT}", "INT", "%{INT}")) ); - assertEquals("circular reference in pattern [INT][%{INT}]", e.getMessage()); + assertEquals("circular reference detected: INT->INT", e.getMessage()); } public void testInvalidPatternReferences() { @@ -112,4 +185,80 @@ public void testInvalidPatternReferences() { ); assertEquals("pattern [%{VALID] has an invalid syntax", e.getMessage()); } + + public void testDeepGraphOfPatterns() { + Map patternBankMap = randomBoolean() ? new HashMap<>() : new LinkedHashMap<>(); + final int nodeCount = 20_000; + for (int i = 0; i < nodeCount - 1; i++) { + patternBankMap.put("FOO" + i, "%{FOO" + (i + 1) + "}"); + } + patternBankMap.put("FOO" + (nodeCount - 1), "foo"); + new PatternBank(patternBankMap); + } + + public void testRandomBanksWithoutCycles() { + /* + * This creates a large number of pattens, each of which refers to a large number of patterns. But there are no cycles in any of + * these since each pattern only references patterns with a higher ID. We don't expect any exceptions here. + */ + Map patternBankMap = randomBoolean() ? new HashMap<>() : new LinkedHashMap<>(); + final int nodeCount = 500; + for (int i = 0; i < nodeCount - 1; i++) { + StringBuilder patternBuilder = new StringBuilder(); + for (int j = 0; j < randomIntBetween(0, 20); j++) { + patternBuilder.append("%{FOO-" + randomIntBetween(i + 1, nodeCount - 1) + "}"); + } + patternBankMap.put("FOO-" + i, patternBuilder.toString()); + } + patternBankMap.put("FOO-" + (nodeCount - 1), "foo"); + new PatternBank(patternBankMap); + } + + public void testRandomBanksWithCycles() { + /* + * This creates a large number of pattens, each of which refers to a large number of patterns. We have at least one cycle because + * we pick a node at random, and make sure that a node that it links (or one of its descendants) to links back. If no descendant + * links back to it, we create an artificial cycle at the end. 
+ */ + Map patternBankMap = new LinkedHashMap<>(); + final int nodeCount = 500; + int nodeToHaveCycle = randomIntBetween(0, nodeCount); + int nodeToPotentiallyCreateCycle = -1; + boolean haveCreatedCycle = false; + for (int i = 0; i < nodeCount - 1; i++) { + StringBuilder patternBuilder = new StringBuilder(); + int numberOfLinkedPatterns = randomIntBetween(1, 20); + int nodeToLinkBackIndex = randomIntBetween(0, numberOfLinkedPatterns); + Set childNodes = new HashSet<>(); + for (int j = 0; j < numberOfLinkedPatterns; j++) { + int childNode = randomIntBetween(i + 1, nodeCount - 1); + childNodes.add(childNode); + patternBuilder.append("%{FOO-" + childNode + "}"); + if (i == nodeToHaveCycle) { + if (nodeToLinkBackIndex == j) { + nodeToPotentiallyCreateCycle = childNode; + } + } + } + if (i == nodeToPotentiallyCreateCycle) { + // We either create the cycle here, or randomly pick a child node to maybe create the cycle + if (randomBoolean()) { + patternBuilder.append("%{FOO-" + nodeToHaveCycle + "}"); + haveCreatedCycle = true; + } else { + nodeToPotentiallyCreateCycle = randomFrom(childNodes); + } + } + patternBankMap.put("FOO-" + i, patternBuilder.toString()); + } + if (haveCreatedCycle) { + patternBankMap.put("FOO-" + (nodeCount - 1), "foo"); + } else { + // We didn't randomly create a cycle, so just force one in this last pattern + nodeToHaveCycle = nodeCount - 1; + patternBankMap.put("FOO-" + nodeToHaveCycle, "%{FOO-" + nodeToHaveCycle + "}"); + } + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new PatternBank(patternBankMap)); + assertThat(e.getMessage(), containsString("FOO-" + nodeToHaveCycle)); + } } From 74d964b9b16b7c8837ec0869b15e2c2bdee416cd Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 26 Aug 2024 15:01:22 -0400 Subject: [PATCH 081/352] ESQL: Fix a bug in `MV_PERCENTILE` (#112218) This fixes a bug in `MV_PERCENTILE` that was producing incorrect results on when the `Block` was in ascending order. We were always reading from the first entry in the block. 
Closes #112188 Closes #112187 Closes #112193 Closes #112180 --- docs/changelog/112218.yaml | 9 + muted-tests.yml | 12 -- .../scalar/multivalue/MvPercentile.java | 22 ++- .../multivalue/MvPercentileSimpleTests.java | 154 ++++++++++++++++++ .../scalar/multivalue/MvPercentileTests.java | 17 +- 5 files changed, 194 insertions(+), 20 deletions(-) create mode 100644 docs/changelog/112218.yaml create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileSimpleTests.java diff --git a/docs/changelog/112218.yaml b/docs/changelog/112218.yaml new file mode 100644 index 0000000000000..c426dd7ade4ed --- /dev/null +++ b/docs/changelog/112218.yaml @@ -0,0 +1,9 @@ +pr: 112218 +summary: "ESQL: Fix a bug in `MV_PERCENTILE`" +area: ES|QL +type: bug +issues: + - 112193 + - 112180 + - 112187 + - 112188 diff --git a/muted-tests.yml b/muted-tests.yml index 85c29759cabb2..eff599b758a1d 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -151,24 +151,12 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112144 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/112147 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {mv_percentile.FromIndex SYNC} - issue: https://github.com/elastic/elasticsearch/issues/112180 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - method: test {mv_percentile.FromIndex SYNC} - issue: https://github.com/elastic/elasticsearch/issues/112187 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - method: test {mv_percentile.FromIndex ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/112188 - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} issue: https://github.com/elastic/elasticsearch/issues/112191 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {mv_percentile.FromIndex ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/112193 - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testDeleteJobAsync issue: https://github.com/elastic/elasticsearch/issues/112212 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java index b1e710b9b2a40..1eb0c70a7b08e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentile.java @@ -233,7 +233,7 @@ static void process( // Percentile calculators - private static double calculateDoublePercentile( + static double calculateDoublePercentile( DoubleBlock valuesBlock, int firstValueIndex, int valueCount, @@ -257,7 +257,11 @@ private static double calculateDoublePercentile( return valuesBlock.getDouble(valueCount - 1); } else { assert lowerIndex >= 0 && upperIndex < valueCount; - return calculateDoublePercentile(fraction, valuesBlock.getDouble(lowerIndex), valuesBlock.getDouble(upperIndex)); + return 
calculateDoublePercentile( + fraction, + valuesBlock.getDouble(firstValueIndex + lowerIndex), + valuesBlock.getDouble(firstValueIndex + upperIndex) + ); } } @@ -289,7 +293,7 @@ private static double calculateDoublePercentile( return calculateDoublePercentile(fraction, scratch.values[lowerIndex], scratch.values[upperIndex]); } - private static int calculateIntPercentile( + static int calculateIntPercentile( IntBlock valuesBlock, int firstValueIndex, int valueCount, @@ -313,8 +317,8 @@ private static int calculateIntPercentile( return valuesBlock.getInt(valueCount - 1); } else { assert lowerIndex >= 0 && upperIndex < valueCount; - var lowerValue = valuesBlock.getInt(lowerIndex); - var upperValue = valuesBlock.getInt(upperIndex); + var lowerValue = valuesBlock.getInt(firstValueIndex + lowerIndex); + var upperValue = valuesBlock.getInt(firstValueIndex + upperIndex); var difference = (long) upperValue - lowerValue; return lowerValue + (int) (fraction * difference); } @@ -351,7 +355,7 @@ private static int calculateIntPercentile( return lowerValue + (int) (fraction * difference); } - private static long calculateLongPercentile( + static long calculateLongPercentile( LongBlock valuesBlock, int firstValueIndex, int valueCount, @@ -375,7 +379,11 @@ private static long calculateLongPercentile( return valuesBlock.getLong(valueCount - 1); } else { assert lowerIndex >= 0 && upperIndex < valueCount; - return calculateLongPercentile(fraction, valuesBlock.getLong(lowerIndex), valuesBlock.getLong(upperIndex)); + return calculateLongPercentile( + fraction, + valuesBlock.getLong(firstValueIndex + lowerIndex), + valuesBlock.getLong(firstValueIndex + upperIndex) + ); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileSimpleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileSimpleTests.java new file mode 100644 index 0000000000000..81ae8efb7aba7 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileSimpleTests.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.TestBlockFactory; + +import static org.hamcrest.Matchers.equalTo; + +public class MvPercentileSimpleTests extends ESTestCase { + public void testDoubleMvAsc() { + try (DoubleBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newDoubleBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendDouble(80); + builder.appendDouble(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendDouble(-6.33); + builder.appendDouble(-3.34); + builder.appendDouble(-0.31); + builder.appendDouble(6.23); + builder.endPositionEntry(); + builder.mvOrdering(Block.MvOrdering.SORTED_ASCENDING); + try (DoubleBlock block = builder.build()) { + MvPercentile.DoubleSortingScratch scratch = new MvPercentile.DoubleSortingScratch(); + double p0 = MvPercentile.calculateDoublePercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + double p1 = MvPercentile.calculateDoublePercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87.5)); + assertThat(p1, equalTo(1.325)); + } + } + } + + public void testDoubleRandomOrder() { + try (DoubleBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newDoubleBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendDouble(80); + builder.appendDouble(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendDouble(-3.34); + builder.appendDouble(-6.33); + builder.appendDouble(6.23); + builder.appendDouble(-0.31); + builder.endPositionEntry(); + try (DoubleBlock block = builder.build()) { + MvPercentile.DoubleSortingScratch scratch = new MvPercentile.DoubleSortingScratch(); + double p0 = MvPercentile.calculateDoublePercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + double p1 = MvPercentile.calculateDoublePercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87.5)); + assertThat(p1, equalTo(1.325)); + } + } + } + + public void testIntMvAsc() { + try (IntBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newIntBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendInt(80); + builder.appendInt(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendInt(-6); + builder.appendInt(-3); + builder.appendInt(0); + builder.appendInt(6); + builder.endPositionEntry(); + builder.mvOrdering(Block.MvOrdering.SORTED_ASCENDING); + try (IntBlock block = builder.build()) { + MvPercentile.IntSortingScratch scratch = new MvPercentile.IntSortingScratch(); + int p0 = MvPercentile.calculateIntPercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + int p1 = MvPercentile.calculateIntPercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87)); + assertThat(p1, equalTo(1)); + } + } + } + + public void testIntRandomOrder() { + try (IntBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newIntBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendInt(80); + builder.appendInt(90); + builder.endPositionEntry(); + 
builder.beginPositionEntry(); + builder.appendInt(-3); + builder.appendInt(-6); + builder.appendInt(6); + builder.appendInt(0); + builder.endPositionEntry(); + try (IntBlock block = builder.build()) { + MvPercentile.IntSortingScratch scratch = new MvPercentile.IntSortingScratch(); + int p0 = MvPercentile.calculateIntPercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + int p1 = MvPercentile.calculateIntPercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87)); + assertThat(p1, equalTo(1)); + } + } + } + + public void testLongMvAsc() { + try (LongBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newLongBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendLong(80); + builder.appendLong(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendLong(-6); + builder.appendLong(-3); + builder.appendLong(0); + builder.appendLong(6); + builder.endPositionEntry(); + builder.mvOrdering(Block.MvOrdering.SORTED_ASCENDING); + try (LongBlock block = builder.build()) { + MvPercentile.LongSortingScratch scratch = new MvPercentile.LongSortingScratch(); + long p0 = MvPercentile.calculateLongPercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + long p1 = MvPercentile.calculateLongPercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87L)); + assertThat(p1, equalTo(1L)); + } + } + } + + public void testLongRandomOrder() { + try (LongBlock.Builder builder = TestBlockFactory.getNonBreakingInstance().newLongBlockBuilder(10)) { + builder.beginPositionEntry(); + builder.appendLong(80); + builder.appendLong(90); + builder.endPositionEntry(); + builder.beginPositionEntry(); + builder.appendLong(-3); + builder.appendLong(-6); + builder.appendLong(6); + builder.appendLong(0); + builder.endPositionEntry(); + try (LongBlock block = builder.build()) { + MvPercentile.LongSortingScratch scratch = new MvPercentile.LongSortingScratch(); + long p0 = MvPercentile.calculateLongPercentile(block, block.getFirstValueIndex(0), block.getValueCount(0), 75, scratch); + long p1 = MvPercentile.calculateLongPercentile(block, block.getFirstValueIndex(1), block.getValueCount(1), 75, scratch); + assertThat(p0, equalTo(87L)); + assertThat(p1, equalTo(1L)); + } + } + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java index 3410b95458302..29cc959e6a943 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvPercentileTests.java @@ -59,7 +59,7 @@ public static Iterable parameters() { } } - for (var percentileType : List.of(INTEGER, LONG, DataType.DOUBLE)) { + for (var percentileType : List.of(INTEGER, LONG, DOUBLE)) { cases.addAll( List.of( // Doubles @@ -334,6 +334,21 @@ public static Iterable parameters() { ); } } + cases.add( + new TestCaseSupplier( + "from example", + List.of(DOUBLE, INTEGER), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(List.of(-3.34, -6.33, 6.23, -0.31), DOUBLE, "field"), + new TestCaseSupplier.TypedData(75, INTEGER, "percentile") + ), + 
evaluatorString(DOUBLE, INTEGER), + DOUBLE, + equalTo(1.325) + ) + ) + ); return parameterSuppliersFromTypedDataWithDefaultChecks( (nullPosition, nullValueDataType, original) -> nullValueDataType == DataType.NULL && nullPosition == 0 From 631a63c9ed87bb03dc447a4c0ed528d37e87c24e Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Mon, 26 Aug 2024 15:44:23 -0400 Subject: [PATCH 082/352] [CI] Add lucene snapshot pipeline schedules for lucene_snapshot_10 branch (#112215) --- .../scripts/lucene-snapshot/update-branch.sh | 10 +++++----- .../lucene-snapshot/update-es-snapshot.sh | 4 ++-- catalog-info.yaml | 18 +++++++++++++++--- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/.buildkite/scripts/lucene-snapshot/update-branch.sh b/.buildkite/scripts/lucene-snapshot/update-branch.sh index d02123f3236e7..6a2d1e3df05f7 100755 --- a/.buildkite/scripts/lucene-snapshot/update-branch.sh +++ b/.buildkite/scripts/lucene-snapshot/update-branch.sh @@ -2,17 +2,17 @@ set -euo pipefail -if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot" ]]; then - echo "Error: This script should only be run on the lucene_snapshot branch" +if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot"* ]]; then + echo "Error: This script should only be run on lucene_snapshot branches" exit 1 fi -echo --- Updating lucene_snapshot branch with main +echo --- Updating "$BUILDKITE_BRANCH" branch with main git config --global user.name elasticsearchmachine git config --global user.email 'infra-root+elasticsearchmachine@elastic.co' -git checkout lucene_snapshot +git checkout "$BUILDKITE_BRANCH" git fetch origin main git merge --no-edit origin/main -git push origin lucene_snapshot +git push origin "$BUILDKITE_BRANCH" diff --git a/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh b/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh index 75f42a32cb590..7bec83d055139 100755 --- a/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh +++ b/.buildkite/scripts/lucene-snapshot/update-es-snapshot.sh @@ -2,8 +2,8 @@ set -euo pipefail -if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot" ]]; then - echo "Error: This script should only be run on the lucene_snapshot branch" +if [[ "$BUILDKITE_BRANCH" != "lucene_snapshot"* ]]; then + echo "Error: This script should only be run on the lucene_snapshot branches" exit 1 fi diff --git a/catalog-info.yaml b/catalog-info.yaml index dfeeae51c1b3a..e57841c9de268 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -125,7 +125,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot lucene_snapshot_10 default_branch: lucene_snapshot teams: elasticsearch-team: {} @@ -142,6 +142,10 @@ spec: branch: lucene_snapshot cronline: "0 2 * * * America/New_York" message: "Builds a new lucene snapshot 1x per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 2 * * * America/New_York" + message: "Builds a new lucene snapshot 1x per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -169,7 +173,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot lucene_snapshot_10 default_branch: lucene_snapshot 
teams: elasticsearch-team: {} @@ -186,6 +190,10 @@ spec: branch: lucene_snapshot cronline: "0 6 * * * America/New_York" message: "Merges main into lucene_snapshot branch 1x per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 6 * * * America/New_York" + message: "Merges main into lucene_snapshot_10 branch 1x per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -213,7 +221,7 @@ spec: ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" SLACK_NOTIFICATIONS_CHANNEL: "#lucene" SLACK_NOTIFICATIONS_ALL_BRANCHES: "true" - branch_configuration: lucene_snapshot + branch_configuration: lucene_snapshot lucene_snapshot_10 default_branch: lucene_snapshot teams: elasticsearch-team: {} @@ -230,6 +238,10 @@ spec: branch: lucene_snapshot cronline: "0 9,12,15,18 * * * America/New_York" message: "Runs tests against lucene_snapshot branch several times per day" + Periodically on lucene_snapshot_10: + branch: lucene_snapshot_10 + cronline: "0 9,12,15,18 * * * America/New_York" + message: "Runs tests against lucene_snapshot_10 branch several times per day" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 From ef95cdd4cce3cdb3c788dd6c2de122dcc7f82d4a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 26 Aug 2024 18:51:12 -0700 Subject: [PATCH 083/352] Fix native library loading zstd with jna (#112221) Recent refactoring of native library paths broke jna loading zstd. This commit fixes jna to set the jna.library.path during init so that jna calls to load libraries still work. 
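As a sketch of the ordering this depends on (simplified; the path constant
below is a hypothetical stand-in for LoaderHelper.platformLibDir): JNA
consults the jna.library.path system property when it searches for a native
library, so the property must be set before the first load is attempted.
Putting the assignment in a static initializer of the provider class makes it
run during class initialization, ahead of any instance being constructed.

    import java.nio.file.Path;

    class JnaPathSketch {
        // hypothetical stand-in for LoaderHelper.platformLibDir
        private static final Path PLATFORM_LIB_DIR = Path.of("lib", "platform");

        static {
            // runs once, on class initialization, so the property is already
            // visible when JNA performs its first native-library lookup
            System.setProperty("jna.library.path", PLATFORM_LIB_DIR.toString());
        }

        public static void main(String[] args) {
            System.out.println(System.getProperty("jna.library.path"));
        }
    }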
--- .../nativeaccess/jna/JnaNativeLibraryProvider.java | 11 +++++++++++ .../elasticsearch/nativeaccess/lib/LoaderHelper.java | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 79caf04c97246..e0233187425ea 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -8,9 +8,11 @@ package org.elasticsearch.nativeaccess.jna; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.Kernel32Library; import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; @@ -23,6 +25,10 @@ public class JnaNativeLibraryProvider extends NativeLibraryProvider { + static { + setJnaLibraryPath(); + } + public JnaNativeLibraryProvider() { super( "jna", @@ -45,6 +51,11 @@ public JnaNativeLibraryProvider() { ); } + @SuppressForbidden(reason = "jna library path must be set for load library to work with our own libs") + private static void setJnaLibraryPath() { + System.setProperty("jna.library.path", LoaderHelper.platformLibDir.toString()); + } + private static Supplier notImplemented() { return () -> { throw new AssertionError(); }; } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java index 4da52c415c040..42ca60b81a027 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java @@ -16,7 +16,7 @@ * A utility for loading libraries from Elasticsearch's platform specific lib dir. */ public class LoaderHelper { - private static final Path platformLibDir = findPlatformLibDir(); + public static final Path platformLibDir = findPlatformLibDir(); private static Path findPlatformLibDir() { // tests don't have an ES install, so the platform dir must be passed in explicitly From 535e9edced9995e8411b46622e29f8ae006ab4f1 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Tue, 27 Aug 2024 06:38:11 +0400 Subject: [PATCH 084/352] Add ingest-geoip module to rest-resources-zip (#112216) --- modules/ingest-geoip/build.gradle | 4 ++++ x-pack/rest-resources-zip/build.gradle | 1 + 2 files changed, 5 insertions(+) diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 5bdb6da5c7b29..bc5bb165cd0d2 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -88,3 +88,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTestsByFilePattern("**/ingest_geoip/20_geoip_processor.yml", "from 8.0 yaml rest tests use geoip test fixture and default geoip are no longer packaged. 
In 7.x yaml tests used default databases which makes tests results very different, so skipping these tests") // task.skipTest("lang_mustache/50_multi_search_template/Multi-search template with errors", "xxx") } + +artifacts { + restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/x-pack/rest-resources-zip/build.gradle b/x-pack/rest-resources-zip/build.gradle index cc5bddf12d801..0133ff80dfadf 100644 --- a/x-pack/rest-resources-zip/build.gradle +++ b/x-pack/rest-resources-zip/build.gradle @@ -21,6 +21,7 @@ dependencies { freeTests project(path: ':rest-api-spec', configuration: 'restTests') freeTests project(path: ':modules:aggregations', configuration: 'restTests') freeTests project(path: ':modules:analysis-common', configuration: 'restTests') + freeTests project(path: ':modules:ingest-geoip', configuration: 'restTests') compatApis project(path: ':rest-api-spec', configuration: 'restCompatSpecs') compatApis project(path: ':x-pack:plugin', configuration: 'restCompatSpecs') freeCompatTests project(path: ':rest-api-spec', configuration: 'restCompatTests') From d14fe7733b2ce361e08c05624668fddbf2763a86 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 27 Aug 2024 17:03:01 +1000 Subject: [PATCH 085/352] Expand RecordingInstrucments to support collection of observers (#112195) The support is needed for RecordingInstruments to be used in tests for guages with a collection of observers. Relates: #110630 --- .../telemetry/RecordingInstruments.java | 29 ++++++++----- .../telemetry/RecordingMeterRegistry.java | 42 +++++++++++-------- 2 files changed, 43 insertions(+), 28 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java index 35417c16e7e1c..49e667bb74e5b 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java @@ -24,6 +24,7 @@ import org.elasticsearch.telemetry.metric.LongUpDownCounter; import org.elasticsearch.telemetry.metric.LongWithAttributes; +import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -53,7 +54,7 @@ public String getName() { } } - protected interface NumberWithAttributesObserver extends Supplier>> { + protected interface NumberWithAttributesObserver extends Supplier>>> { } @@ -74,7 +75,7 @@ public void run() { return; } var observation = observer.get(); - call(observation.v1(), observation.v2()); + observation.forEach(o -> call(o.v1(), o.v2())); } } @@ -109,10 +110,10 @@ public void incrementBy(double inc, Map attributes) { } public static class RecordingDoubleGauge extends CallbackRecordingInstrument implements DoubleGauge { - public RecordingDoubleGauge(String name, Supplier observer, MetricRecorder recorder) { + public RecordingDoubleGauge(String name, Supplier> observer, MetricRecorder recorder) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } } @@ -172,10 +173,14 @@ public void incrementBy(long inc, Map attributes) { public static class RecordingAsyncLongCounter extends CallbackRecordingInstrument implements LongAsyncCounter { - public RecordingAsyncLongCounter(String name, Supplier observer, MetricRecorder recorder) { + 
public RecordingAsyncLongCounter( + String name, + Supplier> observer, + MetricRecorder recorder + ) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } @@ -183,10 +188,14 @@ public RecordingAsyncLongCounter(String name, Supplier obser public static class RecordingAsyncDoubleCounter extends CallbackRecordingInstrument implements DoubleAsyncCounter { - public RecordingAsyncDoubleCounter(String name, Supplier observer, MetricRecorder recorder) { + public RecordingAsyncDoubleCounter( + String name, + Supplier> observer, + MetricRecorder recorder + ) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } @@ -194,10 +203,10 @@ public RecordingAsyncDoubleCounter(String name, Supplier o public static class RecordingLongGauge extends CallbackRecordingInstrument implements LongGauge { - public RecordingLongGauge(String name, Supplier observer, MetricRecorder recorder) { + public RecordingLongGauge(String name, Supplier> observer, MetricRecorder recorder) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } } diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java index 97fe0ad1370ef..392445aa77a8f 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java @@ -24,6 +24,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.Collection; +import java.util.Collections; import java.util.function.Supplier; /** @@ -72,9 +73,7 @@ protected DoubleUpDownCounter buildDoubleUpDownCounter(String name, String descr @Override public DoubleGauge registerDoubleGauge(String name, String description, String unit, Supplier observer) { - DoubleGauge instrument = buildDoubleGauge(name, description, unit, observer); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerDoublesGauge(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -84,7 +83,9 @@ public DoubleGauge registerDoublesGauge( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + DoubleGauge instrument = buildDoubleGauge(name, description, unit, observer); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -92,7 +93,12 @@ public DoubleGauge getDoubleGauge(String name) { return (DoubleGauge) recorder.getInstrument(InstrumentType.DOUBLE_GAUGE, name); } - protected DoubleGauge buildDoubleGauge(String name, String description, String unit, Supplier observer) { + protected DoubleGauge buildDoubleGauge( + String name, + String description, + String unit, + Supplier> observer + ) { return new RecordingInstruments.RecordingDoubleGauge(name, observer, recorder); } @@ -121,9 +127,7 @@ public 
LongCounter registerLongCounter(String name, String description, String u @Override public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier observer) { - LongAsyncCounter instrument = new RecordingInstruments.RecordingAsyncLongCounter(name, observer, recorder); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerLongsAsyncCounter(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -133,7 +137,9 @@ public LongAsyncCounter registerLongsAsyncCounter( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + LongAsyncCounter instrument = new RecordingInstruments.RecordingAsyncLongCounter(name, observer, recorder); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -148,9 +154,7 @@ public DoubleAsyncCounter registerDoubleAsyncCounter( String unit, Supplier observer ) { - DoubleAsyncCounter instrument = new RecordingInstruments.RecordingAsyncDoubleCounter(name, observer, recorder); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerDoublesAsyncCounter(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -160,7 +164,9 @@ public DoubleAsyncCounter registerDoublesAsyncCounter( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + DoubleAsyncCounter instrument = new RecordingInstruments.RecordingAsyncDoubleCounter(name, observer, recorder); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -196,14 +202,14 @@ protected LongUpDownCounter buildLongUpDownCounter(String name, String descripti @Override public LongGauge registerLongGauge(String name, String description, String unit, Supplier observer) { - LongGauge instrument = buildLongGauge(name, description, unit, observer); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerLongsGauge(name, description, unit, () -> Collections.singleton(observer.get())); } @Override public LongGauge registerLongsGauge(String name, String description, String unit, Supplier> observer) { - throw new UnsupportedOperationException("not implemented"); + LongGauge instrument = buildLongGauge(name, description, unit, observer); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -211,7 +217,7 @@ public LongGauge getLongGauge(String name) { return (LongGauge) recorder.getInstrument(InstrumentType.LONG_GAUGE, name); } - protected LongGauge buildLongGauge(String name, String description, String unit, Supplier observer) { + protected LongGauge buildLongGauge(String name, String description, String unit, Supplier> observer) { return new RecordingInstruments.RecordingLongGauge(name, observer, recorder); } From 303b2274766595c2bbbd2b339345cfa6b6a2009e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:05:46 +0100 Subject: [PATCH 086/352] Add link to warning re. single-node clusters (#112114) Expands the message added in #88013 to include a link to the relevant docs. 
--- .../cluster/coordination/Coordinator.java | 7 +++++-- .../java/org/elasticsearch/common/ReferenceDocs.java | 1 + .../elasticsearch/common/reference-docs-links.json | 3 ++- .../cluster/coordination/CoordinatorTests.java | 11 ++++++++++- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 437219b312045..e922d130d7f83 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -41,6 +41,7 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -831,10 +832,12 @@ public void run() { discover other nodes and form a multi-node cluster via the [{}={}] setting. Fully-formed clusters do \ not attempt to discover other nodes, and nodes with different cluster UUIDs cannot belong to the same \ cluster. The cluster UUID persists across restarts and can only be changed by deleting the contents of \ - the node's data path(s). Remove the discovery configuration to suppress this message.""", + the node's data path(s). Remove the discovery configuration to suppress this message. See [{}] for \ + more information.""", applierState.metadata().clusterUUID(), DISCOVERY_SEED_HOSTS_SETTING.getKey(), - DISCOVERY_SEED_HOSTS_SETTING.get(settings) + DISCOVERY_SEED_HOSTS_SETTING.get(settings), + ReferenceDocs.FORMING_SINGLE_NODE_CLUSTERS ); } } diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index f710ae7c3b84a..59c55fb7b624a 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -81,6 +81,7 @@ public enum ReferenceDocs { MAX_SHARDS_PER_NODE, FLOOD_STAGE_WATERMARK, X_OPAQUE_ID, + FORMING_SINGLE_NODE_CLUSTERS, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index 8288ca792b0f1..3eb8939c22a65 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -41,5 +41,6 @@ "LUCENE_MAX_DOCS_LIMIT": "size-your-shards.html#troubleshooting-max-docs-limit", "MAX_SHARDS_PER_NODE": "size-your-shards.html#troubleshooting-max-shards-open", "FLOOD_STAGE_WATERMARK": "fix-watermark-errors.html", - "X_OPAQUE_ID": "api-conventions.html#x-opaque-id" + "X_OPAQUE_ID": "api-conventions.html#x-opaque-id", + "FORMING_SINGLE_NODE_CLUSTERS": "modules-discovery-bootstrap-cluster.html#modules-discovery-bootstrap-cluster-joining" } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index b57badb3a180f..bf64b29d364e0 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterStateUpdateStats; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; @@ -79,6 +80,8 @@ import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.elasticsearch.monitor.StatusInfo.Status.HEALTHY; import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -1762,7 +1765,13 @@ public void testLogsWarningPeriodicallyIfSingleNodeClusterHasSeedHosts() { @Override public void match(LogEvent event) { final String message = event.getMessage().getFormattedMessage(); - assertThat(message, startsWith("This node is a fully-formed single-node cluster with cluster UUID")); + assertThat( + message, + allOf( + startsWith("This node is a fully-formed single-node cluster with cluster UUID"), + containsString(ReferenceDocs.FORMING_SINGLE_NODE_CLUSTERS.toString()) + ) + ); loggedClusterUuid = (String) event.getMessage().getParameters()[0]; } From ec90d2c1239bf848914dc4411c676a1f05f2777a Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:06:05 +0100 Subject: [PATCH 087/352] Reduce nesting in restore-snapshot path (#112107) Also cleans up the exception-handling a little to ensure that all failures are logged. 
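For readers less familiar with the utility, the refactor below flattens nested `ListenableFuture` callbacks into a single flat `SubscribableListener` chain. A minimal sketch of the resulting shape, using only API calls that appear in the diff; `firstAsyncStep`, `secondAsyncStep` and `stash` are illustrative placeholders, not part of the change:

    SubscribableListener
        .newForked(l -> firstAsyncStep(l))            // fork the first async step
        .andThenAccept(result -> stash(result))       // synchronous hop, runs only on success
        .andThen((l, ignored) -> secondAsyncStep(l))  // next async step joins the same chain
        .addListener(listener.delegateResponse((delegate, e) -> {
            logger.warn("operation failed", e);       // every failure funnels through here
            delegate.onFailure(e);
        }));

Because each step runs only if its predecessor succeeded, a failure at any stage falls straight through to the single `addListener` handler, which is how the rewrite ensures that all failures are logged.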
--- .../snapshots/RestoreService.java | 114 +++++++++--------- 1 file changed, 59 insertions(+), 55 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 0f03cfab4ad2e..d8987495f9035 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; @@ -56,7 +57,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; @@ -92,9 +92,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -248,62 +248,66 @@ public void restoreSnapshot( final BiConsumer updater ) { assert Repository.assertSnapshotMetaThread(); - try { - // Try and fill in any missing repository UUIDs in case they're needed during the restore - final var repositoryUuidRefreshStep = new ListenableFuture(); - refreshRepositoryUuids( - refreshRepositoryUuidOnRestore, - repositoriesService, - () -> repositoryUuidRefreshStep.onResponse(null), - snapshotMetaExecutor - ); - // Read snapshot info and metadata from the repository - final String repositoryName = request.repository(); - Repository repository = repositoriesService.repository(repositoryName); - final ListenableFuture repositoryDataListener = new ListenableFuture<>(); - repository.getRepositoryData(snapshotMetaExecutor, repositoryDataListener); - - repositoryDataListener.addListener( - listener.delegateFailureAndWrap( - (delegate, repositoryData) -> repositoryUuidRefreshStep.addListener( - delegate.delegateFailureAndWrap((subDelegate, ignored) -> { - assert Repository.assertSnapshotMetaThread(); - final String snapshotName = request.snapshot(); - final Optional matchingSnapshotId = repositoryData.getSnapshotIds() - .stream() - .filter(s -> snapshotName.equals(s.getName())) - .findFirst(); - if (matchingSnapshotId.isPresent() == false) { - throw new SnapshotRestoreException(repositoryName, snapshotName, "snapshot does not exist"); - } + // Try and fill in any missing repository UUIDs in case they're needed during the restore + final var repositoryUuidRefreshStep = SubscribableListener.newForked( + l -> refreshRepositoryUuids(refreshRepositoryUuidOnRestore, repositoriesService, () -> l.onResponse(null), snapshotMetaExecutor) + ); - final SnapshotId snapshotId = matchingSnapshotId.get(); - if (request.snapshotUuid() != null && request.snapshotUuid().equals(snapshotId.getUUID()) == false) { - throw new SnapshotRestoreException( - repositoryName, - snapshotName, - "snapshot 
UUID mismatch: expected [" - + request.snapshotUuid() - + "] but got [" - + snapshotId.getUUID() - + "]" - ); - } - repository.getSnapshotInfo( - snapshotId, - subDelegate.delegateFailureAndWrap( - (l, snapshotInfo) -> startRestore(snapshotInfo, repository, request, repositoryData, updater, l) - ) - ); - }) - ) + // AtomicReference just so we have somewhere to hold these objects, there's no interesting concurrency here + final AtomicReference repositoryRef = new AtomicReference<>(); + final AtomicReference repositoryDataRef = new AtomicReference<>(); + + SubscribableListener + + .newForked(repositorySetListener -> { + // do this within newForked for exception handling + repositoryRef.set(repositoriesService.repository(request.repository())); + repositorySetListener.onResponse(null); + }) + + .andThen( + repositoryDataListener -> repositoryRef.get().getRepositoryData(snapshotMetaExecutor, repositoryDataListener) + ) + .andThenAccept(repositoryDataRef::set) + .andThen(repositoryUuidRefreshStep::addListener) + + .andThen(snapshotInfoListener -> { + assert Repository.assertSnapshotMetaThread(); + final String snapshotName = request.snapshot(); + final SnapshotId snapshotId = repositoryDataRef.get() + .getSnapshotIds() + .stream() + .filter(s -> snapshotName.equals(s.getName())) + .findFirst() + .orElseThrow(() -> new SnapshotRestoreException(request.repository(), snapshotName, "snapshot does not exist")); + + if (request.snapshotUuid() != null && request.snapshotUuid().equals(snapshotId.getUUID()) == false) { + throw new SnapshotRestoreException( + request.repository(), + snapshotName, + "snapshot UUID mismatch: expected [" + request.snapshotUuid() + "] but got [" + snapshotId.getUUID() + "]" + ); + } + + repositoryRef.get().getSnapshotInfo(snapshotId, snapshotInfoListener); + }) + + .andThen( + (responseListener, snapshotInfo) -> startRestore( + snapshotInfo, + repositoryRef.get(), + request, + repositoryDataRef.get(), + updater, + responseListener ) - ); - } catch (Exception e) { - logger.warn(() -> "[" + request.repository() + ":" + request.snapshot() + "] failed to restore snapshot", e); - listener.onFailure(e); - } + ) + + .addListener(listener.delegateResponse((delegate, e) -> { + logger.warn(() -> "[" + request.repository() + ":" + request.snapshot() + "] failed to restore snapshot", e); + delegate.onFailure(e); + })); } /** From bff45aaa8a2d53d3de44c66a2c692664fa3b3d46 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:06:20 +0100 Subject: [PATCH 088/352] Reduce `CompletableFuture` usage in tests (#111848) Fixes some spots in tests where we use `CompletableFuture` instead of one of the preferred alternatives. 
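To make the substitution concrete, here is its shape, sketched from the calls visible in the diffs below; `doSomethingAsync` and `someCallback` are illustrative placeholders for the code under test, and the fragments are independent alternatives rather than one compilation unit:

    // before: bridge the callback into a CompletableFuture and block on an unbounded get()
    CompletableFuture future = new CompletableFuture<>();
    doSomethingAsync(ActionListener.wrap(future::complete, future::completeExceptionally));
    var response = future.get();

    // after: safeAwait builds the listener and waits with the test framework's timeout
    var response = safeAwait(l -> doSomethingAsync(l));

    // after, when only a failure is expected:
    var exception = safeAwaitFailure(BulkResponse.class, l -> doSomethingAsync(l));

    // after, when a value is produced inside some other callback:
    PlainActionFuture valueFuture = new PlainActionFuture<>();
    someCallback(value -> valueFuture.onResponse(value));
    var captured = safeGet(valueFuture);

The test-framework helpers fail with an assertion error after a bounded wait instead of hanging on `get()`, and they spare callers the checked `ExecutionException` unwrapping that the `CompletableFuture` pattern required.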
--- .../grok/MatcherWatchdogTests.java | 9 +- .../action/bulk/BulkOperationTests.java | 136 +++++------------- .../ingest/ConditionalProcessorTests.java | 8 +- .../ingest/PipelineProcessorTests.java | 10 +- .../security/authc/ApiKeyServiceTests.java | 16 +-- 5 files changed, 53 insertions(+), 126 deletions(-) diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java index b66778743aec0..5ed1a7d13b80a 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java @@ -7,12 +7,12 @@ */ package org.elasticsearch.grok; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.test.ESTestCase; import org.joni.Matcher; import org.mockito.Mockito; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -77,16 +77,17 @@ public void testIdleIfNothingRegistered() throws Exception { ); // Periodic action is not scheduled because no thread is registered verifyNoMoreInteractions(threadPool); - CompletableFuture commandFuture = new CompletableFuture<>(); + + PlainActionFuture commandFuture = new PlainActionFuture<>(); // Periodic action is scheduled because a thread is registered doAnswer(invocationOnMock -> { - commandFuture.complete((Runnable) invocationOnMock.getArguments()[0]); + commandFuture.onResponse(invocationOnMock.getArgument(0)); return null; }).when(threadPool).schedule(any(Runnable.class), eq(interval), eq(TimeUnit.MILLISECONDS)); Matcher matcher = mock(Matcher.class); watchdog.register(matcher); // Registering the first thread should have caused the command to get scheduled again - Runnable command = commandFuture.get(1L, TimeUnit.MILLISECONDS); + Runnable command = safeGet(commandFuture); Mockito.reset(threadPool); watchdog.unregister(matcher); command.run(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java index e950901a538b4..0c0e1de74a3e7 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterName; @@ -60,9 +61,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -201,9 +200,6 @@ public void tearDownThreadpool() { public void testClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Not retryable ClusterState state 
= ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK).build()) @@ -215,9 +211,10 @@ public void testClusterBlockedFailsBulk() { when(observer.isTimedOut()).thenReturn(false); doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); - - expectThrows(ExecutionException.class, ClusterBlockException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(ClusterBlockException.class) + ); } /** @@ -226,9 +223,6 @@ public void testClusterBlockedFailsBulk() { public void testTimeoutOnRetryableClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Retryable final ClusterState state = ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) @@ -248,9 +242,11 @@ public void testTimeoutOnRetryableClusterBlockedFailsBulk() { return null; }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(ClusterBlockException.class) + ); - expectThrows(ExecutionException.class, ClusterBlockException.class, future::get); verify(observer, times(2)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); } @@ -261,9 +257,6 @@ public void testTimeoutOnRetryableClusterBlockedFailsBulk() { public void testNodeClosedOnRetryableClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Retryable final ClusterState state = ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) @@ -278,9 +271,10 @@ public void testNodeClosedOnRetryableClusterBlockedFailsBulk() { return null; }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); - - expectThrows(ExecutionException.class, NodeClosedException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(NodeClosedException.class) + ); verify(observer, times(1)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); } @@ -296,12 +290,7 @@ public void testBulkToIndex() throws Exception { NodeClient client = getNodeClient(acceptAllShardWrites()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); } @@ -318,12 
+307,7 @@ public void testBulkToIndexFailingEntireShard() throws Exception { shardSpecificResponse(Map.of(new ShardId(indexMetadata.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -344,12 +328,7 @@ public void testBulkToDataStream() throws Exception { NodeClient client = getNodeClient(acceptAllShardWrites()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); } @@ -366,12 +345,7 @@ public void testBulkToDataStreamFailingEntireShard() throws Exception { shardSpecificResponse(Map.of(new ShardId(ds1BackingIndex2.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -396,12 +370,7 @@ public void testFailingEntireShardRedirectsToFailureStore() throws Exception { shardSpecificResponse(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -426,12 +395,7 @@ public void testFailingDocumentRedirectsToFailureStore() throws Exception { thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("test"))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem = 
Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -465,12 +429,7 @@ public void testFailureStoreShardFailureRejectsDocument() throws Exception { ) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -500,16 +459,12 @@ public void testFailedDocumentCanNotBeConvertedFails() throws Exception { thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("root cause"))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Mock a failure store document converter that always fails FailureStoreDocumentConverter mockConverter = mock(FailureStoreDocumentConverter.class); when(mockConverter.transformFailedRequest(any(), any(), any(), any())).thenThrow(new IOException("Could not serialize json")); - newBulkOperation(client, bulkRequest, mockConverter, listener).run(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, mockConverter, l).run()); - BulkResponse bulkItemResponses = future.get(); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -579,13 +534,10 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { return null; }).when(observer).waitForNextChange(any()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.notifyOnce( - ActionListener.wrap(future::complete, future::completeExceptionally) + final SubscribableListener responseListener = SubscribableListener.newForked( + l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run() ); - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - // The operation will attempt to write the documents in the request, receive a failure, wait for a stable cluster state, and then // redirect the failed documents to the failure store. 
Wait for that failure store write to start: if (readyToPerformFailureStoreWrite.await(30, TimeUnit.SECONDS) == false) { @@ -595,7 +547,7 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { } // Check to make sure there is no response yet - if (future.isDone()) { + if (responseListener.isDone()) { // we're going to fail the test, but be a good citizen and unblock the other thread first beginFailureStoreWrite.countDown(); fail("bulk operation completed prematurely"); @@ -605,7 +557,7 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { beginFailureStoreWrite.countDown(); // Await final result and verify - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(responseListener); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -650,12 +602,7 @@ public void testBlockedClusterRejectsFailureStoreDocument() throws Exception { when(observer.isTimedOut()).thenReturn(false); doThrow(new AssertionError("Should not wait on non retryable block")).when(observer).waitForNextChange(any()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -715,12 +662,7 @@ public void testOperationTimeoutRejectsFailureStoreDocument() throws Exception { return null; }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -775,12 +717,10 @@ public void testNodeClosureRejectsFailureStoreDocument() { return null; }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - expectThrows(ExecutionException.class, NodeClosedException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()), + instanceOf(NodeClosedException.class) + ); verify(observer, times(1)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); @@ -832,12 +772,7 @@ public void testLazilyRollingOverFailureStore() throws Exception { ClusterState rolledOverState = 
ClusterState.builder(DEFAULT_STATE).metadata(metadata).build(); ClusterStateObserver observer = mockObserver(DEFAULT_STATE, DEFAULT_STATE, rolledOverState); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds3FailureStore2.getIndex().getName())) .findFirst() @@ -880,12 +815,7 @@ public void testFailureWhileRollingOverFailureStore() throws Exception { ClusterState rolledOverState = ClusterState.builder(DEFAULT_STATE).metadata(metadata).build(); ClusterStateObserver observer = mockObserver(DEFAULT_STATE, DEFAULT_STATE, rolledOverState); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) .findFirst() diff --git a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java index 3a6de10b5901d..546b252615b28 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.IngestConditionalScript; import org.elasticsearch.script.MockScriptEngine; @@ -25,7 +26,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; @@ -242,14 +242,14 @@ public boolean execute(Map ctx) { private static void assertMutatingCtxThrows(Consumer> mutation) throws Exception { String scriptName = "conditionalScript"; - CompletableFuture expectedException = new CompletableFuture<>(); + PlainActionFuture expectedException = new PlainActionFuture<>(); ScriptService scriptService = new ScriptService( Settings.builder().build(), Map.of(Script.DEFAULT_SCRIPT_LANG, new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Map.of(scriptName, ctx -> { try { mutation.accept(ctx); } catch (Exception e) { - expectedException.complete(e); + expectedException.onResponse(e); } return false; }), Map.of())), @@ -267,7 +267,7 @@ private static void assertMutatingCtxThrows(Consumer> mutati IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); ingestDocument.setFieldValue("listField", new ArrayList<>()); execProcessor(processor, ingestDocument, (result, e) -> {}); - Exception e = expectedException.get(); + Exception e = safeGet(expectedException); assertThat(e, 
instanceOf(UnsupportedOperationException.class)); assertEquals("Mutating ingest documents in conditionals is not supported", e.getMessage()); assertStats(processor, 0, 0, 0); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java index cfbdbc3792082..d9058e83acfe0 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; import org.elasticsearch.test.ESTestCase; @@ -16,7 +17,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.LongSupplier; @@ -32,12 +32,12 @@ public class PipelineProcessorTests extends ESTestCase { public void testExecutesPipeline() throws Exception { String pipelineId = "pipeline"; IngestService ingestService = createIngestService(); - CompletableFuture invoked = new CompletableFuture<>(); + PlainActionFuture invoked = new PlainActionFuture<>(); IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); Pipeline pipeline = new Pipeline(pipelineId, null, null, null, new CompoundProcessor(new Processor() { @Override - public IngestDocument execute(final IngestDocument ingestDocument) throws Exception { - invoked.complete(ingestDocument); + public IngestDocument execute(final IngestDocument ingestDocument) { + invoked.onResponse(ingestDocument); return ingestDocument; } @@ -61,7 +61,7 @@ public String getDescription() { Map config = new HashMap<>(); config.put("name", pipelineId); factory.create(Map.of(), null, null, config).execute(testIngestDocument, (result, e) -> {}); - assertIngestDocument(testIngestDocument, invoked.get()); + assertIngestDocument(testIngestDocument, safeGet(invoked)); } public void testThrowsOnMissingPipeline() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index f4d75434b92de..fa6eb307933ec 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -145,7 +145,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -3442,15 +3441,12 @@ public static Authentication createApiKeyAuthentication( Authentication.newApiKeyAuthentication(authenticationResult, "node01"), threadContext ); - final CompletableFuture authFuture = new CompletableFuture<>(); - securityContext.executeAfterRewritingAuthentication((c) -> { - try { - authFuture.complete(authenticationContextSerializer.readFromContext(threadContext)); - } catch (IOException e) { - throw new RuntimeException(e); - } - }, version); - return authFuture.get(); + return safeAwait( + l -> 
securityContext.executeAfterRewritingAuthentication( + c -> ActionListener.completeWith(l, () -> authenticationContextSerializer.readFromContext(threadContext)), + version + ) + ); } public static Authentication createApiKeyAuthentication(ApiKeyService apiKeyService, Authentication authentication) From 6d886bc48d71076d37a07faceb1e421b95ec48fd Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Tue, 27 Aug 2024 09:20:59 +0200 Subject: [PATCH 089/352] Add dataset for full text search testing (#112105) --- .../xpack/esql/CsvTestsDataLoader.java | 4 +- .../testFixtures/src/main/resources/books.csv | 80 +++++++++++++++++ .../src/main/resources/mapping-books.json | 30 +++++++ .../main/resources/match-operator.csv-spec | 90 ++++++++++++------- .../src/main/resources/match.csv-spec | 53 ++++++----- 5 files changed, 203 insertions(+), 54 deletions(-) create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index d5e70d264c9be..b20e3bb0d5409 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -100,6 +100,7 @@ public class CsvTestsDataLoader { private static final TestsDataset DISTANCES = new TestsDataset("distances", "mapping-distances.json", "distances.csv"); private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv", "k8s-settings.json", true); private static final TestsDataset ADDRESSES = new TestsDataset("addresses", "mapping-addresses.json", "addresses.csv", null, true); + private static final TestsDataset BOOKS = new TestsDataset("books", "mapping-books.json", "books.csv", null, true); public static final Map CSV_DATASET_MAP = Map.ofEntries( Map.entry(EMPLOYEES.indexName, EMPLOYEES), @@ -126,7 +127,8 @@ public class CsvTestsDataLoader { Map.entry(DATE_NANOS.indexName, DATE_NANOS), Map.entry(K8S.indexName, K8S), Map.entry(DISTANCES.indexName, DISTANCES), - Map.entry(ADDRESSES.indexName, ADDRESSES) + Map.entry(ADDRESSES.indexName, ADDRESSES), + Map.entry(BOOKS.indexName, BOOKS) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv new file mode 100644 index 0000000000000..1deefaa3c6475 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv @@ -0,0 +1,80 @@ +book_no:keyword,title:text,author:text,year:integer,publisher:text,ratings:float,description:text +2924,A Gentle Creature and Other Stories: White Nights\, A Gentle Creature\, and The Dream of a Ridiculous Man (The World's Classics),[Fyodor Dostoevsky, Alan Myers, W. J. Leatherbarrow],2009,Oxford Paperbacks,4.00,In these stories Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. 
+7670,A Middle English Reader and Vocabulary,[Kenneth Sisam, J. R. R. Tolkien],2011,Courier Corporation,4.33,This highly respected anthology of medieval English literature features poetry\, prose and popular tales from Arthurian legend and classical mythology. Includes notes on each extract\, appendices\, and an extensive glossary by J. R. R. Tolkien. +7381,A Psychic in the Heartland: The Extraordinary Experiences of a Small Town Doctor,Bettilu Stein Faulkner,2003,Red Wheel/Weiser,4.50,The true story of a small-town doctor destined to live his life along two paths: one as a successful physician\, the other as a psychic with ever more interesting adventures. Experiencing a wide range of spiritual phenomena\, Dr. Riblet Hout learned about the connection between the healer and the healed\, our individual missions on earth\, free will\, and our relationship with God. He also paints a vivid picture of life on the other side as well as the moment of transition from physical life to afterlife. +2883,A Summer of Faulkner: As I Lay Dying/The Sound and the Fury/Light in August (Oprah's Book Club),William Faulkner,2005,Vintage Books,3.89,Presents three novels\, including As I Lay Dying\, in which the Bundren family journeys across Mississippi to bury their mother\, The Sound and the Fury\, in which Caddy Compson's story is narrated by her three brothers\, and Light in August\, in which th +4023,A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings,[Walter Scheps, Agnes Perkins, Charles Adolph Huttar, John Ronald Reuel Tolkien],1975,Open Court Publishing,4.67,The structure\, content\, and character of Tolkien's The Hobbit and The Lord of the Rings are dealt with in ten critical essays. +2382,A Wizard of Earthsea (Earthsea Trilogy Ser.),Ursula K. Le Guin,1991,Atheneum Books for Young Readers,4.01,A boy grows to manhood while attempting to subdue the evil he unleashed on the world as an apprentice to the Master Wizard. +7541,A Writer's Diary (Volume 1: 1873-1876),Fyodor Dostoevsky,1997,Northwestern University Press,4.50,Winner of the AATSEEL Outstanding Translation Award This is the first paperback edition of the complete collection of writings that has been called Dostoevsky's boldest experiment with literary form\, it is a uniquely encyclopedic forum of fictional and nonfictional genres. The Diary's radical format was matched by the extreme range of its contents. In a single frame it incorporated an astonishing variety of material: short stories\, humorous sketches\, reports on sensational crimes\, historical predictions\, portraits of famous people\, autobiographical pieces\, and plans for stories\, some of which were never written while others appeared in the Diary itself. +7400,Anna Karenina: Television Tie-In Edition (Signet classics),[Leo Tolstoy, SBP Editors],2019,Samaira Book Publishers,4.45,The Russian novelist and moral philosopher Leo Tolstoy (1828-1910) ranks as one of the world s great writers\, and his 'War and Peace' has been called the greatest novel ever written. But during his long lifetime\, Tolstoy also wrote enough shorter works to fill many volumes. The message in all his stories is presented with such humour that the reader hardly realises that it is strongly didactic. These stories give a snapshot of Russia and its people in the late nineteenth century. 
+4917,Autumn of the Patriarch,Gabriel Garcia Marquez,2014,Penguin UK,4.33,Gabriel Garcia Marquez\, winner of the 1982 Nobel Prize for Literature and author of One Hundred Years of Solitude\, explores the loneliness of power in Autumn of the Patriarch. 'Over the weekend the vultures got into the presidential palace by pecking through the screens on the balcony windows and the flapping of their wings stirred up the stagnant time inside' As the citizens of an unnamed Caribbean nation creep through dusty corridors in search of their tyrannical leader\, they cannot comprehend that the frail and withered man lying dead on the floor can be the self-styled General of the Universe. Their arrogant\, manically violent leader\, known for serving up traitors to dinner guests and drowning young children at sea\, can surely not die the humiliating death of a mere mortal? Tracing the demands of a man whose egocentric excesses mask the loneliness of isolation and whose lies have become so ingrained that they are indistinguishable from truth\, Marquez has created a fantastical portrait of despotism that rings with an air of reality. 'Delights with its quirky humanity and black humour and impresses by its total originality' Vogue 'Captures perfectly the moral squalor and political paralysis that enshrouds a society awaiting the death of a long-term dictator' Guardian 'Marquez writes in this lyrical\, magical language that no-one else can do' Salman Rushdie +9896,Barn burning (A tale blazer book),William Faulkner,1979,Perfection Learning,3.50,Reprinted from Collected Stories of William Faulkner\, by permission of Random House\, Inc. +9607,Beowolf: The monsters and the critics,John Ronald Reuel Tolkien,1997,HarperCollins UK,4.12,A collection of seven essays by J.R.R. Tolkien arising out of Tolkien's work in medieval literature +1985,Brothers Karamazov,Fyodor Dostoevsky,2015,First Avenue Editions,5.00,Four brothers reunite in their hometown in Russia. The murder of their father forces the brothers to question their beliefs about each other\, religion\, and morality. +2713,Collected Stories of William Faulkner,William Faulkner,1995,Vintage,4.53,A collection of short stories focuses on the people of rural Mississippi +2464,Conversations with Kurt Vonnegut (Literary Conversations),Kurt Vonnegut,1988,Univ. Press of Mississippi,4.40,Gathers interviews with Vonnegut from each period of his career and offers a brief profile of his life and accomplishments +8534,Crime and Punishment (Oxford World's Classics),Fyodor Dostoevsky,2017,Oxford University Press,4.38,'One death\, in exchange for thousands of lives - it's simple arithmetic!' A new translation of Dostoevsky's epic masterpiece\, Crime and Punishment (1866). The impoverished student Raskolnikov decides to free himself from debt by killing an old moneylender\, an act he sees as elevating himself above conventional morality. Like Napoleon he will assert his will and his crime will be justified by its elimination of 'vermin' for the sake of the greater good. But Raskolnikov is torn apart by fear\, guilt\, and a growing conscience under the influence of his love for Sonya. Meanwhile the police detective Porfiry is on his trial. It is a powerfully psychological novel\, in which the St Petersburg setting\, Dostoevsky's own circumstances\, and contemporary social problems all play their part. 
+8605,Dead Souls,Nikolai Gogol,1997,Vintage,4.28,Chichikov\, an amusing and often confused schemer\, buys deceased serfs' names from landholders' poll tax lists hoping to mortgage them for profit +6970,Domestic Goddesses,Edith Vonnegut,1998,Pomegranate,4.67,In this immensely charming and insightful book\, artist Edith Vonnegut takes issue with traditional art imagery in which women are shown as weak and helpless. Through twenty-seven of her own paintings interspersed with her text\, she poignantly -- and humorously -- illustrates her maxim that the lives of mothers and homemakers are filled with endless challenges and vital decisions that should be portrayed with the dignity they deserve. In Vonnegut's paintings\, one woman bravely blocks the sun from harming a child (Sun Block) while another vacuums the stairs with angelic figures singing her praises (Electrolux). In contrasting her own Domestic Goddesses with the diaphanous women of classical art (seven paintings by masters such as Titian and Botticelli are included)\, she 'expresses the importance of traditional roles of women so cleverly and with such joy that her message and images will be forever emblazoned on our collective psyche. +4814,El Coronel No Tiene Quien Le Escriba / No One Writes to the Colonel (Spanish Edition),Gabriel Garcia Marquez,2005,Harper Collins,4.45,Written with compassionate realism and wit\, the stories in this mesmerizing collection depict the disparities of town and village life in South America\, of the frightfully poor and outrageously rich\, of memories and illusions\, and of lost opportunities and present joys. +4636,FINAL WITNESS,Simon Tolkien,2004,Random House Digital\, Inc.,3.94,The murder of Lady Anne Robinson by two intruders causes a schism in the victim's family when her son convinces police that his father's beautiful personal assistant hired the killers\, while his father\, the British minister of defense\, refuses to believe his son and marries the accused. A first novel. Reprint. +2936,Fellowship of the Ring 2ND Edition,John Ronald Reuel Tolkien,2008,HarperCollins UK,4.43,Sauron\, the Dark Lord\, has gathered to him all the Rings of Power - the means by which he intends to rule Middle-earth. All he lacks in his plans for dominion is the One Ring - the ring that rules them all - which has fallen into the hands of the hobbit\, Bilbo Baggins. In a sleepy village in the Shire\, young Frodo Baggins finds himself faced with an immense task\, as his elderly cousin Bilbo entrusts the Ring to his care. Frodo must leave his home and make a perilous journey across Middle-earth to the Cracks of Doom\, there to destroy the Ring and foil the Dark Lord in his evil purpose. JRR Tolkien's great work of imaginative fiction has been labelled both a heroic romance and a classic fantasy fiction. By turns comic and homely\, epic and diabolic\, the narrative moves through countless changes of scene and character in an imaginary world which is totally convincing in its detail. +8956,GOD BLESS YOU MR. ROSEWATER : Or Pearls Before Swine,Kurt Vonnegut,1970,New York : Dell,4.00,A lawyer schemes to gain control of a large fortune by having the present claimant declared insane. 
+6818,Hadji Murad,Leo Tolstoy,2022,Hachette UK,3.88,'How truth thickens and deepens when it migrates from didactic fable to the raw experience of a visceral awakening is one of the thrills of Tolstoy's stories' Sharon Cameron in her preface to Hadji Murad and Other Stories This\, the third volume of Tolstoy's shorter fiction concentrates on his later stories\, including one of his greatest\, 'Hadji Murad'. In the stark form of homily that shapes these later works\, life considered as one's own has no rational meaning. From the chain of events that follows in the wake of two schoolboys' deception in 'The Forged Coupon' to the disillusionment of the narrator in 'After the Ball' we see\, in Virginia Woolf's observation\, that Tolstoy puts at the centre of his writing one 'who gathers into himself all experience\, turns the world round between his fingers\, and never ceases to ask\, even as he enjoys it\, what is the meaning of it'. The riverrun edition reissues the translation of Louise and Aylmer Maude\, whose influential versions of Tolstoy first brought his work to a wide readership in English. +3950,Hocus,Kurt Vonnegut,1997,Penguin,4.67,Tarkington College\, a small\, exclusive college in upstate New York\, is turned upside down when ten thousand prisoners from the maximum security prison across Lake Mohiga break out and head for the college +5404,Intruder in the dust,William Faulkner,2011,Vintage,3.18,A classic Faulkner novel which explores the lives of a family of characters in the South. An aging black who has long refused to adopt the black's traditionally servile attitude is wrongfully accused of murdering a white man. +5578,Intruder in the dust: A novel,William Faulkner,1991,Vintage,3.18,Dramatizes the events that surround the murder of a white man in a volatile Southern community +6380,La hojarasca (Spanish Edition),Gabriel Garcia Marquez,1979,Harper Collins,3.75,Translated from the Spanish by Gregory Rabassa +5335,Letters of J R R Tolkien,J.R.R. Tolkien,2014,HarperCollins,4.70,This collection will entertain all who appreciate the art of masterful letter writing. The Letters of J.R.R Tolkien sheds much light on Tolkien's creative genius and grand design for the creation of a whole new world: Middle-earth. Featuring a radically expanded index\, this volume provides a valuable research tool for all fans wishing to trace the evolution of THE HOBBIT and THE LORD OF THE RINGS. +3870,My First 100 Words in Spanish/English (My First 100 Words Pull-Tab Book),Keith Faulkner,1998,Libros Para Ninos,4.50,Learning a foreign language has never been this much fun! Just pull the sturdy tabs and change the words under the pictures from English to Spanish and back again to English! +4502,O'Brian's Bride,Colleen Faulkner,1995,Zebra Books,5.00,Abandoning her pampered English life to marry a man in the American colonies\, Elizabeth finds her new world shattered when her husband is killed in an accident\, leaving her in charge of a business on the untamed frontier. Original. +7635,Oliphaunt (Beastly Verse),J. R. R. Tolkien,1989,Contemporary Books,2.50,A poem in which an elephant describes himself and his way of life. On board pages. +3254,Pearl and Sir Orfeo,[John Ronald Reuel Tolkien, Christopher Tolkien],1995,Harpercollins Pub Limited,5.00,Three epic poems from 14th century England speak of life during the age of chivalry. Translated from medieval English. +3677,Planet of Exile,Ursula K. 
Le Guin,1979,Orion,4.20,PLAYAWAY: An alliance between the powerful Tevars and the brown-skinned\, clairvoyant Farbons must take place if the two colonies are to withstand the fierce attack of the nomadic tribes from the north of the planet Eltanin. +4289,Poems from the Hobbit,J R R Tolkien,1999,HarperCollins Publishers,4.00,A collection of J.R.R. Tolkien's Hobbit poems in a miniature hardback volume complete with illustrations by Tolkien himself. Far over misty mountains cold To dungeons deep and caverns old We must away ere break of day To seek the pale enchanted gold. J.R.R. Tolkien's acclaimed The Hobbit contains 12 poems which are themselves masterpieces of writing. This miniature book\, illustrated with 30 of Tolkien's own paintings and drawings from the book -- some quite rare and all in full colour -- includes all the poems\, plus Gollum's eight riddles in verse\, and will be a perfect keepsake for lovers of The Hobbit and of accomplished poetry. +6151,Pop! Went Another Balloon: A Magical Counting Storybook (Magical Counting Storybooks),[Keith Faulkner, Rory Tyger],2003,Dutton Childrens Books,5.00,Toby the turtle goes from in-line skates to a motorcycle to a rocketship with a handful of balloons that pop\, one by one\, along the way. +3535,Rainbow's End: A Magical Story and Moneybox,[Keith Faulkner, Beverlie Manson],2003,Barrons Juveniles,4.00,In this combination picture storybook and coin bank\, the unusual front cover shows an illustration from the story that's embellished with five transparent plastic windows. Opening the book\, children will find a story about a poor little ballerina who is crying because her dancing shoes are worn and she has no money to replace them. Full color. Consumable. +8423,Raising Faithful Kids in a Fast-Paced World,Paul Faulkner,1995,Howard Publishing Company,5.00,To find help for struggling parents\, Dr. Paul Faulkner--renowned family counselor and popular speaker--interviewed 30 successful families who have managed to raise faithful kids while also maintaining demanding careers. The invaluable strategies and methods he gleaned are now available in this powerful book delivered in Dr. Faulkner's warm\, humorous style. +1463,Realms of Tolkien: Images of Middle-earth,J. R. R. Tolkien,1997,HarperCollins Publishers,4.00,Twenty new and familiar Tolkien artists are represented in this fabulous volume\, breathing an extraordinary variety of life into 58 different scenes\, each of which is accompanied by appropriate passage from The Hobbit and The Lord of the Rings and The Silmarillion +6323,Resurrection (The Penguin classics),Leo Tolstoy,2009,Penguin,3.25,Leo Tolstoy's last completed novel\, Resurrection is an intimate\, psychological tale of guilt\, anger and forgiveness Serving on the jury at a murder trial\, Prince Dmitri Nekhlyudov is devastated when he sees the prisoner - Katyusha\, a young maid he seduced and abandoned years before. As Dmitri faces the consequences of his actions\, he decides to give up his life of wealth and luxury to devote himself to rescuing Katyusha\, even if it means following her into exile in Siberia. But can a man truly find redemption by saving another person? Tolstoy's most controversial novel\, Resurrection (1899) is a scathing indictment of injustice\, corruption and hypocrisy at all levels of society. Creating a vast panorama of Russian life\, from peasants to aristocrats\, bureaucrats to convicts\, it reveals Tolstoy's magnificent storytelling powers. 
Anthony Briggs' superb new translation preserves Tolstoy's gripping realism and satirical humour. In his introduction\, Briggs discusses the true story behind Resurrection\, Tolstoy's political and religious reasons for writing the novel\, his gift for characterization and the compelling psychological portrait of Dmitri. This edition also includes a chronology\, notes and a summary of chapters. For more than seventy years\, Penguin has been the leading publisher of classic literature in the English-speaking world. With more than 1\,700 titles\, Penguin Classics represents a global bookshelf of the best works throughout history and across genres and disciplines. Readers trust the series to provide authoritative texts enhanced by introductions and notes by distinguished scholars and contemporary authors\, as well as up-to-date translations by award-winning translators. +2714,Return of the King Being the Third Part of The Lord of the Rings,J. R. R. Tolkien,2012,HarperCollins,4.60,Concluding the story begun in The Hobbit\, this is the final part of Tolkien s epic masterpiece\, The Lord of the Rings\, featuring an exclusive cover image from the film\, the definitive text\, and a detailed map of Middle-earth. The armies of the Dark Lord Sauron are massing as his evil shadow spreads ever wider. Men\, Dwarves\, Elves and Ents unite forces to do battle agains the Dark. Meanwhile\, Frodo and Sam struggle further into Mordor in their heroic quest to destroy the One Ring. The devastating conclusion of J.R.R. Tolkien s classic tale of magic and adventure\, begun in The Fellowship of the Ring and The Two Towers\, features the definitive edition of the text and includes the Appendices and a revised Index in full. To celebrate the release of the first of Peter Jackson s two-part film adaptation of The Hobbit\, THE HOBBIT: AN UNEXPECTED JOURNEY\, this third part of The Lord of the Rings is available for a limited time with an exclusive cover image from Peter Jackson s award-winning trilogy. +7350,Return of the Shadow,[John Ronald Reuel Tolkien, Christopher Tolkien],2000,Mariner Books,5.00,In this sixth volume of The History of Middle-earth the story reaches The Lord of the Rings. In The Return of the Shadow (an abandoned title for the first volume) Christopher Tolkien describes\, with full citation of the earliest notes\, outline plans\, and narrative drafts\, the intricate evolution of The Fellowship of the Ring and the gradual emergence of the conceptions that transformed what J.R.R. Tolkien for long believed would be a far shorter book\, 'a sequel to The Hobbit'. The enlargement of Bilbo's 'magic ring' into the supremely potent and dangerous Ruling Ring of the Dark Lord is traced and the precise moment is seen when\, in an astonishing and unforeseen leap in the earliest narrative\, a Black Rider first rode into the Shire\, his significance still unknown. The character of the hobbit called Trotter (afterwards Strider or Aragorn) is developed while his indentity remains an absolute puzzle\, and the suspicion only very slowly becomes certainty that he must after all be a Man. The hobbits\, Frodo's companions\, undergo intricate permutations of name and personality\, and other major figures appear in strange modes: a sinister Treebeard\, in league with the Enemy\, a ferocious and malevolent Farmer Maggot. The story in this book ends at the point where J.R.R. 
Tolkien halted in the story for a long time\, as the Company of the Ring\, still lacking Legolas and Gimli\, stood before the tomb of Balin in the Mines of Moria. The Return of the Shadow is illustrated with reproductions of the first maps and notable pages from the earliest manuscripts. +6760,Roverandom,J. R. R. Tolkien,1999,Mariner Books,4.38,Rover\, a dog who has been turned into a toy dog encounters rival wizards and experiences various adventures on the moon with giant spiders\, dragon moths\, and the Great White Dragon. By the author of The Hobbit. Reprint. +8873,Searoad: Chronicles of Klatsand,Ursula K. Le Guin,2004,Shambhala Publications,5.00,A series of interlinking tales and a novella by the author of the Earthsea trilogy portrays the triumphs and struggles of several generations of women who independently control Klatsand\, a small resort town on the Oregon coast. Reprint. +2378,Selected Letters of Lucretia Coffin Mott (Women in American History),[Lucretia Mott, Holly Byers Ochoa, Carol Faulkner],2002,University of Illinois Press,5.00,Dedicated to reform of almost every kind - temperance\, peace\, equal rights\, woman suffrage\, nonresistance\, and the abolition of slavery - Mott viewed women's rights as only one element of a broad-based reform agenda for American society. +1502,Selected Passages from Correspondence with Friends,Nikolai Vasilevich Gogol,2009,Vanderbilt University Press,4.00,Nikolai Gogol wrote some letters to his friends\, none of which were a nose of high rank. Many are reproduced here (the letters\, not noses). +5996,Smith of Wooten Manor & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,4.91,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. +2301,Smith of Wootton Major & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,5.00,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. +2236,Steering the Craft,Ursula K. Le Guin,2015,Houghton Mifflin Harcourt,4.73,A revised and updated guide to the essentials of a writer's craft\, presented by a brilliant practitioner of the art Completely revised and rewritten to address the challenges and opportunities of the modern era\, this handbook is a short\, deceptively simple guide to the craft of writing. Le Guin lays out ten chapters that address the most fundamental components of narrative\, from the sound of language to sentence construction to point of view. Each chapter combines illustrative examples from the global canon with Le Guin's own witty commentary and an exercise that the writer can do solo or in a group. She also offers a comprehensive guide to working in writing groups\, both actual and online. Masterly and concise\, Steering the Craft deserves a place on every writer's shelf. 
+4724,THE UNVANQUISHED,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +5948,That We Are Gentle Creatures,Fyodor Dostoevsky,2009,OUP Oxford,4.33,In the stories in this volume Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. In White Nights the apparent idyll of the dreamer's romantic fantasies disguises profound loneliness and estrangement from 'living life'. Despite his sentimental friendship with Nastenka\, his final withdrawal into the world of the imagination anticipates the retreat into the 'underground' of many of Dostoevsky's later intellectual heroes. A Gentle Creature and The Dream of a Ridiculous Man show how such withdrawal from reality can end in spiritual desolation and moral indifference and how\, in Dostoevsky's view\, the tragedy of the alienated individual can be resolved only by the rediscovery of a sense of compassion and responsibility towards fellow human beings. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +1937,The Best Short Stories of Dostoevsky (Modern Library),Fyodor Dostoevsky,2012,Modern Library,4.33,This collection\, unique to the Modern Library\, gathers seven of Dostoevsky's key works and shows him to be equally adept at the short story as with the novel. Exploring many of the same themes as in his longer works\, these small masterpieces move from the tender and romantic White Nights\, an archetypal nineteenth-century morality tale of pathos and loss\, to the famous Notes from the Underground\, a story of guilt\, ineffectiveness\, and uncompromising cynicism\, and the first major work of existential literature. Among Dostoevsky's prototypical characters is Yemelyan in The Honest Thief\, whose tragedy turns on an inability to resist crime. Presented in chronological order\, in David Magarshack's celebrated translation\, this is the definitive edition of Dostoevsky's best stories. +2776,The Devil and Other Stories (Oxford World's Classics),Leo Tolstoy,2003,OUP Oxford,5.00,'It is impossible to explain why Yevgeny chose Liza Annenskaya\, as it is always impossible to explain why a man chooses this and not that woman.' This collection of eleven stories spans virtually the whole of Tolstoy's creative life. While each is unique in form\, as a group they are representative of his style\, and touch on the central themes that surface in War and Peace and Anna Karenina. Stories as different as 'The Snowstorm'\, 'Lucerne'\, 'The Diary of a Madman'\, and 'The Devil' are grounded in autobiographical experience. They deal with journeys of self-discovery and the moral and religious questioning that characterizes Tolstoy's works of criticism and philosophy. 
'Strider' and 'Father Sergy'\, as well as reflecting Tolstoy's own experiences\, also reveal profound psychological insights. These stories range over much of the Russian world of the nineteenth century\, from the nobility to the peasantry\, the military to the clergy\, from merchants and cobblers to a horse and a tree. Together they present a fascinating picture of Tolstoy's skill and artistry. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +4231,The Dispossessed,Ursula K. Le Guin,1974,Harpercollins,4.26,Frequently reissued with the same ISBN\, but with slightly differing bibliographical details. +7480,The Hobbit,J. R. R. Tolkien,2012,Mariner Books,4.64,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +6405,The Hobbit or There and Back Again,J. R. R. Tolkien,2012,Mariner Books,4.63,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +2540,The Inspector General (Language - Russian) (Russian Edition),[Nicolai Gogol, Thomas Seltzer],2014,CreateSpace,3.50,The Inspector-General is a national institution. To place a purely literary valuation upon it and call it the greatest of Russian comedies would not convey the significance of its position either in Russian literature or in Russian life itself. There is no other single work in the modern literature of any language that carries with it the wealth of associations which the Inspector-General does to the educated Russian. +2951,The Insulted and Injured,Fyodor Dostoevsky,2011,Wm. B. Eerdmans Publishing,4.00,The Insulted and Injured\, which came out in 1861\, was Fyodor Dostoevsky's first major work of fiction after his Siberian exile and the first of the long novels that made him famous. Set in nineteenth-century Petersburg\, this gripping novel features a vividly drawn set of characters - including Vanya (Dostoevsky's semi-autobiographical hero)\, Natasha (the woman he loves)\, and Alyosha (Natasha's aristocratic lover) - all suffering from the cruelly selfish machinations of Alyosha's father\, the dark and powerful Prince Valkovsky. Boris Jakim's fresh English-language rendering of this gem in the Doestoevsky canon is both more colorful and more accurate than any earlier translation. --from back cover. +2130,The J. R. R. Tolkien Audio Collection,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,HarperCollins Publishers,4.89,For generations\, J R R Tolkien's words have brought to thrilling life a world of hobbits\, magic\, and historic myth\, woken from its foggy slumber within our minds. Here\, he tells the tales in his own voice. +9801,The Karamazov Brothers (Oxford World's Classics),Fyodor Dostoevsky,2008,Oxford University Press,4.40,A remarkable work showing the author's power to depict Russian character and his understanding of human nature. Driven by intense\, uncontrollable emotions of rage and revenge\, the four Karamazov brothers all become involved in the brutal murder of their despicable father. 
+5469,The Lays of Beleriand,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,Harpercollins Pub Limited,4.42,The third volume that contains the early myths and legends which led to the writing of Tolkien's epic tale of war\, The Silmarillion. This\, the third volume of The History of Middle-earth\, gives us a priviledged insight into the creation of the mythology of Middle-earth\, through the alliterative verse tales of two of the most crucial stories in Tolkien's world -- those of Turien and Luthien. The first of the poems is the unpublished Lay of The Children of Hurin\, narrating on a grand scale the tragedy of Turin Turambar. The second is the moving Lay of Leithian\, the chief source of the tale of Beren and Luthien in The Silmarillion\, telling of the Quest of the Silmaril and the encounter with Morgoth in his subterranean fortress. Accompanying the poems are commentaries on the evolution of the history of the Elder Days. Also included is the notable criticism of The Lay of The Leithian by CS Lewis\, who read the poem in 1929. +2675,The Lord of the Rings - Boxed Set,J.R.R. Tolkien,2012,HarperCollins,4.56,This beautiful gift edition of The Hobbit\, J.R.R. Tolkien's classic prelude to his Lord of the Rings trilogy\, features cover art\, illustrations\, and watercolor paintings by the artist Alan Lee. Bilbo Baggins is a hobbit who enjoys a comfortable\, unambitious life\, rarely traveling any farther than his pantry or cellar. But his contentment is disturbed when the wizard Gandalf and a company of dwarves arrive on his doorstep one day to whisk him away on an adventure. They have launched a plot to raid the treasure hoard guarded by Smaug the Magnificent\, a large and very dangerous dragon. Bilbo reluctantly joins their quest\, unaware that on his journey to the Lonely Mountain he will encounter both a magic ring and a frightening creature known as Gollum. Written for J.R.R. Tolkien's own children\, The Hobbit has sold many millions of copies worldwide and established itself as a modern classic. +7140,The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1),[J. R. R. Tolkien, Alan Lee],2002,HarperSport,4.75,A selection of stunning poster paintings from the celebrated Tolkien artist Alan Lee - the man behind many of the striking images from The Lord of The Rings movie. The 50 paintings contained within the centenary edition of The Lord of the Rings in 1992 have themselves become classics and Alan Lee's interpretations are hailed as the most faithful to Tolkien's own vision. This new poster collection\, a perfect complement to volume one\, reproduces six more of the most popular paintings from the book in a format suitable either for hanging as posters or mounting and framing. +5127,The Overcoat, Nikolai Gogol,1992,Courier Corporation,3.75,Four short stories include a satirical tale of Russian bureaucrats and a portrayal of an elderly couple living in the secluded countryside. +8875,The Two Towers,John Ronald Reuel Tolkien,2007,HarperCollins UK,4.64,The second volume in The Lord of the Rings\, This title is also available as a film. +4977,The Unvanquished,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. 
+4382,The Wolves of Witchmaker,Carole Guinane,2001,iUniverse,5.00,Polly Lavender is mysteriously lured onto Witchmaker's grounds along with her best friends Tony Rico\, Gracie Reene\, and Zeus\, the wolf they rescued as a pup. The three must quickly learn to master the art of magic because they have been chosen to lead Witchmaker Prep against a threat that has grim consequences. +7912,The Word For World is Forest,Ursula K. Le Guin,2015,Gollancz,4.22,When the inhabitants of a peaceful world are conquered by the bloodthirsty yumens\, their existence is irrevocably altered. Forced into servitude\, the Athsheans find themselves at the mercy of their brutal masters. Desperation causes the Athsheans\, led by Selver\, to retaliate against their captors\, abandoning their strictures against violence. But in defending their lives\, they have endangered the very foundations of their society. For every blow against the invaders is a blow to the humanity of the Athsheans. And once the killing starts\, there is no turning back. +1211,The brothers Karamazov,Fyodor Dostoevsky,2003,Bantam Classics,1.00,In 1880 Dostoevsky completed The Brothers Karamazov\, the literary effort for which he had been preparing all his life. Compelling\, profound\, complex\, it is the story of a patricide and of the four sons who each had a motive for murder: Dmitry\, the sensualist\, Ivan\, the intellectual\, Alyosha\, the mystic\, and twisted\, cunning Smerdyakov\, the bastard child. Frequently lurid\, nightmarish\, always brilliant\, the novel plunges the reader into a sordid love triangle\, a pathological obsession\, and a gripping courtroom drama. But throughout the whole\, Dostoevsky searhes for the truth--about man\, about life\, about the existence of God. A terrifying answer to man's eternal questions\, this monumental work remains the crowning achievement of perhaps the finest novelist of all time. From the Paperback edition. +8086,The grand inquisitor (Milestones of thought),Fyodor Dostoevsky,1981,A&C Black,4.09,Dostoevsky's portrayal of the Catholic Church during the Inquisition is a plea for the power of pure faith\, and a critique of the tyrannies of institutionalized religion. This is an except from the Brothers Karamazov which stands alone as a statement of philiosophy and a warning about the surrender of freedom for the sake of comfort. +8077,The unvanquished,William Faulkner,2011,Vintage,4.00,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +8480,The wind's twelve quarters: Short stories,Ursula K. Le Guin,2017,HarperCollins,5.00,The recipient of numerous literary prizes\, including the National Book Award\, the Kafka Award\, and the Pushcart Prize\, Ursula K. Le Guin is renowned for her lyrical writing\, rich characters\, and diverse worlds. The Wind's Twelve Quarters collects seventeen powerful stories\, each with an introduction by the author\, ranging from fantasy to intriguing scientific concepts\, from medieval settings to the future. Including an insightful foreword by Le Guin\, describing her experience\, her inspirations\, and her approach to writing\, this stunning collection explores human values\, relationships\, and survival\, and showcases the myriad talents of one of the most provocative writers of our time. 
+2847,To Love A Dark Stranger (Lovegram Historical Romance),Colleen Faulkner,1997,Zebra Books,5.00,Bestselling author Colleen Faulkner's tumultuous saga of royal intrigue and forbidden desire sweeps from the magnificent estates of the aristocracy to the shadowy streets of London to King Charles II's glittering Restoration court. +3293,Universe by Design,Danny Faulkner,2004,New Leaf Publishing Group,4.25,Views the stars and planets from a creationist standpoint\, addresses common misconceptions and difficulties about relativity and cosmology\, and discusses problems with the big bang theory with many analogies\, examples\, diagrams\, and illustrations. Original. +5327,War and Peace,Leo Tolstoy,2016,Lulu.com,3.84,Covering the period from the French invasion under Napoleon into Russia. Although not covering solely the war itself\, the serialized novel does cover the effects the war had on Russian society from the common person right up to the Tsar himself. The book starts to move more to a philosophical consideration on war and peace near the end making the book as a whole an important piece of literature. +4536,War and Peace (Signet Classics),[Leo Tolstoy, Pat Conroy, John Hockenberry],2012,Signet Classics,4.75,Presents the classical epic of the Napoleonic Wars and their effects on four Russian families. +9032,War and Peace: A Novel (6 Volumes),Tolstoy Leo,2013,Hardpress Publishing,3.81,Unlike some other reproductions of classic texts (1) We have not used OCR(Optical Character Recognition)\, as this leads to bad quality books with introduced typos. (2) In books where there are images such as portraits\, maps\, sketches etc We have endeavoured to keep the quality of these images\, so they represent accurately the original artefact. Although occasionally there may be certain imperfections with these old texts\, we feel they deserve to be made available for future generations to enjoy. +5119,William Faulkner,William Faulkner,2011,Vintage,4.00,This invaluable volume\, which has been republished to commemorate the one-hundredth anniversary of Faulkner's birth\, contains some of the greatest short fiction by a writer who defined the course of American literature. Its forty-five stories fall into three categories: those not included in Faulkner's earlier collections\, previously unpublished short fiction\, and stories that were later expanded into such novels as The Unvanquished\, The Hamlet\, and Go Down\, Moses. With its Introduction and extensive notes by the biographer Joseph Blotner\, Uncollected Stories of William Faulkner is an essential addition to its author's canon--as well as a book of some of the most haunting\, harrowing\, and atmospheric short fiction written in the twentieth century. +8615,Winter notes on summer impressions,Fyodor Dostoevsky,2018,Alma Books,4.75,In June 1862\, Dostoevsky left Petersburg on his first excursion to Western Europe. Ostensibly making the trip to consult Western specialists about his epilepsy\, he also wished to see first-hand the source of the Western ideas he believed were corrupting Russia. Over the course of his journey he visited a number of major cities\, including Berlin\, Paris\, London\, Florence\, Milan and Vienna.His record of the trip\, Winter Notes on Summer Impressions - first published in the February 1863 issue of Vremya\, the periodical he edited - is the chrysalis out of which many elements of his later masterpieces developed. 
+6478,Woman-The Full Story: A Dynamic Celebration of Freedoms,Michele Guinness,2003,Zondervan,5.00,What does it mean to be a woman today? What have women inherited from their radical\, risk-taking sisters of the past? And how does God view this half of humanity? Michele Guinness invites us on an adventure of discovery\, exploring the biblical texts\, the annals of history and the experiences of women today in search of the challenges and achievements\, failures and joys\, of women throughout the ages. +8678,Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World\, Planet of Exile\, City of Illusions,Ursula K. Le Guin,2016,Orb Books,4.41,Worlds of Exile and Illusion contains three novels in the Hainish Series from Ursula K. Le Guin\, one of the greatest science fiction writers and many times the winner of the Hugo and Nebula Awards. Her career as a novelist was launched by the three novels contained here. These books\, Rocannon's World\, Planet of Exile\, and City of Illusions\, are set in the same universe as Le Guin's groundbreaking classic\, The Left Hand of Darkness. At the Publisher's request\, this title is being sold without Digital Rights Management Software (DRM) applied. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json new file mode 100644 index 0000000000000..29e3c94c579b1 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json @@ -0,0 +1,30 @@ +{ + "properties": { + "book_no": { + "type": "keyword" + }, + "title": { + "type": "text" + }, + "author": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "description": { + "type": "text" + }, + "publisher": { + "type": "text" + }, + "ratings": { + "type": "float" + }, + "year": { + "type": "integer" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec index 574f27b8c1fed..56eded5ce4603 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec @@ -4,65 +4,89 @@ singleMatchWithTextField required_capability: match_operator -from airports | where name match "london" | keep abbrev, name | sort abbrev; +from books | where author match "William Faulkner" | keep book_no, author | sort book_no | LIMIT 5; -abbrev:keyword | name:text -LGW | London Gatwick -LHR | London Heathrow -LTN | London Luton +book_no:keyword | author:text +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] +2713 | William Faulkner +2847 | Colleen Faulkner +2883 | William Faulkner +3293 | Danny Faulkner ; singleMatchWithKeywordField required_capability: match_operator -from airports | where abbrev match "LTN" | keep abbrev, name | sort abbrev; +from books | where author.keyword match "William Faulkner" | keep book_no, author | sort book_no; -abbrev:keyword | name:text -LTN | London Luton +book_no:keyword | author:text +2713 | William Faulkner +2883 | William Faulkner +4724 | William Faulkner +4977 | William Faulkner +5119 | William Faulkner +5404 | William Faulkner +5578 | William Faulkner +8077 | William Faulkner +9896 | William Faulkner ; multipleMatch required_capability: match_operator -from airports | where name match "london" or name match "liverpool "| keep abbrev, name | sort abbrev; +from 
books +| where (description match "Sauron" OR description match "Dark Lord") AND + (author match "J. R. R. Tolkien" OR author match "John Ronald Reuel Tolkien") +| keep book_no, title, author +| sort book_no +| limit 4 +; -abbrev:keyword | name:text -LGW | London Gatwick -LHR | London Heathrow -LPL | Liverpool John Lennon -LTN | London Luton +book_no:keyword | title:text | author:text +1463 | Realms of Tolkien: Images of Middle-earth | J. R. R. Tolkien +2675 | The Lord of the Rings - Boxed Set | J.R.R. Tolkien +2714 | Return of the King Being the Third Part of The Lord of the Rings | J. R. R. Tolkien +2936 | Fellowship of the Ring 2ND Edition | John Ronald Reuel Tolkien ; multipleWhereWithMatch required_capability: match_operator -from airports | where name match "john" | WHERE name match "St" | keep abbrev, name | sort abbrev; +from books +| where title match "short stories" +| where author match "Ursula K. Le Guin" +| keep book_no, title, author +| sort book_no +; -abbrev:keyword | name:text -YXJ | Fort St. John (N. Peace) +book_no:keyword | title:text | author:text +8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin ; combinedMatchWithFunctions required_capability: match_operator -from airports -| where name match "john" AND country match "Canada" AND scalerank > 5 -| where length(name) > 10 -| keep abbrev, name, country, scalerank -| sort abbrev +from books +| where title match "Tolkien" AND author match "Tolkien" AND year > 2000 +| where mv_count(author) == 1 +| keep book_no, title, author, year +| sort book_no ; -abbrev:keyword | name:text | country:keyword | scalerank: integer -YHM | John C. Munro Hamilton Int'l | Canada | 8 -YXJ | Fort St. John (N. Peace) | Canada | 8 +book_no:keyword | title:text | author:text | year:integer +5335 | Letters of J R R Tolkien | J.R.R. 
Tolkien | 2014 ; matchWithStats required_capability: match_operator -from airports -| where name match "john" AND scalerank > 5 -| where length(name) > 10 -| stats count(*) BY type -| sort type +from books +| where author match "faulkner" AND year > 1990 +| where mv_count(author) == 1 +| stats count(*) BY author.keyword +| sort author.keyword ; -count(*): long | type:keyword -1 | major -2 | mid +count(*): long | author.keyword:keyword +1 | Bettilu Stein Faulkner +2 | Colleen Faulkner +1 | Danny Faulkner +1 | Keith Faulkner +1 | Paul Faulkner +8 | William Faulkner ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec index bdc11c78c8f48..2bc2a865c0052 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec @@ -1,34 +1,47 @@ matchKeywordField required_capability: match_command -from employees | match "first_name: Ma*" | keep emp_no, first_name | sort emp_no; - -emp_no:integer | first_name:keyword -10011 |Mary -10020 |Mayuko -10042 |Magy -10054 |Mayumi -10069 |Margareta +from books | match "author.keyword: *Stein*" | keep book_no, author | sort book_no; + +book_no:keyword | author:text +7381 | Bettilu Stein Faulkner ; -matchMultipleKeywordFields +matchMultipleTextFields required_capability: match_command -from employees | match "+first_name: Ma* +last_name:*man" | keep emp_no, first_name, last_name | sort emp_no; +from books | match "title:Return* AND author:*Tolkien" | keep book_no, title | sort book_no; -emp_no:integer | first_name:keyword | last_name:keyword -10069 |Margareta | Bierman +book_no:keyword | title:text +2714 | Return of the King Being the Third Part of The Lord of the Rings +7350 | Return of the Shadow ; -matchTextField +matchAllFields required_capability: match_command -from airports | match "lon*" | keep abbrev, name | sort abbrev; +from books | match "dark AND lord AND Sauron" | keep book_no, title | sort book_no; + +book_no:keyword | title:text +2714 | Return of the King Being the Third Part of The Lord of the Rings +2936 | Fellowship of the Ring 2ND Edition +; + +matchWithWhereFunctionsAndStats +required_capability: match_command + +from books +| match "Faulkner AND ratings:>4.0" +| where year > 1950 and mv_count(author) == 1 +| stats count(*) BY author.keyword +| sort author.keyword +; -abbrev:keyword | name:text -CGQ | Changchun Longjia Int'l -LGW | London Gatwick -LHR | London Heathrow -LTN | London Luton -LYR | Svalbard Longyear +count(*): long | author.keyword:keyword +1 | Bettilu Stein Faulkner +2 | Colleen Faulkner +1 | Danny Faulkner +1 | Keith Faulkner +1 | Paul Faulkner +1 | William Faulkner ; From f152839faf082b5f93ecd718d4c297584c545ffe Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 09:11:14 +0100 Subject: [PATCH 090/352] Remove `InterruptedEx.` from snapshot test harness (#112228) Relates #111957 --- .../snapshots/AbstractSnapshotIntegTestCase.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 1b49209b49c7f..1656a09daa123 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ 
-288,7 +288,7 @@ public static void failReadsAllDataNodes(String repository) { } } - public static void waitForBlockOnAnyDataNode(String repository) throws InterruptedException { + public static void waitForBlockOnAnyDataNode(String repository) { final boolean blocked = waitUntil(() -> { for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository); @@ -475,13 +475,13 @@ protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, Li return createSnapshot(repositoryName, snapshot, indices, Collections.singletonList(NO_FEATURE_STATES_VALUE)); } - protected void createIndexWithRandomDocs(String indexName, int docCount) throws InterruptedException { + protected void createIndexWithRandomDocs(String indexName, int docCount) { createIndex(indexName); ensureGreen(); indexRandomDocs(indexName, docCount); } - protected void indexRandomDocs(String index, int numdocs) throws InterruptedException { + protected void indexRandomDocs(String index, int numdocs) { logger.info("--> indexing [{}] documents into [{}]", numdocs, index); IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; for (int i = 0; i < builders.length; i++) { From 9db177887820c2a210aea1c041a88c162754f034 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 27 Aug 2024 09:21:43 +0100 Subject: [PATCH 091/352] Use StreamOutput::writeWriteable instead of writeTo directly (#112027) --- .../lifecycle/action/GetDataStreamLifecycleStatsAction.java | 2 +- .../src/main/java/org/elasticsearch/cluster/ClusterState.java | 2 +- .../elasticsearch/cluster/version/CompatibilityVersions.java | 2 +- .../elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java | 2 +- .../elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java | 2 +- .../org/elasticsearch/xpack/esql/session/Configuration.java | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java index 6e930defd4e0b..71f07c8cac668 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java @@ -76,7 +76,7 @@ public Response(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(runDuration); out.writeOptionalVLong(timeBetweenStarts); - out.writeCollection(dataStreamStats, (o, v) -> v.writeTo(o)); + out.writeCollection(dataStreamStats, StreamOutput::writeWriteable); } public Long getRunDuration() { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index c54269da68507..30e9a9a3779d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -1081,7 +1081,7 @@ public void writeTo(StreamOutput out) throws IOException { routingTable.writeTo(out); nodes.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - out.writeMap(compatibilityVersions, (streamOutput, versions) -> versions.writeTo(streamOutput)); + 
out.writeMap(compatibilityVersions, StreamOutput::writeWriteable); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { clusterFeatures.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java index c1489afc6c369..8ebb24e86105a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java +++ b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java @@ -120,7 +120,7 @@ public void writeTo(StreamOutput out) throws IOException { TransportVersion.writeVersion(this.transportVersion(), out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { - out.writeMap(this.systemIndexMappingsVersion(), (o, v) -> v.writeTo(o)); + out.writeMap(this.systemIndexMappingsVersion(), StreamOutput::writeWriteable); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java index c1903a2910629..5a91e997ca5fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java @@ -108,6 +108,6 @@ public Map<String, UsageStats> getUsageStatsByTier() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(usageStatsByTier, (o, v) -> v.writeTo(o)); + out.writeMap(usageStatsByTier, StreamOutput::writeWriteable); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index e976a8d9be48e..cec4a5a3509a1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -332,7 +332,7 @@ private static class LookupResponse extends TransportResponse { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(policies, (o, v) -> v.writeTo(o)); + out.writeMap(policies, StreamOutput::writeWriteable); out.writeMap(failures, StreamOutput::writeString); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java index a2777c97e919a..33a48d2e7df05 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java @@ -117,7 +117,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(profile); } if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_REQUEST_TABLES)) { - out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, (o2, column) -> column.writeTo(o2))); + out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, StreamOutput::writeWriteable)); } } From 25fdcd29276c508e5c69f6d855dc97daed8cfc08 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 27 Aug 2024 11:22:52 +0200 Subject: [PATCH 092/352] ES|QL: cache EsField on serialization (#112008) As a follow-up to https://github.com/elastic/elasticsearch/pull/111447, with this change we also cache 
`EsFields`. This gives us an additional 30-40% reduction in the size of the serialized plan, according to [these tests](https://github.com/elastic/elasticsearch/pull/111980). Related to https://github.com/elastic/elasticsearch/issues/111358 --- .../org/elasticsearch/TransportVersions.java | 1 + .../test/AbstractWireSerializingTestCase.java | 9 ++- .../esql/core/expression/FieldAttribute.java | 4 +- .../xpack/esql/core/type/DateEsField.java | 14 ++--- .../xpack/esql/core/type/EsField.java | 61 +++++++++++++------ .../esql/core/type/InvalidMappedField.java | 21 ++----- .../xpack/esql/core/type/KeywordEsField.java | 17 ++---- .../esql/core}/type/MultiTypeEsField.java | 18 ++---- .../xpack/esql/core/type/TextEsField.java | 13 ++-- .../esql/core/type/UnsupportedEsField.java | 15 ++--- .../xpack/esql/core/util/PlanStreamInput.java | 3 + .../esql/core/util/PlanStreamOutput.java | 10 +++ .../xpack/esql/analysis/Analyzer.java | 2 +- .../esql/enrich/EnrichPolicyResolver.java | 13 ++-- .../esql/enrich/ResolvedEnrichPolicy.java | 2 +- .../function/UnsupportedAttribute.java | 12 +++- .../xpack/esql/index/EsIndex.java | 8 +-- .../xpack/esql/io/stream/PlanStreamInput.java | 49 ++++++++++++++- .../esql/io/stream/PlanStreamOutput.java | 38 ++++++++++++ .../planner/EsPhysicalOperationProviders.java | 2 +- .../xpack/esql/plugin/EsqlPlugin.java | 4 -- .../xpack/esql/SerializationTestUtils.java | 2 - .../AbstractExpressionSerializationTests.java | 2 - .../xpack/esql/expression/AliasTests.java | 2 - .../function/AbstractAttributeTestCase.java | 2 - .../function/FieldAttributeTests.java | 2 +- .../function/UnsupportedAttributeTests.java | 2 +- .../esql/index/EsIndexSerializationTests.java | 32 ++++++---- .../esql/io/stream/PlanNamedTypesTests.java | 2 +- .../esql/io/stream/PlanStreamOutputTests.java | 37 ++++++++++- ...AbstractLogicalPlanSerializationTests.java | 2 - ...bstractPhysicalPlanSerializationTests.java | 2 - .../ExchangeSinkExecSerializationTests.java | 12 ++-- .../esql}/type/AbstractEsFieldTypeTests.java | 42 ++++++++++--- .../esql}/type/DataTypeConversionTests.java | 5 +- .../xpack/esql}/type/DateEsFieldTests.java | 5 +- .../xpack/esql}/type/EsFieldTests.java | 5 +- .../esql}/type/InvalidMappedFieldTests.java | 5 +- .../xpack/esql}/type/KeywordEsFieldTests.java | 4 +- .../esql/type/MultiTypeEsFieldTests.java | 16 ++--- .../xpack/esql}/type/TextEsFieldTests.java | 5 +- .../esql}/type/UnsupportedEsFieldTests.java | 5 +- 42 files changed, 336 insertions(+), 171 deletions(-) rename x-pack/plugin/{esql/src/main/java/org/elasticsearch/xpack/esql => esql-core/src/main/java/org/elasticsearch/xpack/esql/core}/type/MultiTypeEsField.java (86%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/AbstractEsFieldTypeTests.java (57%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/DataTypeConversionTests.java (99%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/DateEsFieldTests.java (89%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/EsFieldTests.java (91%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/InvalidMappedFieldTests.java (90%) rename 
x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/KeywordEsFieldTests.java (92%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/TextEsFieldTests.java (90%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/UnsupportedEsFieldTests.java (91%) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 78f1b21ea7a44..33f483c57b54e 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -197,6 +197,7 @@ static TransportVersion def(int id) { public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); + public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java index 82d7f98f34301..b4503a69acca3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java @@ -25,11 +25,18 @@ public abstract class AbstractWireSerializingTestCase<T extends Writeable> exten */ protected abstract Writeable.Reader<T> instanceReader(); + /** + * Returns a {@link Writeable.Writer} that will be used to serialize the instance + */ + protected Writeable.Writer<T> instanceWriter() { + return StreamOutput::writeWriteable; + } + /** * Copy the {@link Writeable} by round tripping it through {@linkplain StreamInput} and {@linkplain StreamOutput}. 
*/ @Override protected final T copyInstance(T instance, TransportVersion version) throws IOException { - return copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), version); + return copyInstance(instance, getNamedWriteableRegistry(), instanceWriter(), instanceReader(), version); } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java index 8e8973a11bc8a..37f2cf863d53e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java @@ -112,7 +112,7 @@ private FieldAttribute(StreamInput in) throws IOException { in.readOptionalWriteable(FieldAttribute::readFrom), in.readString(), DataType.readFrom(in), - in.readNamedWriteable(EsField.class), + EsField.readFrom(in), in.readOptionalString(), in.readEnum(Nullability.class), NameId.readFrom((StreamInput & PlanStreamInput) in), @@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(parent); out.writeString(name()); dataType().writeTo(out); - out.writeNamedWriteable(field); + field.writeTo(out); // We used to write the qualifier here. We can still do if needed in the future. out.writeOptionalString(null); out.writeEnum(nullable()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java index 01728954a2e1b..f829bcdea94e4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -17,7 +16,6 @@ * Information about a field in an ES index with the {@code date} type */ public class DateEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "DateEsField", DateEsField::new); public static DateEsField dateEsField(String name, Map<String, EsField> properties, boolean hasDocValues) { return new DateEsField(name, DataType.DATETIME, properties, hasDocValues); @@ -27,19 +25,19 @@ private DateEsField(String name, DataType dataType, Map<String, EsField> propert super(name, dataType, properties, hasDocValues); } - private DateEsField(StreamInput in) throws IOException { - this(in.readString(), DataType.DATETIME, in.readMap(i -> i.readNamedWriteable(EsField.class)), in.readBoolean()); + protected DateEsField(StreamInput in) throws IOException { + this(in.readString(), DataType.DATETIME, in.readImmutableMap(EsField::readFrom), in.readBoolean()); } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); } - @Override public String getWriteableName() { - return ENTRY.name; + return "DateEsField"; } + } diff --git 
a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java index eb17d720d2140..899986fecd012 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java @@ -7,34 +7,40 @@ package org.elasticsearch.xpack.esql.core.type; import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; -import java.util.List; import java.util.Map; import java.util.Objects; /** * Information about a field in an ES index. */ -public class EsField implements NamedWriteable { - public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { - return List.of( - EsField.ENTRY, - DateEsField.ENTRY, - InvalidMappedField.ENTRY, - KeywordEsField.ENTRY, - TextEsField.ENTRY, - UnsupportedEsField.ENTRY - ); +public class EsField implements Writeable { + + private static Map<String, Writeable.Reader<? extends EsField>> readers = Map.ofEntries( + Map.entry("EsField", EsField::new), + Map.entry("DateEsField", DateEsField::new), + Map.entry("InvalidMappedField", InvalidMappedField::new), + Map.entry("KeywordEsField", KeywordEsField::new), + Map.entry("MultiTypeEsField", MultiTypeEsField::new), + Map.entry("TextEsField", TextEsField::new), + Map.entry("UnsupportedEsField", UnsupportedEsField::new) + ); + + public static Writeable.Reader<? extends EsField> getReader(String name) { + Reader<? extends EsField> result = readers.get(name); + if (result == null) { + throw new IllegalArgumentException("Invalid EsField type [" + name + "]"); + } + return result; } - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "EsField", EsField::new); - private final DataType esDataType; private final boolean aggregatable; private final Map<String, EsField> properties; @@ -53,10 +59,10 @@ public EsField(String name, DataType esDataType, Map<String, EsField> properties this.isAlias = isAlias; } - public EsField(StreamInput in) throws IOException { + protected EsField(StreamInput in) throws IOException { this.name = in.readString(); this.esDataType = readDataType(in); - this.properties = in.readImmutableMap(i -> i.readNamedWriteable(EsField.class)); + this.properties = in.readImmutableMap(EsField::readFrom); this.aggregatable = in.readBoolean(); this.isAlias = in.readBoolean(); } @@ -77,18 +83,33 @@ private DataType readDataType(StreamInput in) throws IOException { return DataType.readFrom(name); } + public static <A extends EsField> A readFrom(StreamInput in) throws IOException { + return ((PlanStreamInput) in).readEsFieldWithCache(); + } + @Override public void writeTo(StreamOutput out) throws IOException { + if (((PlanStreamOutput) out).writeEsFieldCacheHeader(this)) { + writeContent(out); + } + } + + /** + * This needs to be overridden by subclasses for specific serialization + */ + protected void writeContent(StreamOutput out) throws IOException { out.writeString(name); esDataType.writeTo(out); - out.writeMap(properties, StreamOutput::writeNamedWriteable); + out.writeMap(properties, (o, x) -> x.writeTo(out)); 
out.writeBoolean(aggregatable); out.writeBoolean(isAlias); } - @Override + /** + * This needs to be overridden by subclasses for specific serialization + */ public String getWriteableName() { - return ENTRY.name; + return "EsField"; } /** diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java index 8b15893f8a056..d34af0f8565c7 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; @@ -27,11 +26,6 @@ * It is used specifically for the 'union types' feature in ES|QL. */ public class InvalidMappedField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "InvalidMappedField", - InvalidMappedField::new - ); private final String errorMessage; private final Map<String, Set<String>> typesToIndices; @@ -44,10 +38,6 @@ public InvalidMappedField(String name, String errorMessage) { this(name, errorMessage, new TreeMap<>()); } - public InvalidMappedField(String name) { - this(name, StringUtils.EMPTY, new TreeMap<>()); - } - /** * Constructor supporting union types, used in ES|QL. */ @@ -61,8 +51,8 @@ private InvalidMappedField(String name, String errorMessage, Map<String, Set<St this.typesToIndices = typesToIndices; } - private InvalidMappedField(StreamInput in) throws IOException { - this(in.readString(), in.readString(), in.readImmutableMap(StreamInput::readString, i -> i.readNamedWriteable(EsField.class))); + protected InvalidMappedField(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readImmutableMap(StreamInput::readString, EsField::readFrom)); } public Set<DataType> types() { @@ -70,15 +60,14 @@ public Set<DataType> types() { } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(errorMessage); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "InvalidMappedField"; } public String errorMessage() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java index d856e3d9d8297..33dcebaf3dec2 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -21,11 +20,6 @@ * Information about a field in an ES index with the {@code keyword} type. 
*/ public class KeywordEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "KeywordEsField", - KeywordEsField::new - ); private final int precision; private final boolean normalized; @@ -63,11 +57,11 @@ protected KeywordEsField( this.normalized = normalized; } - private KeywordEsField(StreamInput in) throws IOException { + public KeywordEsField(StreamInput in) throws IOException { this( in.readString(), KEYWORD, - in.readMap(i -> i.readNamedWriteable(EsField.class)), + in.readImmutableMap(EsField::readFrom), in.readBoolean(), in.readInt(), in.readBoolean(), @@ -76,18 +70,17 @@ private KeywordEsField(StreamInput in) throws IOException { } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); out.writeInt(precision); out.writeBoolean(normalized); out.writeBoolean(isAlias()); } - @Override public String getWriteableName() { - return ENTRY.name; + return "KeywordEsField"; } public int getPrecision() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java similarity index 86% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java rename to x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java index 8b2fc926379f2..81dc77eddcdf8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java @@ -5,15 +5,11 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.type; +package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import java.io.IOException; import java.util.HashMap; @@ -31,11 +27,6 @@ * type conversion is done at the data node level. 
*/ public class MultiTypeEsField extends EsField { - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "MultiTypeEsField", - MultiTypeEsField::new - ); private final Map indexToConversionExpressions; @@ -44,21 +35,20 @@ public MultiTypeEsField(String name, DataType dataType, boolean aggregatable, Ma this.indexToConversionExpressions = indexToConversionExpressions; } - public MultiTypeEsField(StreamInput in) throws IOException { + protected MultiTypeEsField(StreamInput in) throws IOException { this(in.readString(), DataType.readFrom(in), in.readBoolean(), in.readImmutableMap(i -> i.readNamedWriteable(Expression.class))); } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(getDataType().typeName()); out.writeBoolean(isAggregatable()); out.writeMap(getIndexToConversionExpressions(), (o, v) -> out.writeNamedWriteable(v)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "MultiTypeEsField"; } public Map getIndexToConversionExpressions() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java index c52230fa65829..0f2f136e74423 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Tuple; @@ -23,7 +22,6 @@ * Information about a field in an es index with the {@code text} type. 
*/ public class TextEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "TextEsField", TextEsField::new); public TextEsField(String name, Map properties, boolean hasDocValues) { this(name, properties, hasDocValues, false); @@ -33,21 +31,20 @@ public TextEsField(String name, Map properties, boolean hasDocV super(name, TEXT, properties, hasDocValues, isAlias); } - private TextEsField(StreamInput in) throws IOException { - this(in.readString(), in.readMap(i -> i.readNamedWriteable(EsField.class)), in.readBoolean(), in.readBoolean()); + protected TextEsField(StreamInput in) throws IOException { + this(in.readString(), in.readImmutableMap(EsField::readFrom), in.readBoolean(), in.readBoolean()); } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); out.writeBoolean(isAlias()); } - @Override public String getWriteableName() { - return ENTRY.name; + return "TextEsField"; } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java index 13e4d6ad953a8..13ee2b42a321b 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,11 +19,6 @@ * All the subfields (properties) of an unsupported type are also be unsupported. 
*/ public class UnsupportedEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "UnsupportedEsField", - UnsupportedEsField::new - ); private final String originalType; private final String inherited; // for fields belonging to parents (or grandparents) that have an unsupported type @@ -40,20 +34,19 @@ public UnsupportedEsField(String name, String originalType, String inherited, Ma } public UnsupportedEsField(StreamInput in) throws IOException { - this(in.readString(), in.readString(), in.readOptionalString(), in.readMap(i -> i.readNamedWriteable(EsField.class))); + this(in.readString(), in.readString(), in.readOptionalString(), in.readImmutableMap(EsField::readFrom)); } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(getOriginalType()); out.writeOptionalString(getInherited()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "UnsupportedEsField"; } public String getOriginalType() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java index 01a153feeb473..471c9476ad31d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.io.IOException; @@ -44,4 +45,6 @@ public interface PlanStreamInput { * @throws IOException */ A readAttributeWithCache(CheckedFunction constructor) throws IOException; + + A readEsFieldWithCache() throws IOException; } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java index cec68c06e492e..4c30cb66e9f86 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.core.util; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.io.IOException; @@ -21,4 +22,13 @@ public interface PlanStreamOutput { * @throws IOException */ boolean writeAttributeCacheHeader(Attribute attribute) throws IOException; + + /** + * Writes a cache header for an {@link org.elasticsearch.xpack.esql.core.type.EsField} and caches it if it is not already in the cache. + * In that case, the field will have to serialize itself into this stream immediately after this method call. + * @param field The EsField to serialize + * @return true if the attribute needs to serialize itself, false otherwise (ie. 
if already cached) + * @throws IOException + */ + boolean writeEsFieldCacheHeader(EsField field) throws IOException; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 5b59117ad356b..f88c603b4cacb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -43,6 +43,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.core.util.Holder; @@ -80,7 +81,6 @@ import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.util.ArrayList; import java.util.Arrays; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index cec4a5a3509a1..f77bfa6d3f862 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -40,6 +40,9 @@ import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.util.StringUtils; import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.session.IndexResolver; @@ -326,14 +329,16 @@ private static class LookupResponse extends TransportResponse { } LookupResponse(StreamInput in) throws IOException { - this.policies = in.readMap(StreamInput::readString, ResolvedEnrichPolicy::new); - this.failures = in.readMap(StreamInput::readString, StreamInput::readString); + PlanStreamInput planIn = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), null); + this.policies = planIn.readMap(StreamInput::readString, ResolvedEnrichPolicy::new); + this.failures = planIn.readMap(StreamInput::readString, StreamInput::readString); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(policies, StreamOutput::writeWriteable); - out.writeMap(failures, StreamOutput::writeString); + PlanStreamOutput pso = new PlanStreamOutput(out, new PlanNameRegistry(), null); + pso.writeMap(policies, StreamOutput::writeWriteable); + pso.writeMap(failures, StreamOutput::writeString); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java index 44443973764e6..63f22bd40ac39 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java @@ -29,7 +29,7 @@ public ResolvedEnrichPolicy(StreamInput in) throws IOException { in.readString(), in.readStringCollectionAsList(), in.readMap(StreamInput::readString), - in.readMap(EsField::new) + in.readMap(EsField::readFrom) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java index 78577aa2b91e0..5961d1c21bb02 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,6 +21,7 @@ import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -75,7 +77,9 @@ private UnsupportedAttribute(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readString(), - new UnsupportedEsField(in), + in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) + ? EsField.readFrom(in) + : new UnsupportedEsField(in), in.readOptionalString(), NameId.readFrom((PlanStreamInput) in) ); @@ -86,7 +90,11 @@ public void writeTo(StreamOutput out) throws IOException { if (((PlanStreamOutput) out).writeAttributeCacheHeader(this)) { Source.EMPTY.writeTo(out); out.writeString(name()); - field().writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + field().writeTo(out); + } else { + field().writeContent(out); + } out.writeOptionalString(hasCustomMessage ? 
message : null); id().writeTo(out); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java index 92fa2f76ec8b2..d368c570a3f76 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java @@ -36,17 +36,13 @@ public EsIndex(String name, Map mapping, Set concreteIn @SuppressWarnings("unchecked") public EsIndex(StreamInput in) throws IOException { - this( - in.readString(), - in.readImmutableMap(StreamInput::readString, i -> i.readNamedWriteable(EsField.class)), - (Set) in.readGenericValue() - ); + this(in.readString(), in.readImmutableMap(StreamInput::readString, EsField::readFrom), (Set) in.readGenericValue()); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name()); - out.writeMap(mapping(), StreamOutput::writeNamedWriteable); + out.writeMap(mapping(), (o, x) -> x.writeTo(out)); out.writeGenericValue(concreteIndices()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java index c8e744dfff054..ad66378da5d9e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -28,6 +29,7 @@ import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanNamedReader; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -65,6 +67,8 @@ public NameId apply(long streamNameId) { private Attribute[] attributesCache = new Attribute[64]; + private EsField[] esFieldsCache = new EsField[64]; + private final PlanNameRegistry registry; // hook for nameId, where can cache and map, for now just return a NameId of the same long value. @@ -239,7 +243,7 @@ private Attribute attributeFromCache(int id) throws IOException { } /** - * Add and attribute to the cache, based on the serialization ID generated by {@link PlanStreamOutput} + * Add an attribute to the cache, based on the serialization ID generated by {@link PlanStreamOutput} * @param id The ID that will reference the attribute. 
Generated at serialization time * @param attr The attribute to cache */ @@ -250,4 +254,47 @@ private void cacheAttribute(int id, Attribute attr) { } attributesCache[id] = attr; } + + @SuppressWarnings("unchecked") + public A readEsFieldWithCache() throws IOException { + if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + // it's safe to cast to int, since the max value for this is {@link PlanStreamOutput#MAX_SERIALIZED_ATTRIBUTES} + int cacheId = Math.toIntExact(readZLong()); + if (cacheId < 0) { + String className = readString(); + Writeable.Reader reader = EsField.getReader(className); + cacheId = -1 - cacheId; + EsField result = reader.read(this); + cacheEsField(cacheId, result); + return (A) result; + } else { + return (A) esFieldFromCache(cacheId); + } + } else { + String className = readString(); + Writeable.Reader reader = EsField.getReader(className); + return (A) reader.read(this); + } + } + + private EsField esFieldFromCache(int id) throws IOException { + if (esFieldsCache[id] == null) { + throw new IOException("Attribute ID not found in serialization cache [" + id + "]"); + } + return esFieldsCache[id]; + } + + /** + * Add an EsField to the cache, based on the serialization ID generated by {@link PlanStreamOutput} + * @param id The ID that will reference the field. Generated at serialization time + * @param field The EsField to cache + */ + private void cacheEsField(int id, EsField field) { + assert id >= 0; + if (id >= esFieldsCache.length) { + esFieldsCache = ArrayUtil.grow(esFieldsCache); + } + esFieldsCache[id] = field; + } + } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java index f918621d87a24..d76c61eac05d1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.Configuration; @@ -62,6 +63,11 @@ public final class PlanStreamOutput extends StreamOutput implements org.elastics */ protected final Map cachedAttributes = new IdentityHashMap<>(); + /** + * Cache for EsFields. 
+ */ + protected final Map cachedEsFields = new IdentityHashMap<>(); + private final StreamOutput delegate; private final PlanNameRegistry registry; @@ -205,6 +211,38 @@ private int cacheAttribute(Attribute attr) { return id; } + @Override + public boolean writeEsFieldCacheHeader(EsField field) throws IOException { + if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + Integer cacheId = esFieldIdFromCache(field); + if (cacheId != null) { + writeZLong(cacheId); + return false; + } + + cacheId = cacheEsField(field); + writeZLong(-1 - cacheId); + } + writeString(field.getWriteableName()); + return true; + } + + private Integer esFieldIdFromCache(EsField field) { + return cachedEsFields.get(field); + } + + private int cacheEsField(EsField attr) { + if (cachedEsFields.containsKey(attr)) { + throw new IllegalArgumentException("EsField already present in the serialization cache [" + attr + "]"); + } + int id = cachedEsFields.size(); + if (id >= maxSerializedAttributes) { + throw new InvalidArgumentException("Limit of the number of serialized EsFields exceeded [{}]", maxSerializedAttributes); + } + cachedEsFields.put(attr, id); + return id; + } + /** * The byte representing a {@link Block} sent for the first time. The byte * will be followed by a {@link StreamOutput#writeVInt} encoded identifier diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 45989b4f563ce..8fddb7407a02a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -52,6 +52,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; @@ -60,7 +61,6 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.DriverParallelism; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index b55c5f604023f..f0686baf68f6f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -61,7 +61,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import 
org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; @@ -70,7 +69,6 @@ import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.IndexResolver; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.lang.invoke.MethodHandles; import java.util.ArrayList; @@ -193,14 +191,12 @@ public List getNamedWriteables() { entries.add(AsyncOperator.Status.ENTRY); entries.add(EnrichLookupOperator.Status.ENTRY); entries.addAll(Block.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); // TODO combine with above once these are in the same project entries.addAll(NamedExpression.getNamedWriteables()); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); // TODO combine with above once these are in the same project entries.addAll(Expression.getNamedWriteables()); entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); // TODO combine with above once these are in the same project - entries.add(MultiTypeEsField.ENTRY); // TODO combine with EsField.getNamedWriteables() once these are in the same module entries.addAll(EsqlScalarFunction.getNamedWriteables()); entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(LogicalPlan.getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java index d8de034111865..339e7159ed87d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; @@ -119,7 +118,6 @@ public static NamedWriteableRegistry writableRegistry() { entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, RegexpQueryBuilder.NAME, RegexpQueryBuilder::new)); entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, ExistsQueryBuilder.NAME, ExistsQueryBuilder::new)); entries.add(SingleValueQuery.ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); entries.addAll(NamedExpression.getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java index 7a00f8ef154ce..596ff2af5fb5a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import 
org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; @@ -37,7 +36,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.add(UnsupportedAttribute.ENTRY); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.add(org.elasticsearch.xpack.esql.expression.Order.ENTRY); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java index 36f8b43e69378..2a6791a1f5300 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.tree.SourceTests; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; @@ -81,7 +80,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(NamedExpression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java index c625ae5dfb61b..76b813f08d818 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; @@ -56,7 +55,6 @@ protected final ExtraAttribute mutateInstance(ExtraAttribute instance) { protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); - entries.addAll(EsField.getNamedWriteables()); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java index 03befe66ac28e..8090a20ddc836 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java @@ -11,9 +11,9 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.AbstractEsFieldTypeTests; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.type.AbstractEsFieldTypeTests; public class FieldAttributeTests extends AbstractAttributeTestCase { public static FieldAttribute createFieldAttribute(int maxDepth, boolean onlyRepresentable) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java index 4ab2959b37d29..8e5c098c429db 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; -import org.elasticsearch.xpack.esql.core.type.UnsupportedEsFieldTests; +import org.elasticsearch.xpack.esql.type.UnsupportedEsFieldTests; public class UnsupportedAttributeTests extends AbstractAttributeTestCase { @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java index e1b56d61a211c..504cf4ec1cd12 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java @@ -8,14 +8,16 @@ package org.elasticsearch.xpack.esql.index; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.core.type.EsFieldTests; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.type.EsFieldTests; import java.io.IOException; import java.util.HashMap; @@ -56,7 +58,12 @@ private static Set randomConcreteIndices() { @Override protected Writeable.Reader instanceReader() { - return EsIndex::new; + return a -> new EsIndex(new PlanStreamInput(a, new PlanNameRegistry(), a.namedWriteableRegistry(), null)); + } + 
+ @Override + protected Writeable.Writer instanceWriter() { + return (out, idx) -> new PlanStreamOutput(out, new PlanNameRegistry(), null).writeWriteable(idx); } @Override @@ -78,11 +85,6 @@ protected EsIndex mutateInstance(EsIndex instance) throws IOException { return new EsIndex(name, mapping, concreteIndices); } - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(EsField.getNamedWriteables()); - } - /** * Build an {@link EsIndex} with many conflicting fields across many indices. */ @@ -136,7 +138,12 @@ public static EsIndex indexWithManyConflicts(boolean withParent) { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflicts() throws IOException { - testManyTypeConflicts(false, ByteSizeValue.ofBytes(976591)); + testManyTypeConflicts(false, ByteSizeValue.ofBytes(991027)); + /* + * History: + * 953.7kb - shorten error messages for UnsupportedAttributes #111973 + * 967.7kb - cache EsFields #112008 (little overhead of the cache) + */ } /** @@ -144,11 +151,12 @@ public void testManyTypeConflicts() throws IOException { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflictsWithParent() throws IOException { - testManyTypeConflicts(true, ByteSizeValue.ofBytes(1921374)); + testManyTypeConflicts(true, ByteSizeValue.ofBytes(1374498)); /* * History: * 16.9mb - start * 1.8mb - shorten error messages for UnsupportedAttributes #111973 + * 1.3mb - cache EsFields #112008 */ } @@ -170,8 +178,8 @@ public void testManyTypeConflictsWithParent() throws IOException { *

*/ private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) throws IOException { - try (BytesStreamOutput out = new BytesStreamOutput()) { - indexWithManyConflicts(withParent).writeTo(out); + try (BytesStreamOutput out = new BytesStreamOutput(); var pso = new PlanStreamOutput(out, new PlanNameRegistry(), null)) { + indexWithManyConflicts(withParent).writeTo(pso); assertThat(ByteSizeValue.ofBytes(out.bytes().length()), byteSizeEquals(expected)); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index a5f2adbc1fc29..e5f195b053349 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -269,7 +269,7 @@ static Nullability randomNullability() { }; } - static EsField randomEsField() { + public static EsField randomEsField() { return randomEsField(0); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java index d169cdb5742af..cdb6c5384e16a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java @@ -259,6 +259,42 @@ public void testWriteDifferentAttributesSameID() throws IOException { } } + public void testWriteMultipleEsFields() throws IOException { + Configuration configuration = randomConfiguration(); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput planStream = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE, configuration) + ) { + List fields = new ArrayList<>(); + int occurrences = randomIntBetween(2, 300); + for (int i = 0; i < occurrences; i++) { + fields.add(PlanNamedTypesTests.randomEsField()); + } + + // send all the EsFields, three times + for (int i = 0; i < 3; i++) { + for (EsField attr : fields) { + attr.writeTo(planStream); + } + } + + try (PlanStreamInput in = new PlanStreamInput(out.bytes().streamInput(), PlanNameRegistry.INSTANCE, REGISTRY, configuration)) { + List readFields = new ArrayList<>(); + for (int i = 0; i < occurrences; i++) { + readFields.add(EsField.readFrom(in)); + assertThat(readFields.get(i), equalTo(fields.get(i))); + } + // two more times + for (int i = 0; i < 2; i++) { + for (int j = 0; j < occurrences; j++) { + EsField attr = EsField.readFrom(in); + assertThat(attr, sameInstance(readFields.get(j))); + } + } + } + } + } + private static Attribute randomAttribute() { return switch (randomInt(3)) { case 0 -> PlanNamedTypesTests.randomFieldAttribute(); @@ -293,7 +329,6 @@ private Column randomColumn() { writeables.addAll(Block.getNamedWriteables()); writeables.addAll(Attribute.getNamedWriteables()); writeables.add(UnsupportedAttribute.ENTRY); - writeables.addAll(EsField.getNamedWriteables()); REGISTRY = new NamedWriteableRegistry(new ArrayList<>(new HashSet<>(writeables))); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java index 8562391b2e3b0..1b9df46a1c842 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.FieldAttributeTests; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; @@ -42,7 +41,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); return new NamedWriteableRegistry(entries); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java index b7b321a022b87..7a0d125ad85ba 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; @@ -46,7 +45,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java index 237f8d6a9c580..ae58c49eade17 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.index.EsIndex; import 
org.elasticsearch.xpack.esql.index.EsIndexSerializationTests; @@ -63,7 +62,12 @@ public static Source randomSource() { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflicts() throws IOException { - testManyTypeConflicts(false, ByteSizeValue.ofBytes(2444252)); + testManyTypeConflicts(false, ByteSizeValue.ofBytes(1897374)); + /* + * History: + * 2.3mb - shorten error messages for UnsupportedAttributes #111973 + * 1.8mb - cache EsFields #112008 + */ } /** @@ -71,12 +75,13 @@ public void testManyTypeConflicts() throws IOException { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflictsWithParent() throws IOException { - testManyTypeConflicts(true, ByteSizeValue.ofBytes(5885765)); + testManyTypeConflicts(true, ByteSizeValue.ofBytes(3271487)); /* * History: * 2 gb+ - start * 43.3mb - Cache attribute subclasses #111447 * 5.6mb - shorten error messages for UnsupportedAttributes #111973 + * 3.1mb - cache EsFields #112008 */ } @@ -131,7 +136,6 @@ private NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java similarity index 57% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java index a415c529894c3..9b2bf03b5c8aa 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java @@ -5,16 +5,26 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.AbstractWireTestCase; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; +import java.util.List; import java.util.Map; import java.util.TreeMap; -public abstract class AbstractEsFieldTypeTests extends AbstractNamedWriteableTestCase { +public abstract class AbstractEsFieldTypeTests extends AbstractWireTestCase { public static EsField randomAnyEsField(int maxDepth) { return switch (between(0, 5)) { case 0 -> EsFieldTests.randomEsField(maxDepth); @@ -32,6 +42,25 @@ public static EsField randomAnyEsField(int maxDepth) { protected abstract T mutate(T instance); + @Override + protected EsField copyInstance(EsField instance, TransportVersion version) throws IOException { + NamedWriteableRegistry namedWriteableRegistry = getNamedWriteableRegistry(); + try ( + BytesStreamOutput output = new BytesStreamOutput(); + var pso = new PlanStreamOutput(output, new PlanNameRegistry(), EsqlTestUtils.TEST_CFG) + ) { + pso.setTransportVersion(version); + instance.writeTo(pso); + try ( + StreamInput in1 = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry); + var psi = new PlanStreamInput(in1, new PlanNameRegistry(), in1.namedWriteableRegistry(), EsqlTestUtils.TEST_CFG) + ) { + psi.setTransportVersion(version); + return EsField.readFrom(psi); + } + } + } + /** * Generate sub-properties. * @param maxDepth the maximum number of levels of properties to make @@ -59,11 +88,6 @@ protected final T mutateInstance(EsField instance) throws IOException { @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(EsField.getNamedWriteables()); - } - - @Override - protected final Class categoryClass() { - return EsField.class; + return new NamedWriteableRegistry(List.of()); } } diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java similarity index 99% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java index 929aa1c0eab49..9f8c8f91b7037 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java @@ -4,13 +4,16 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Location; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.Converter; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.DataTypeConverter; import org.elasticsearch.xpack.versionfield.Version; import java.math.BigDecimal; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java similarity index 89% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java index dea03ee8a8cdf..bf0494d5fd043 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.DateEsField; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java similarity index 91% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java index e72ae0c5c0cda..e824b4de03e26 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java similarity index 90% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java index 47a99329d0222..c66088b0695d4 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java @@ -5,7 +5,10 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java similarity index 92% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java index a5d3b8329b2df..ef04f0e27c096 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java @@ -5,9 +5,11 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.KeywordEsField; import java.util.Map; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java index 618ca812005f8..d4ca40b75d2f3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java @@ -9,13 +9,14 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.test.AbstractWireTestCase; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; @@ -57,7 +58,7 @@ * These differences can be minimized once Expression is fully supported in the new serialization approach, and the esql and esql.core * modules are merged, or at least the relevant classes are moved. 
*/ -public class MultiTypeEsFieldTests extends AbstractNamedWriteableTestCase { +public class MultiTypeEsFieldTests extends AbstractWireTestCase { private Configuration config; @@ -94,26 +95,19 @@ protected MultiTypeEsField mutateInstance(MultiTypeEsField instance) throws IOEx protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(UnaryScalarFunction.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); - entries.add(MultiTypeEsField.ENTRY); entries.addAll(Expression.getNamedWriteables()); return new NamedWriteableRegistry(entries); } - @Override - protected final Class categoryClass() { - return MultiTypeEsField.class; - } - @Override protected final MultiTypeEsField copyInstance(MultiTypeEsField instance, TransportVersion version) throws IOException { return copyInstance( instance, getNamedWriteableRegistry(), - (out, v) -> new PlanStreamOutput(out, new PlanNameRegistry(), config).writeNamedWriteable(v), + (out, v) -> v.writeTo(new PlanStreamOutput(out, new PlanNameRegistry(), config)), in -> { PlanStreamInput pin = new PlanStreamInput(in, new PlanNameRegistry(), in.namedWriteableRegistry(), config); - return (MultiTypeEsField) pin.readNamedWriteable(EsField.class); + return EsField.readFrom(pin); }, version ); diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java similarity index 90% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java index 817dd7cd27094..9af3b7376f2b2 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.TextEsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java similarity index 91% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java index e05d8ca10425e..a89ca9481b7e1 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. 
 */
-package org.elasticsearch.xpack.esql.core.type;
+package org.elasticsearch.xpack.esql.type;
+
+import org.elasticsearch.xpack.esql.core.type.EsField;
+import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField;
 
 import java.util.Map;
 
From 73c5c1e1c587cc7ec7ce1f0d10fea49ecfd39002 Mon Sep 17 00:00:00 2001
From: Chris Berkhout
Date: Tue, 27 Aug 2024 11:35:53 +0200
Subject: [PATCH 093/352] ByteArrayStreamInput: Return -1 when there are no more bytes to read (#112214)

---
 docs/changelog/112214.yaml                                  | 5 +++++
 .../common/io/stream/ByteArrayStreamInput.java              | 6 +++++-
 .../elasticsearch/common/io/stream/AbstractStreamTests.java | 1 +
 3 files changed, 11 insertions(+), 1 deletion(-)
 create mode 100644 docs/changelog/112214.yaml

diff --git a/docs/changelog/112214.yaml b/docs/changelog/112214.yaml
new file mode 100644
index 0000000000000..430f95a72bb3f
--- /dev/null
+++ b/docs/changelog/112214.yaml
@@ -0,0 +1,5 @@
+pr: 112214
+summary: '`ByteArrayStreamInput:` Return -1 when there are no more bytes to read'
+area: Infra/Core
+type: bug
+issues: []
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
index 838f2998d339f..a27eec4c12061 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
@@ -120,7 +120,11 @@ public void readBytes(byte[] b, int offset, int len) {
 
     @Override
     public int read(byte[] b, int off, int len) throws IOException {
-        int toRead = Math.min(len, available());
+        final int available = limit - pos;
+        if (available <= 0) {
+            return -1;
+        }
+        int toRead = Math.min(len, available);
         readBytes(b, off, toRead);
         return toRead;
     }
diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
index 8451d2fd64b9c..b1104a72400ea 100644
--- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
+++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
@@ -723,6 +723,7 @@ public void testReadAfterReachingEndOfStream() throws IOException {
             input.readBytes(new byte[len], 0, len);
 
             assertEquals(-1, input.read());
+            assertEquals(-1, input.read(new byte[2], 0, 2));
         }
     }
 
From fb32adcb174a7f32338b55737c8273fd962fefdd Mon Sep 17 00:00:00 2001
From: Johannes Fredén <109296772+jfreden@users.noreply.github.com>
Date: Tue, 27 Aug 2024 14:10:05 +0200
Subject: [PATCH 094/352] Add manage roles privilege (#110633)

This PR adds functionality to limit the resources and privileges an
Elasticsearch user can grant permissions to when creating a role. This is
achieved using a new [global](https://www.elastic.co/guide/en/elasticsearch/reference/current/defining-roles.html)
(configurable/request aware) cluster privilege, named `role`, with a
sub-key called `manage/indices`, which is an array where each entry is a
pair of [index patterns](https://docs.google.com/document/d/1VN73C2KpmvvOW85-XGUqMmnMwXrfK4aoxRtG8tPqk7Y/edit#heading=h.z74zwo30t0pf)
and [index privileges](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-indices).
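
For illustration, the `global` section of a role descriptor carrying this
privilege might look like the following sketch (the index names here are
invented for this example; a complete create-role request using this
structure is shown under Example below):

```
{
  "global": {
    "role": {
      "manage": {
        "indices": [
          { "names": ["app-logs-*"], "privileges": ["read"] },
          { "names": ["app-data-*"], "privileges": ["read", "write"] }
        ]
      }
    }
  }
}
```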
## Definition

- Using a role with this privilege to create, update or delete roles with privileges on indices outside of the indices matched by the [index patterns](https://docs.google.com/document/d/1VN73C2KpmvvOW85-XGUqMmnMwXrfK4aoxRtG8tPqk7Y/edit#heading=h.z74zwo30t0pf) in the indices array will fail.
- Using a role with this privilege to try to create, update or delete roles with cluster, run_as, etc. privileges will fail.
- Using a role with this privilege with restricted indices will fail.
- Other, broader privileges (such as manage_security) will nullify this privilege.

## Example

Create the `test-manage` role:

```
POST _security/role/test-manage
{
  "global": {
    "role": {
      "manage": {
        "indices": [
          {
            "names": ["allowed-index-prefix-*"],
            "privileges": ["read"]
          }
        ]
      }
    }
  }
}
```

A user with that role can then create this role:

```
POST _security/role/a-test-role
{
  "indices": [
    { "names": [ "allowed-index-prefix-some-index" ], "privileges": [ "read" ]}]
}
```

But this would fail for:

```
POST _security/role/a-test-role
{
  "indices": [
    { "names": [ "not-allowed-index-prefix-some-index" ], "privileges": [ "read" ]}]
}
```

Role deletion is gated as well; a sketch follows the notes below.

## Backwards compatibility and mixed cluster concerns

- A new mapping version has been added to the security index to store the new privilege.
- If the new mapping version has not been applied and a role descriptor with the new global privilege is written, the write fails with an exception.
- When sending role descriptors over the transport layer in a mixed cluster, the new global privilege needs to be excluded for older versions. This is handled with a new transport version.
- If a role descriptor is serialized for API keys on one node in a mixed cluster and read from another, an older node might not be able to deserialize it, so it needs to be removed before being written in a mixed cluster with old nodes. This is handled in the API key service.
- If a role descriptor containing the new global privilege is part of a put role request in a mixed cluster where it's not supported on all nodes, the request to create the role fails.
- RCS is not applicable here since RCS only considers cluster privileges and index privileges (not global cluster privileges).
- This doesn't include remote privileges, since the current use case with connectors doesn't need roles to be created on a cluster separate from the cluster where the search data resides.

## Follow up work

- Create a docs PR.
- Error handling for actions that use manage roles: should configurable cluster privileges that grant restricted usage of actions be listed in authorization error messages?
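Deletion sketch: judging by the request predicate added in `ManageRolesPrivilege`, a delete role request is allowed only when the role *name* matches one of the configured index patterns and the matching group grants the `delete_index` privilege, so the `test-manage` role above (which only grants `read`) cannot delete any role. As an illustrative sketch, assuming a variant of `test-manage` whose entry grants `["all"]` instead of `["read"]` (the role names below are hypothetical):

```
DELETE _security/role/allowed-index-prefix-some-index
```

would succeed, while `DELETE _security/role/some-other-role` would be rejected because its name falls outside the configured patterns.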
--- docs/changelog/110633.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/core/XPackClientPlugin.java | 7 +- .../authz/permission/ClusterPermission.java | 22 ++ .../authz/permission/IndicesPermission.java | 87 ++++- .../core/security/authz/permission/Role.java | 2 +- .../ConfigurableClusterPrivilege.java | 3 +- .../ConfigurableClusterPrivileges.java | 319 +++++++++++++++- .../authz/RoleDescriptorTestHelper.java | 35 +- .../RoleDescriptorsIntersectionTests.java | 5 + .../ConfigurableClusterPrivilegesTests.java | 8 +- .../privilege/ManageRolesPrivilegesTests.java | 351 ++++++++++++++++++ .../security/ManageRolesPrivilegeIT.java | 211 +++++++++++ .../xpack/security/apikey/ApiKeyRestIT.java | 67 ++++ .../xpack/security/authc/ApiKeyService.java | 125 ++++--- .../authz/store/NativeRolesStore.java | 11 +- .../support/SecuritySystemIndices.java | 40 ++ .../audit/logfile/LoggingAuditTrailTests.java | 10 +- .../security/audit/logfile/audited_roles.txt | 4 +- .../RolesBackwardsCompatibilityIT.java | 186 ++++++++-- 20 files changed, 1397 insertions(+), 102 deletions(-) create mode 100644 docs/changelog/110633.yaml create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java create mode 100644 x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java diff --git a/docs/changelog/110633.yaml b/docs/changelog/110633.yaml new file mode 100644 index 0000000000000..d4d1dc68cdbcc --- /dev/null +++ b/docs/changelog/110633.yaml @@ -0,0 +1,5 @@ +pr: 110633 +summary: Add manage roles privilege +area: Authorization +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 33f483c57b54e..582c618216999 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -198,6 +198,7 @@ static TransportVersion def(int id) { public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); + public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a2c3e40c76ae4..2e806a24ad469 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -149,7 +149,7 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetadata.TYPE, TokenMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TokenMetadata.TYPE, TokenMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), - // security : conditional privileges + // security : configurable cluster privileges new NamedWriteableRegistry.Entry( ConfigurableClusterPrivilege.class, ConfigurableClusterPrivileges.ManageApplicationPrivileges.WRITEABLE_NAME, @@ -160,6 +160,11 @@ public List getNamedWriteables() { ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageRolesPrivilege.WRITEABLE_NAME, + ConfigurableClusterPrivileges.ManageRolesPrivilege::createFrom + ), // security : role-mappings new NamedWriteableRegistry.Entry(Metadata.Custom.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::readDiffFrom), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java index c70f2a05bfe93..9c41786f39eeb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.support.Automatons; @@ -17,6 +18,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.Function; import java.util.function.Predicate; /** @@ -84,6 +86,16 @@ public static class Builder { private final List actionAutomatons = new ArrayList<>(); private final List permissionChecks = new ArrayList<>(); + private final RestrictedIndices restrictedIndices; + + public Builder(RestrictedIndices restrictedIndices) { + this.restrictedIndices = restrictedIndices; + } + + public Builder() { + this.restrictedIndices = null; + } + public Builder add( final ClusterPrivilege clusterPrivilege, final Set allowedActionPatterns, @@ -110,6 +122,16 @@ public Builder add(final ClusterPrivilege clusterPrivilege, final PermissionChec return this; } + public Builder addWithPredicateSupplier( + final ClusterPrivilege clusterPrivilege, + final Set 
allowedActionPatterns, + final Function> requestPredicateSupplier + ) { + final Automaton actionAutomaton = createAutomaton(allowedActionPatterns, Set.of()); + Predicate requestPredicate = requestPredicateSupplier.apply(restrictedIndices); + return add(clusterPrivilege, new ActionRequestBasedPermissionCheck(clusterPrivilege, actionAutomaton, requestPredicate)); + } + public ClusterPermission build() { if (clusterPrivileges.isEmpty()) { return NONE; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index d29b1dd67757a..e1b72cc43b38e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; @@ -86,6 +87,7 @@ public Builder addGroup( public IndicesPermission build() { return new IndicesPermission(restrictedIndices, groups.toArray(Group.EMPTY_ARRAY)); } + } private IndicesPermission(RestrictedIndices restrictedIndices, Group[] groups) { @@ -238,6 +240,21 @@ public boolean check(String action) { return false; } + public boolean checkResourcePrivileges( + Set checkForIndexPatterns, + boolean allowRestrictedIndices, + Set checkForPrivileges, + @Nullable ResourcePrivilegesMap.Builder resourcePrivilegesMapBuilder + ) { + return checkResourcePrivileges( + checkForIndexPatterns, + allowRestrictedIndices, + checkForPrivileges, + false, + resourcePrivilegesMapBuilder + ); + } + /** * For given index patterns and index privileges determines allowed privileges and creates an instance of {@link ResourcePrivilegesMap} * holding a map of resource to {@link ResourcePrivileges} where resource is index pattern and the map of index privilege to whether it @@ -246,6 +263,7 @@ public boolean check(String action) { * @param checkForIndexPatterns check permission grants for the set of index patterns * @param allowRestrictedIndices if {@code true} then checks permission grants even for restricted indices by index matching * @param checkForPrivileges check permission grants for the set of index privileges + * @param combineIndexGroups combine index groups to enable checking against regular expressions * @param resourcePrivilegesMapBuilder out-parameter for returning the details on which privilege over which resource is granted or not. * Can be {@code null} when no such details are needed so the method can return early, after * encountering the first privilege that is not granted over some resource. 
@@ -255,10 +273,13 @@ public boolean checkResourcePrivileges(
         Set<String> checkForIndexPatterns,
         boolean allowRestrictedIndices,
         Set<String> checkForPrivileges,
+        boolean combineIndexGroups,
         @Nullable ResourcePrivilegesMap.Builder resourcePrivilegesMapBuilder
     ) {
-        final Map<Group, Automaton> predicateCache = new HashMap<>();
         boolean allMatch = true;
+        Map<Automaton, Automaton> indexGroupAutomatons = indexGroupAutomatons(
+            combineIndexGroups && checkForIndexPatterns.stream().anyMatch(Automatons::isLuceneRegex)
+        );
         for (String forIndexPattern : checkForIndexPatterns) {
             Automaton checkIndexAutomaton = Automatons.patterns(forIndexPattern);
             if (false == allowRestrictedIndices && false == isConcreteRestrictedIndex(forIndexPattern)) {
@@ -266,15 +287,14 @@ public boolean checkResourcePrivileges(
             }
             if (false == Operations.isEmpty(checkIndexAutomaton)) {
                 Automaton allowedIndexPrivilegesAutomaton = null;
-                for (Group group : groups) {
-                    final Automaton groupIndexAutomaton = predicateCache.computeIfAbsent(group, Group::getIndexMatcherAutomaton);
-                    if (Operations.subsetOf(checkIndexAutomaton, groupIndexAutomaton)) {
+                for (var indexAndPrivilegeAutomaton : indexGroupAutomatons.entrySet()) {
+                    if (Operations.subsetOf(checkIndexAutomaton, indexAndPrivilegeAutomaton.getValue())) {
                         if (allowedIndexPrivilegesAutomaton != null) {
                             allowedIndexPrivilegesAutomaton = Automatons.unionAndMinimize(
-                                Arrays.asList(allowedIndexPrivilegesAutomaton, group.privilege().getAutomaton())
+                                Arrays.asList(allowedIndexPrivilegesAutomaton, indexAndPrivilegeAutomaton.getKey())
                             );
                         } else {
-                            allowedIndexPrivilegesAutomaton = group.privilege().getAutomaton();
+                            allowedIndexPrivilegesAutomaton = indexAndPrivilegeAutomaton.getKey();
                         }
                     }
                 }
@@ -656,6 +676,61 @@ private static boolean containsPrivilegeThatGrantsMappingUpdatesForBwc(Group gro
         return group.privilege().name().stream().anyMatch(PRIVILEGE_NAME_SET_BWC_ALLOW_MAPPING_UPDATE::contains);
     }
 
+    /**
+     * Get all automatons for the index groups in this permission and optionally combine the index groups to enable checking if a set of
+     * index patterns specified using a regular expression grants a set of index privileges.
+     *
+     * <p>
+     * An index group is defined as a set of index patterns and a set of privileges (excluding field permissions and DLS queries).
+     * {@link IndicesPermission} consists of a set of index groups. For non-regular-expression privilege checks, an index pattern is
+     * checked against each index group, to see if it's a sub-pattern of the index pattern for the group and then if that group grants
+     * some or all of the privileges requested. For regular expressions it's not sufficient to check per group since the index patterns
+     * covered by a group can be distinct sets and a regular expression can cover several distinct sets.
+     *
+     * <p>
+     * For example the two index groups: {"names": ["a"], "privileges": ["read", "create"]} and {"names": ["b"],
+     * "privileges": ["read","delete"]} will not match on ["/[ab]/"], while a single index group:
+     * {"names": ["a", "b"], "privileges": ["read"]} will. This happens because the index groups are evaluated against a request index
+     * pattern without first being combined. In the example above, the two index patterns should be combined to:
+     * {"names": ["a", "b"], "privileges": ["read"]} before being checked.
+     *
+     * @param combine combine index groups to allow for checking against regular expressions
+     *
+     * @return a map of all index and privilege pattern automatons
+     */
+    private Map<Automaton, Automaton> indexGroupAutomatons(boolean combine) {
+        // Map of privilege automaton object references (cached by IndexPrivilege::CACHE)
+        Map<Automaton, Automaton> allAutomatons = new HashMap<>();
+        for (Group group : groups) {
+            Automaton indexAutomaton = group.getIndexMatcherAutomaton();
+            allAutomatons.compute(
+                group.privilege().getAutomaton(),
+                (key, value) -> value == null ? indexAutomaton : Automatons.unionAndMinimize(List.of(value, indexAutomaton))
+            );
+            if (combine) {
+                List<Tuple<Automaton, Automaton>> combinedAutomatons = new ArrayList<>();
+                for (var indexAndPrivilegeAutomatons : allAutomatons.entrySet()) {
+                    Automaton intersectingPrivileges = Operations.intersection(
+                        indexAndPrivilegeAutomatons.getKey(),
+                        group.privilege().getAutomaton()
+                    );
+                    if (Operations.isEmpty(intersectingPrivileges) == false) {
+                        Automaton indexPatternAutomaton = Automatons.unionAndMinimize(
+                            List.of(indexAndPrivilegeAutomatons.getValue(), indexAutomaton)
+                        );
+                        combinedAutomatons.add(new Tuple<>(intersectingPrivileges, indexPatternAutomaton));
+                    }
+                }
+                combinedAutomatons.forEach(
+                    automatons -> allAutomatons.compute(
+                        automatons.v1(),
+                        (key, value) -> value == null ?
automatons.v2() : Automatons.unionAndMinimize(List.of(value, automatons.v2())) + ) + ); + } + } + return allAutomatons; + } + public static class Group { public static final Group[] EMPTY_ARRAY = new Group[0]; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java index 0fc04e8cc9a52..d8d56a4fbb247 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -233,7 +233,7 @@ private Builder(RestrictedIndices restrictedIndices, String[] names) { } public Builder cluster(Set privilegeNames, Iterable configurableClusterPrivileges) { - ClusterPermission.Builder builder = ClusterPermission.builder(); + ClusterPermission.Builder builder = new ClusterPermission.Builder(restrictedIndices); if (privilegeNames.isEmpty() == false) { for (String name : privilegeNames) { builder = ClusterPrivilegeResolver.resolve(name).buildPermission(builder); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java index f9722ca42f20d..edb0cb8f9e79d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java @@ -41,7 +41,8 @@ public interface ConfigurableClusterPrivilege extends NamedWriteable, ToXContent */ enum Category { APPLICATION(new ParseField("application")), - PROFILE(new ParseField("profile")); + PROFILE(new ParseField("profile")), + ROLE(new ParseField("role")); public final ParseField field; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java index fed8b7e0d7a1c..b93aa079a28d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.core.security.authz.privilege; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -17,10 +20,21 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.privilege.ApplicationPrivilegesRequest; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import 
org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege.Category; import org.elasticsearch.xpack.core.security.support.StringMatcher; import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; @@ -30,12 +44,18 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.TreeMap; +import java.util.function.Function; import java.util.function.Predicate; +import static org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege.DELETE_INDEX; + /** * Static utility class for working with {@link ConfigurableClusterPrivilege} instances */ @@ -43,6 +63,7 @@ public final class ConfigurableClusterPrivileges { public static final ConfigurableClusterPrivilege[] EMPTY_ARRAY = new ConfigurableClusterPrivilege[0]; + private static final Logger logger = LogManager.getLogger(ConfigurableClusterPrivileges.class); public static final Writeable.Reader READER = in1 -> in1.readNamedWriteable( ConfigurableClusterPrivilege.class ); @@ -61,7 +82,16 @@ public static ConfigurableClusterPrivilege[] readArray(StreamInput in) throws IO * Utility method to write an array of {@link ConfigurableClusterPrivilege} objects to a {@link StreamOutput} */ public static void writeArray(StreamOutput out, ConfigurableClusterPrivilege[] privileges) throws IOException { - out.writeArray(WRITER, privileges); + if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) { + out.writeArray(WRITER, privileges); + } else { + out.writeArray( + WRITER, + Arrays.stream(privileges) + .filter(privilege -> privilege instanceof ManageRolesPrivilege == false) + .toArray(ConfigurableClusterPrivilege[]::new) + ); + } } /** @@ -97,7 +127,7 @@ public static List parse(XContentParser parser) th while (parser.nextToken() != XContentParser.Token.END_OBJECT) { expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); - expectFieldName(parser, Category.APPLICATION.field, Category.PROFILE.field); + expectFieldName(parser, Category.APPLICATION.field, Category.PROFILE.field, Category.ROLE.field); if (Category.APPLICATION.field.match(parser.currentName(), parser.getDeprecationHandler())) { expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { @@ -106,8 +136,7 @@ public static List parse(XContentParser parser) th expectFieldName(parser, ManageApplicationPrivileges.Fields.MANAGE); privileges.add(ManageApplicationPrivileges.parse(parser)); } - } else { - assert Category.PROFILE.field.match(parser.currentName(), parser.getDeprecationHandler()); 
+ } else if (Category.PROFILE.field.match(parser.currentName(), parser.getDeprecationHandler())) { expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); @@ -115,9 +144,16 @@ public static List parse(XContentParser parser) th expectFieldName(parser, WriteProfileDataPrivileges.Fields.WRITE); privileges.add(WriteProfileDataPrivileges.parse(parser)); } + } else if (Category.ROLE.field.match(parser.currentName(), parser.getDeprecationHandler())) { + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); + + expectFieldName(parser, ManageRolesPrivilege.Fields.MANAGE); + privileges.add(ManageRolesPrivilege.parse(parser)); + } } } - return privileges; } @@ -362,4 +398,277 @@ private interface Fields { ParseField APPLICATIONS = new ParseField("applications"); } } + + public static class ManageRolesPrivilege implements ConfigurableClusterPrivilege { + public static final String WRITEABLE_NAME = "manage-roles-privilege"; + private final List indexPermissionGroups; + private final Function> requestPredicateSupplier; + + private static final Set EXPECTED_INDEX_GROUP_FIELDS = Set.of( + Fields.NAMES.getPreferredName(), + Fields.PRIVILEGES.getPreferredName() + ); + + public ManageRolesPrivilege(List manageRolesIndexPermissionGroups) { + this.indexPermissionGroups = manageRolesIndexPermissionGroups; + this.requestPredicateSupplier = (restrictedIndices) -> { + IndicesPermission.Builder indicesPermissionBuilder = new IndicesPermission.Builder(restrictedIndices); + for (ManageRolesIndexPermissionGroup indexPatternPrivilege : manageRolesIndexPermissionGroups) { + indicesPermissionBuilder.addGroup( + IndexPrivilege.get(Set.of(indexPatternPrivilege.privileges())), + FieldPermissions.DEFAULT, + null, + false, + indexPatternPrivilege.indexPatterns() + ); + } + final IndicesPermission indicesPermission = indicesPermissionBuilder.build(); + + return (TransportRequest request) -> { + if (request instanceof final PutRoleRequest putRoleRequest) { + return hasNonIndexPrivileges(putRoleRequest.roleDescriptor()) == false + && Arrays.stream(putRoleRequest.indices()) + .noneMatch( + indexPrivilege -> requestIndexPatternsAllowed( + indicesPermission, + indexPrivilege.getIndices(), + indexPrivilege.getPrivileges() + ) == false + ); + } else if (request instanceof final BulkPutRolesRequest bulkPutRoleRequest) { + return bulkPutRoleRequest.getRoles().stream().noneMatch(ManageRolesPrivilege::hasNonIndexPrivileges) + && bulkPutRoleRequest.getRoles() + .stream() + .allMatch( + roleDescriptor -> Arrays.stream(roleDescriptor.getIndicesPrivileges()) + .noneMatch( + indexPrivilege -> requestIndexPatternsAllowed( + indicesPermission, + indexPrivilege.getIndices(), + indexPrivilege.getPrivileges() + ) == false + ) + ); + } else if (request instanceof final DeleteRoleRequest deleteRoleRequest) { + return requestIndexPatternsAllowed( + indicesPermission, + new String[] { deleteRoleRequest.name() }, + DELETE_INDEX.name().toArray(String[]::new) + ); + } else if (request instanceof final BulkDeleteRolesRequest bulkDeleteRoleRequest) { + return requestIndexPatternsAllowed( + indicesPermission, + bulkDeleteRoleRequest.getRoleNames().toArray(String[]::new), + DELETE_INDEX.name().toArray(String[]::new) + ); + } + 
throw new IllegalArgumentException("Unsupported request type [" + request.getClass() + "]"); + }; + }; + } + + @Override + public Category getCategory() { + return Category.ROLE; + } + + @Override + public String getWriteableName() { + return WRITEABLE_NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(indexPermissionGroups); + } + + public static ManageRolesPrivilege createFrom(StreamInput in) throws IOException { + final List indexPatternPrivileges = in.readCollectionAsList( + ManageRolesIndexPermissionGroup::createFrom + ); + return new ManageRolesPrivilege(indexPatternPrivileges); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field( + Fields.MANAGE.getPreferredName(), + Map.of(Fields.INDICES.getPreferredName(), indexPermissionGroups.stream().map(indexPatternPrivilege -> { + Map sortedMap = new TreeMap<>(); + sortedMap.put(Fields.NAMES.getPreferredName(), indexPatternPrivilege.indexPatterns()); + sortedMap.put(Fields.PRIVILEGES.getPreferredName(), indexPatternPrivilege.privileges()); + return sortedMap; + }).toList()) + ); + } + + private static void expectedIndexGroupFields(String fieldName, XContentParser parser) { + if (EXPECTED_INDEX_GROUP_FIELDS.contains(fieldName) == false) { + throw new XContentParseException( + parser.getTokenLocation(), + "failed to parse privilege. expected one of " + + Arrays.toString(EXPECTED_INDEX_GROUP_FIELDS.toArray(String[]::new)) + + " but found [" + + fieldName + + "] instead" + ); + } + } + + public static ManageRolesPrivilege parse(XContentParser parser) throws IOException { + expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); + expectFieldName(parser, Fields.MANAGE); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + expectFieldName(parser, Fields.INDICES); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + List indexPrivileges = new ArrayList<>(); + Map parsedArraysByFieldName = new HashMap<>(); + + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + expectedToken(token, parser, XContentParser.Token.START_OBJECT); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + String currentFieldName = parser.currentName(); + expectedIndexGroupFields(currentFieldName, parser); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + parsedArraysByFieldName.put(currentFieldName, XContentUtils.readStringArray(parser, false)); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + currentFieldName = parser.currentName(); + expectedIndexGroupFields(currentFieldName, parser); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + parsedArraysByFieldName.put(currentFieldName, XContentUtils.readStringArray(parser, false)); + expectedToken(parser.nextToken(), parser, XContentParser.Token.END_OBJECT); + indexPrivileges.add( + new ManageRolesIndexPermissionGroup( + parsedArraysByFieldName.get(Fields.NAMES.getPreferredName()), + parsedArraysByFieldName.get(Fields.PRIVILEGES.getPreferredName()) + ) + ); + } + expectedToken(parser.nextToken(), parser, XContentParser.Token.END_OBJECT); + + for (var indexPrivilege : indexPrivileges) { + if (indexPrivilege.indexPatterns == null || indexPrivilege.indexPatterns.length == 
0) { + throw new IllegalArgumentException("Indices privileges must refer to at least one index name or index name pattern"); + } + if (indexPrivilege.privileges == null || indexPrivilege.privileges.length == 0) { + throw new IllegalArgumentException("Indices privileges must define at least one privilege"); + } + } + return new ManageRolesPrivilege(indexPrivileges); + } + + public record ManageRolesIndexPermissionGroup(String[] indexPatterns, String[] privileges) implements Writeable { + public static ManageRolesIndexPermissionGroup createFrom(StreamInput in) throws IOException { + return new ManageRolesIndexPermissionGroup(in.readStringArray(), in.readStringArray()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(indexPatterns); + out.writeStringArray(privileges); + } + + @Override + public String toString() { + return "{" + + Fields.NAMES + + ":" + + Arrays.toString(indexPatterns()) + + ":" + + Fields.PRIVILEGES + + ":" + + Arrays.toString(privileges()) + + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ManageRolesIndexPermissionGroup that = (ManageRolesIndexPermissionGroup) o; + return Arrays.equals(indexPatterns, that.indexPatterns) && Arrays.equals(privileges, that.privileges); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indexPatterns), Arrays.hashCode(privileges)); + } + } + + @Override + public String toString() { + return "{" + + getCategory() + + ":" + + Fields.MANAGE.getPreferredName() + + ":" + + Fields.INDICES.getPreferredName() + + "=[" + + Strings.collectionToDelimitedString(indexPermissionGroups, ",") + + "]}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ManageRolesPrivilege that = (ManageRolesPrivilege) o; + + if (this.indexPermissionGroups.size() != that.indexPermissionGroups.size()) { + return false; + } + + for (int i = 0; i < this.indexPermissionGroups.size(); i++) { + if (Objects.equals(this.indexPermissionGroups.get(i), that.indexPermissionGroups.get(i)) == false) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + return Objects.hash(indexPermissionGroups.hashCode()); + } + + @Override + public ClusterPermission.Builder buildPermission(final ClusterPermission.Builder builder) { + return builder.addWithPredicateSupplier( + this, + Set.of(PutRoleAction.NAME, ActionTypes.BULK_PUT_ROLES.name(), ActionTypes.BULK_DELETE_ROLES.name(), DeleteRoleAction.NAME), + requestPredicateSupplier + ); + } + + private static boolean requestIndexPatternsAllowed( + IndicesPermission indicesPermission, + String[] requestIndexPatterns, + String[] privileges + ) { + return indicesPermission.checkResourcePrivileges(Set.of(requestIndexPatterns), false, Set.of(privileges), true, null); + } + + private static boolean hasNonIndexPrivileges(RoleDescriptor roleDescriptor) { + return roleDescriptor.hasApplicationPrivileges() + || roleDescriptor.hasClusterPrivileges() + || roleDescriptor.hasConfigurableClusterPrivileges() + || roleDescriptor.hasRemoteIndicesPrivileges() + || roleDescriptor.hasRemoteClusterPermissions() + || roleDescriptor.hasRunAs() + || roleDescriptor.hasWorkflowsRestriction(); + } + + private interface Fields { + ParseField MANAGE = new ParseField("manage"); + ParseField INDICES = new ParseField("indices"); + ParseField PRIVILEGES 
= new ParseField("privileges"); + ParseField NAMES = new ParseField("names"); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java index 2d8b62335f4ef..77a37cec45b25 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java @@ -26,6 +26,7 @@ import static org.elasticsearch.test.ESTestCase.generateRandomStringArray; import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; +import static org.elasticsearch.test.ESTestCase.randomArray; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.ESTestCase.randomIntBetween; @@ -52,6 +53,7 @@ public static RoleDescriptor randomRoleDescriptor() { .allowRestriction(randomBoolean()) .allowDescription(randomBoolean()) .allowRemoteClusters(randomBoolean()) + .allowConfigurableClusterPrivileges(randomBoolean()) .build(); } @@ -69,7 +71,7 @@ public static Map randomRoleDescriptorMetadata(boolean allowRese } public static ConfigurableClusterPrivilege[] randomClusterPrivileges() { - final ConfigurableClusterPrivilege[] configurableClusterPrivileges = switch (randomIntBetween(0, 4)) { + return switch (randomIntBetween(0, 5)) { case 0 -> new ConfigurableClusterPrivilege[0]; case 1 -> new ConfigurableClusterPrivilege[] { new ConfigurableClusterPrivileges.ManageApplicationPrivileges( @@ -93,9 +95,9 @@ public static ConfigurableClusterPrivilege[] randomClusterPrivileges() { new ConfigurableClusterPrivileges.WriteProfileDataPrivileges( Sets.newHashSet(generateRandomStringArray(3, randomIntBetween(4, 12), false, false)) ) }; + case 5 -> randomManageRolesPrivileges(); default -> throw new IllegalStateException("Unexpected value"); }; - return configurableClusterPrivileges; } public static RoleDescriptor.ApplicationResourcePrivileges[] randomApplicationPrivileges() { @@ -119,6 +121,27 @@ public static RoleDescriptor.ApplicationResourcePrivileges[] randomApplicationPr return applicationPrivileges; } + public static ConfigurableClusterPrivilege[] randomManageRolesPrivileges() { + List indexPatternPrivileges = randomList( + 1, + 10, + () -> { + String[] indexPatterns = randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(5, 100)); + + int startIndex = randomIntBetween(0, IndexPrivilege.names().size() - 2); + int endIndex = randomIntBetween(startIndex + 1, IndexPrivilege.names().size()); + + String[] indexPrivileges = IndexPrivilege.names().stream().toList().subList(startIndex, endIndex).toArray(String[]::new); + return new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + indexPatterns, + indexPrivileges + ); + } + ); + + return new ConfigurableClusterPrivilege[] { new ConfigurableClusterPrivileges.ManageRolesPrivilege(indexPatternPrivileges) }; + } + public static RoleDescriptor.RemoteIndicesPrivileges[] randomRemoteIndicesPrivileges(int min, int max) { return randomRemoteIndicesPrivileges(min, max, Set.of()); } @@ -251,6 +274,7 @@ public static class Builder { private boolean allowRestriction = false; private boolean allowDescription = false; private boolean allowRemoteClusters = false; + private boolean allowConfigurableClusterPrivileges = 
false; public Builder() {} @@ -259,6 +283,11 @@ public Builder allowReservedMetadata(boolean allowReservedMetadata) { return this; } + public Builder allowConfigurableClusterPrivileges(boolean allowConfigurableClusterPrivileges) { + this.allowConfigurableClusterPrivileges = allowConfigurableClusterPrivileges; + return this; + } + public Builder alwaysIncludeRemoteIndices() { this.alwaysIncludeRemoteIndices = true; return this; @@ -302,7 +331,7 @@ public RoleDescriptor build() { randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), randomIndicesPrivileges(0, 3), randomApplicationPrivileges(), - randomClusterPrivileges(), + allowConfigurableClusterPrivileges ? randomClusterPrivileges() : null, generateRandomStringArray(5, randomIntBetween(2, 8), false, true), randomRoleDescriptorMetadata(allowReservedMetadata), Map.of(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java index a892e8b864e6e..b67292e76961f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java @@ -48,6 +48,11 @@ public void testSerialization() throws IOException { ConfigurableClusterPrivilege.class, ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom + ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageRolesPrivilege.WRITEABLE_NAME, + ConfigurableClusterPrivileges.ManageRolesPrivilege::createFrom ) ) ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java index c6fac77ea26e6..5599b33fbcfe7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java @@ -61,13 +61,15 @@ public void testGenerateAndParseXContent() throws Exception { } private ConfigurableClusterPrivilege[] buildSecurityPrivileges() { - return switch (randomIntBetween(0, 3)) { + return switch (randomIntBetween(0, 4)) { case 0 -> new ConfigurableClusterPrivilege[0]; case 1 -> new ConfigurableClusterPrivilege[] { ManageApplicationPrivilegesTests.buildPrivileges() }; case 2 -> new ConfigurableClusterPrivilege[] { WriteProfileDataPrivilegesTests.buildPrivileges() }; - case 3 -> new ConfigurableClusterPrivilege[] { + case 3 -> new ConfigurableClusterPrivilege[] { ManageRolesPrivilegesTests.buildPrivileges() }; + case 4 -> new ConfigurableClusterPrivilege[] { ManageApplicationPrivilegesTests.buildPrivileges(), - WriteProfileDataPrivilegesTests.buildPrivileges() }; + WriteProfileDataPrivilegesTests.buildPrivileges(), + ManageRolesPrivilegesTests.buildPrivileges() }; default -> throw new IllegalStateException("Unexpected value"); }; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java new file mode 100644 index 0000000000000..2d47752063d9d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java @@ -0,0 +1,351 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges.ManageRolesPrivilege; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.test.TestRestrictedIndices; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsEqual.equalTo; + +public class ManageRolesPrivilegesTests extends AbstractNamedWriteableTestCase { + + private static final int MIN_INDEX_NAME_LENGTH = 4; + + public void testSimplePutRoleRequest() { + new ReservedRolesStore(); + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), true); + assertAllowedIndexPatterns(permission, randomArray(1, 10, String[]::new, () -> "not-allowed-" + randomAlphaOfLength(5)), false); + assertAllowedIndexPatterns( + permission, + new String[] { "allowed-" + 
randomAlphaOfLength(5), "not-allowed-" + randomAlphaOfLength(5) }, + false + ); + } + + public void testDeleteRoleRequest() { + new ReservedRolesStore(); + { + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "manage" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), true); + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "not-allowed-" + randomAlphaOfLength(5)), false); + assertAllowedDeleteIndex( + permission, + new String[] { "allowed-" + randomAlphaOfLength(5), "not-allowed-" + randomAlphaOfLength(5) }, + false + ); + } + { + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "read" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), false); + } + } + + public void testSeveralIndexGroupsPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "a", "b" }, new String[] { "read" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "c" }, new String[] { "read" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "d" }, new String[] { "read" }) + ) + ); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "/[ab]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[cd]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[acd]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ef]/" }, new String[] { "read" }, false); + } + + public void testPrivilegeIntersectionPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "a", "b" }, new String[] { "all" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "c" }, new String[] { "create" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "d" }, new String[] { "delete" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "e" }, new String[] { "create_doc" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "f" }, new String[] { "read", "manage" }) + ) + ); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new 
String[] { "/[ab]/" }, new String[] { "all" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abc]/" }, new String[] { "all" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[ab]/" }, new String[] { "read", "manage" }, true); + + assertAllowedIndexPatterns(permission, new String[] { "/[ac]/" }, new String[] { "create" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ac]/" }, new String[] { "create", "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abcde]/" }, new String[] { "create_doc" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[ce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[eb]/" }, new String[] { "create_doc" }, true); + + assertAllowedIndexPatterns(permission, new String[] { "/[d]/" }, new String[] { "delete" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ad]/" }, new String[] { "delete" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[de]/" }, new String[] { "delete" }, false); + + assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "manage" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "write" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "manage" }, true); + } + + public void testEmptyPrivileges() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege(List.of()); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "test" }, new String[] { "all" }, false); + } + + public void testRestrictedIndexPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "security" }, true); + assertAllowedIndexPatterns(permission, new String[] { ".security" }, false); + assertAllowedIndexPatterns(permission, new String[] { "security", ".security-7" }, false); + } + + public void testGenerateAndParseXContent() throws Exception { + final XContent xContent = randomFrom(XContentType.values()).xContent(); + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + final XContentBuilder builder = new XContentBuilder(xContent, out); + + final ManageRolesPrivilege original = buildPrivileges(); + builder.startObject(); + original.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.flush(); + + final byte[] bytes = out.toByteArray(); + try (XContentParser parser = xContent.createParser(XContentParserConfiguration.EMPTY, bytes)) { + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(parser.nextToken(), 
equalTo(XContentParser.Token.FIELD_NAME)); + final ManageRolesPrivilege clone = ManageRolesPrivilege.parse(parser); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT)); + + assertThat(clone, equalTo(original)); + assertThat(original, equalTo(clone)); + } + } + } + + public void testPutRoleRequestContainsNonIndexPrivileges() { + new ReservedRolesStore(); + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + final PutRoleRequest putRoleRequest = new PutRoleRequest(); + + switch (randomIntBetween(0, 5)) { + case 0: + putRoleRequest.cluster("all"); + break; + case 1: + putRoleRequest.runAs("test"); + break; + case 2: + putRoleRequest.addApplicationPrivileges( + RoleDescriptor.ApplicationResourcePrivileges.builder() + .privileges("all") + .application("test-app") + .resources("test-resource") + .build() + ); + break; + case 3: + putRoleRequest.addRemoteIndex( + new RoleDescriptor.RemoteIndicesPrivileges.Builder("test-cluster").privileges("all").indices("test*").build() + ); + break; + case 4: + putRoleRequest.putRemoteCluster( + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "test" }) + ) + ); + break; + case 5: + putRoleRequest.conditionalCluster( + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "test-*" }, new String[] { "read" }) + ) + ) + ); + break; + } + + putRoleRequest.name(randomAlphaOfLength(4)); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/put", putRoleRequest), is(false)); + } + + private static boolean permissionCheck(ClusterPermission permission, String action, ActionRequest request) { + final Authentication authentication = AuthenticationTestHelper.builder().build(); + assertThat(request.validate(), nullValue()); + return permission.check(action, request, authentication); + } + + private static void assertAllowedIndexPatterns(ClusterPermission permission, String[] indexPatterns, boolean expected) { + assertAllowedIndexPatterns(permission, indexPatterns, new String[] { "index", "write", "indices:data/read" }, expected); + } + + private static void assertAllowedIndexPatterns( + ClusterPermission permission, + String[] indexPatterns, + String[] privileges, + boolean expected + ) { + { + final PutRoleRequest putRoleRequest = new PutRoleRequest(); + putRoleRequest.name(randomAlphaOfLength(3)); + putRoleRequest.addIndex(indexPatterns, privileges, null, null, null, false); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/put", putRoleRequest), is(expected)); + } + { + final BulkPutRolesRequest bulkPutRolesRequest = new BulkPutRolesRequest( + List.of( + new RoleDescriptor( + randomAlphaOfLength(3), + new String[] {}, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(indexPatterns).privileges(privileges).build() }, + new String[] {} + ) + ) + ); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/bulk_put", bulkPutRolesRequest), is(expected)); + } + } + + private static void assertAllowedDeleteIndex(ClusterPermission permission, String[] 
indices, boolean expected) { + { + final BulkDeleteRolesRequest bulkDeleteRolesRequest = new BulkDeleteRolesRequest(List.of(indices)); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/bulk_delete", bulkDeleteRolesRequest), is(expected)); + } + { + assertThat(Arrays.stream(indices).allMatch(pattern -> { + final DeleteRoleRequest deleteRolesRequest = new DeleteRoleRequest(); + deleteRolesRequest.name(pattern); + return permissionCheck(permission, "cluster:admin/xpack/security/role/delete", deleteRolesRequest); + }), is(expected)); + } + } + + public static ManageRolesPrivilege buildPrivileges() { + return buildPrivileges(randomIntBetween(MIN_INDEX_NAME_LENGTH, 7)); + } + + private static ManageRolesPrivilege buildPrivileges(int indexNameLength) { + String[] indexNames = Objects.requireNonNull(generateRandomStringArray(5, indexNameLength, false, false)); + + return new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(indexNames, IndexPrivilege.READ.name().toArray(String[]::new))) + ); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + try (var xClientPlugin = new XPackClientPlugin()) { + return new NamedWriteableRegistry(xClientPlugin.getNamedWriteables()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + protected Class categoryClass() { + return ConfigurableClusterPrivilege.class; + } + + @Override + protected ConfigurableClusterPrivilege createTestInstance() { + return buildPrivileges(); + } + + @Override + protected ConfigurableClusterPrivilege mutateInstance(ConfigurableClusterPrivilege instance) throws IOException { + if (instance instanceof ManageRolesPrivilege) { + return buildPrivileges(MIN_INDEX_NAME_LENGTH - 1); + } + fail(); + return null; + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java new file mode 100644 index 0000000000000..728f068adcae4 --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.TestSecurityClient; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.core.StringContains.containsString; + +public class ManageRolesPrivilegeIT extends SecurityInBasicRestTestCase { + + private TestSecurityClient adminSecurityClient; + private static final SecureString TEST_PASSWORD = new SecureString("100%-secure-password".toCharArray()); + + @Before + public void setupClient() { + adminSecurityClient = new TestSecurityClient(adminClient()); + } + + public void testManageRoles() throws Exception { + createManageRolesRole("manage-roles-role", new String[0], Set.of("*-allowed-suffix"), Set.of("read", "write")); + createUser("test-user", Set.of("manage-roles-role")); + + String authHeader = basicAuthHeaderValue("test-user", TEST_PASSWORD); + + createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("test-allowed-suffix").privileges(Set.of("read", "write")).build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + + { + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("test-suffix-not-allowed").privileges("write").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + + assertThat( + responseException.getMessage(), + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + } + + { + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("test-allowed-suffix").privileges("manage").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + assertThat( + responseException.getMessage(), + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + } + } + + public void testManageSecurityNullifiesManageRoles() throws Exception { + createManageRolesRole("manage-roles-no-manage-security", new String[0], Set.of("allowed")); + createManageRolesRole("manage-roles-manage-security", new String[] { "manage_security" }, Set.of("allowed")); + + createUser("test-user-no-manage-security", Set.of("manage-roles-no-manage-security")); + createUser("test-user-manage-security", Set.of("manage-roles-manage-security")); + + String 
authHeaderNoManageSecurity = basicAuthHeaderValue("test-user-no-manage-security", TEST_PASSWORD); + String authHeaderManageSecurity = basicAuthHeaderValue("test-user-manage-security", TEST_PASSWORD); + + createRole( + authHeaderNoManageSecurity, + new RoleDescriptor( + "test-role-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeaderNoManageSecurity, + new RoleDescriptor( + "test-role-not-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("not-allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + + assertThat( + responseException.getMessage(), + // TODO Should the new global role/manage privilege be listed here? Probably not because it's not documented + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + + createRole( + authHeaderManageSecurity, + new RoleDescriptor( + "test-role-not-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("not-allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + } + + private void createRole(String authHeader, RoleDescriptor descriptor) throws IOException { + TestSecurityClient userAuthSecurityClient = new TestSecurityClient( + adminClient(), + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", authHeader).build() + ); + userAuthSecurityClient.putRole(descriptor); + } + + private void createUser(String username, Set roles) throws IOException { + adminSecurityClient.putUser(new User(username, roles.toArray(String[]::new)), TEST_PASSWORD); + } + + private void createManageRolesRole(String roleName, String[] clusterPrivileges, Set indexPatterns) throws IOException { + createManageRolesRole(roleName, clusterPrivileges, indexPatterns, Set.of("read")); + } + + private void createManageRolesRole(String roleName, String[] clusterPrivileges, Set indexPatterns, Set privileges) + throws IOException { + adminSecurityClient.putRole( + new RoleDescriptor( + roleName, + clusterPrivileges, + new RoleDescriptor.IndicesPrivileges[0], + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[] { + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + indexPatterns.toArray(String[]::new), + privileges.toArray(String[]::new) + ) + ) + ) }, + new String[0], + Map.of(), + Map.of() + ) + ); + } +} diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index 5ae84517202d4..667140b849951 100644 --- 
a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -31,6 +31,8 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; import org.junit.After; import org.junit.Before; @@ -385,6 +387,50 @@ public void testGrantApiKeyWithOnlyManageOwnApiKeyPrivilegeFails() throws IOExce assertThat(e.getMessage(), containsString("action [" + GrantApiKeyAction.NAME + "] is unauthorized for user")); } + public void testApiKeyWithManageRoles() throws IOException { + RoleDescriptor role = roleWithManageRoles("manage-roles-role", new String[] { "manage_own_api_key" }, "allowed-prefix*"); + getSecurityClient().putRole(role); + createUser("test-user", END_USER_PASSWORD, List.of("manage-roles-role")); + + final Request createApiKeyrequest = new Request("POST", "_security/api_key"); + createApiKeyrequest.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", basicAuthHeaderValue("test-user", END_USER_PASSWORD)) + ); + final Map requestBody = Map.of( + "name", + "test-api-key", + "role_descriptors", + Map.of( + "test-role", + XContentTestUtils.convertToMap(roleWithManageRoles("test-role", new String[0], "allowed-prefix*")), + "another-test-role", + // This is not allowed by the limited-by-role (creator of the api key), so should not grant access to not-allowed=prefix* + XContentTestUtils.convertToMap(roleWithManageRoles("another-test-role", new String[0], "not-allowed-prefix*")) + ) + ); + + createApiKeyrequest.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString()); + Map responseMap = responseAsMap(client().performRequest(createApiKeyrequest)); + String encodedApiKey = responseMap.get("encoded").toString(); + + final Request createRoleRequest = new Request("POST", "_security/role/test-role"); + createRoleRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encodedApiKey)); + // Allowed role by manage roles permission + { + createRoleRequest.setJsonEntity(""" + {"indices": [{"names": ["allowed-prefix-test"],"privileges": ["read"]}]}"""); + assertOK(client().performRequest(createRoleRequest)); + } + // Not allowed role by manage roles permission + { + createRoleRequest.setJsonEntity(""" + {"indices": [{"names": ["not-allowed-prefix-test"],"privileges": ["read"]}]}"""); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createRoleRequest)); + assertEquals(403, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("this action is granted by the cluster privileges [manage_security,all]")); + } + } + public void testUpdateApiKey() throws IOException { final var apiKeyName = "my-api-key-name"; final Map apiKeyMetadata = Map.of("not", "returned"); @@ -2393,6 +2439,27 @@ private void createRole(String name, Collection localClusterPrivileges, getSecurityClient().putRole(role); } + private 
RoleDescriptor roleWithManageRoles(String name, String[] clusterPrivileges, String indexPattern) { + return new RoleDescriptor( + name, + clusterPrivileges, + null, + null, + new ConfigurableClusterPrivilege[] { + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + new String[] { indexPattern }, + new String[] { "read" } + ) + ) + ) }, + null, + null, + null + ); + } + protected void createRoleWithDescription(String name, Collection clusterPrivileges, String description) throws IOException { final RoleDescriptor role = new RoleDescriptor( name, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index d88577f905e96..90566e25b4ea5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -100,6 +100,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleReference; import org.elasticsearch.xpack.core.security.support.MetadataUtils; @@ -137,6 +138,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE; import static org.elasticsearch.TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; @@ -363,29 +365,10 @@ public void createApiKey( listener.onFailure(new IllegalArgumentException("authentication must be provided")); } else { final TransportVersion transportVersion = getMinTransportVersion(); - if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) - && hasRemoteIndices(request.getRoleDescriptors())) { - // Creating API keys with roles which define remote indices privileges is not allowed in a mixed cluster. - listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() - + "] or higher to support remote indices privileges for API keys" - ) - ); - return; - } - if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(request.getRoleDescriptors())) { - // Creating API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
- listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + ROLE_REMOTE_CLUSTER_PRIVS - + "] or higher to support remote cluster privileges for API keys" - ) - ); + if (validateRoleDescriptorsForMixedCluster(listener, request.getRoleDescriptors(), transportVersion) == false) { return; } + if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && request.getType() == ApiKey.Type.CROSS_CLUSTER) { listener.onFailure( @@ -407,15 +390,63 @@ && hasRemoteIndices(request.getRoleDescriptors())) { return; } - final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); - final Set filteredUserRoleDescriptors = maybeRemoveRemotePrivileges( - userRolesWithoutDescription, + Set filteredRoleDescriptors = filterRoleDescriptorsForMixedCluster( + userRoleDescriptors, transportVersion, request.getId() ); - createApiKeyAndIndexIt(authentication, request, filteredUserRoleDescriptors, listener); + createApiKeyAndIndexIt(authentication, request, filteredRoleDescriptors, listener); + } + } + + private Set filterRoleDescriptorsForMixedCluster( + final Set userRoleDescriptors, + final TransportVersion transportVersion, + final String... apiKeyIds + ) { + final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); + return maybeRemoveRemotePrivileges(userRolesWithoutDescription, transportVersion, apiKeyIds); + } + + private boolean validateRoleDescriptorsForMixedCluster( + final ActionListener listener, + final List roleDescriptors, + final TransportVersion transportVersion + ) { + if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && hasRemoteIndices(roleDescriptors)) { + // API keys with roles which define remote indices privileges is not allowed in a mixed cluster. + listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() + + "] or higher to support remote indices privileges for API keys" + ) + ); + return false; + } + if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(roleDescriptors)) { + // API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
+ listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + ROLE_REMOTE_CLUSTER_PRIVS + + "] or higher to support remote cluster privileges for API keys" + ) + ); + return false; + } + if (transportVersion.before(ADD_MANAGE_ROLES_PRIVILEGE) && hasGlobalManageRolesPrivilege(roleDescriptors)) { + listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + ADD_MANAGE_ROLES_PRIVILEGE + + "] or higher to support the manage roles privilege for API keys" + ) + ); + return false; } + return true; } /** @@ -458,6 +489,13 @@ private static boolean hasRemoteCluster(Collection roleDescripto return roleDescriptors != null && roleDescriptors.stream().anyMatch(RoleDescriptor::hasRemoteClusterPermissions); } + private static boolean hasGlobalManageRolesPrivilege(Collection roleDescriptors) { + return roleDescriptors != null + && roleDescriptors.stream() + .flatMap(roleDescriptor -> Arrays.stream(roleDescriptor.getConditionalClusterPrivileges())) + .anyMatch(privilege -> privilege instanceof ConfigurableClusterPrivileges.ManageRolesPrivilege); + } + private static IllegalArgumentException validateWorkflowsRestrictionConstraints( TransportVersion transportVersion, List requestRoleDescriptors, @@ -594,28 +632,11 @@ public void updateApiKeys( } final TransportVersion transportVersion = getMinTransportVersion(); - if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && hasRemoteIndices(request.getRoleDescriptors())) { - // Updating API keys with roles which define remote indices privileges is not allowed in a mixed cluster. - listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() - + "] or higher to support remote indices privileges for API keys" - ) - ); - return; - } - if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(request.getRoleDescriptors())) { - // Updating API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
- listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + ROLE_REMOTE_CLUSTER_PRIVS - + "] or higher to support remote indices privileges for API keys" - ) - ); + + if (validateRoleDescriptorsForMixedCluster(listener, request.getRoleDescriptors(), transportVersion) == false) { return; } + final Exception workflowsValidationException = validateWorkflowsRestrictionConstraints( transportVersion, request.getRoleDescriptors(), @@ -627,22 +648,22 @@ public void updateApiKeys( } final String[] apiKeyIds = request.getIds().toArray(String[]::new); - final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); - final Set filteredUserRoleDescriptors = maybeRemoveRemotePrivileges( - userRolesWithoutDescription, - transportVersion, - apiKeyIds - ); if (logger.isDebugEnabled()) { logger.debug("Updating [{}] API keys", buildDelimitedStringWithLimit(10, apiKeyIds)); } + Set filteredRoleDescriptors = filterRoleDescriptorsForMixedCluster( + userRoleDescriptors, + transportVersion, + apiKeyIds + ); + findVersionedApiKeyDocsForSubject( authentication, apiKeyIds, ActionListener.wrap( - versionedDocs -> updateApiKeys(authentication, request, filteredUserRoleDescriptors, versionedDocs, listener), + versionedDocs -> updateApiKeys(authentication, request, filteredRoleDescriptors, versionedDocs, listener), ex -> listener.onFailure(traceLog("bulk update", ex)) ) ); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index a2d2b21b489ea..9ddda193dba39 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -60,6 +60,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; import org.elasticsearch.xpack.core.security.support.NativeRealmValidationUtil; @@ -476,7 +477,15 @@ private Exception validateRoleDescriptor(RoleDescriptor role) { + TransportVersions.SECURITY_ROLE_DESCRIPTION.toReleaseVersion() + "] or higher to support specifying role description" ); - } + } else if (Arrays.stream(role.getConditionalClusterPrivileges()) + .anyMatch(privilege -> privilege instanceof ConfigurableClusterPrivileges.ManageRolesPrivilege) + && clusterService.state().getMinTransportVersion().before(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) { + return new IllegalStateException( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ); + } try { DLSRoleQueryValidator.validateQueryField(role.getIndicesPrivileges(), xContentRegistry); } catch (ElasticsearchException | IllegalArgumentException e) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index 4c5ce703f48ad..9541dd9dc470d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -36,6 +36,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_PROFILE_ORIGIN; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_VERSION_STRING; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion.ADD_MANAGE_ROLES_PRIVILEGE; /** * Responsible for handling system indices for the Security plugin @@ -409,6 +410,40 @@ private XContentBuilder getMainIndexMappings(SecurityMainIndexMappingVersion map builder.endObject(); } builder.endObject(); + if (mappingVersion.onOrAfter(ADD_MANAGE_ROLES_PRIVILEGE)) { + builder.startObject("role"); + { + builder.field("type", "object"); + builder.startObject("properties"); + { + builder.startObject("manage"); + { + builder.field("type", "object"); + builder.startObject("properties"); + { + builder.startObject("indices"); + { + builder.startObject("properties"); + { + builder.startObject("names"); + builder.field("type", "keyword"); + builder.endObject(); + builder.startObject("privileges"); + builder.field("type", "keyword"); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } } builder.endObject(); } @@ -1050,6 +1085,11 @@ public enum SecurityMainIndexMappingVersion implements VersionId(Arrays.asList("", "\""))), - new ConfigurableClusterPrivileges.ManageApplicationPrivileges(Set.of("\"")) }, + new ConfigurableClusterPrivileges.ManageApplicationPrivileges(Set.of("\"")), + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + new String[] { "test*" }, + new String[] { "read", "write" } + ) + ) + ) }, new String[] { "\"[a]/" }, Map.of(), Map.of() diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt index 7b5e24c97d65a..f913c8608960b 100644 --- a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt @@ -7,6 +7,6 @@ role_descriptor2 role_descriptor3 {"cluster":[],"indices":[],"applications":[{"application":"maps","privileges":["{","}","\n","\\","\""],"resources":["raster:*"]},{"application":"maps","privileges":["*:*"],"resources":["noooooo!!\n\n\f\\\\r","{"]}],"run_as":["jack","nich*","//\""],"metadata":{"some meta":42}} role_descriptor4 -{"cluster":["manage_ml","grant_api_key","manage_rollup"],"global":{"application":{"manage":{"applications":["a+b+|b+a+"]}},"profile":{}},"indices":[{"names":["/. ? 
+ * | { } [ ] ( ) \" \\/","*"],"privileges":["read","read_cross_cluster"],"field_security":{"grant":["almost","all*"],"except":["denied*"]}}],"applications":[],"run_as":["//+a+\"[a]/"],"metadata":{"?list":["e1","e2","*"],"some other meta":{"r":"t"}}} +{"cluster":["manage_ml","grant_api_key","manage_rollup"],"global":{"application":{"manage":{"applications":["a+b+|b+a+"]}},"profile":{},"role":{}},"indices":[{"names":["/. ? + * | { } [ ] ( ) \" \\/","*"],"privileges":["read","read_cross_cluster"],"field_security":{"grant":["almost","all*"],"except":["denied*"]}}],"applications":[],"run_as":["//+a+\"[a]/"],"metadata":{"?list":["e1","e2","*"],"some other meta":{"r":"t"}}} role_descriptor5 -{"cluster":["all"],"global":{"application":{"manage":{"applications":["\""]}},"profile":{"write":{"applications":["","\""]}}},"indices":[],"applications":[],"run_as":["\"[a]/"]} +{"cluster":["all"],"global":{"application":{"manage":{"applications":["\""]}},"profile":{"write":{"applications":["","\""]}},"role":{"manage":{"indices":[{"names":["test*"],"privileges":["read","write"]}]}}},"indices":[],"applications":[],"run_as":["\"[a]/"]} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java index 4f4ff1d5743ee..650779cfbc85d 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java @@ -29,6 +29,7 @@ import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomApplicationPrivileges; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomIndicesPrivileges; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomManageRolesPrivileges; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomRoleDescriptorMetadata; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -40,7 +41,7 @@ public class RolesBackwardsCompatibilityIT extends AbstractUpgradeTestCase { private RestClient oldVersionClient = null; private RestClient newVersionClient = null; - public void testCreatingAndUpdatingRoles() throws Exception { + public void testRolesWithDescription() throws Exception { assumeTrue( "The role description is supported after transport version: " + TransportVersions.SECURITY_ROLE_DESCRIPTION, minimumTransportVersion().before(TransportVersions.SECURITY_ROLE_DESCRIPTION) @@ -48,14 +49,14 @@ public void testCreatingAndUpdatingRoles() throws Exception { switch (CLUSTER_TYPE) { case OLD -> { // Creating role in "old" cluster should succeed when description is not provided - final String initialRole = randomRoleDescriptorSerialized(false); + final String initialRole = randomRoleDescriptorSerialized(); createRole(client(), "my-old-role", initialRole); - updateRole("my-old-role", randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(false))); + updateRole("my-old-role", randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized)); // and fail if we include description var createException = expectThrows( Exception.class, - () -> createRole(client(), "my-invalid-old-role", randomRoleDescriptorSerialized(true)) + () -> createRole(client(), "my-invalid-old-role", 
randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( createException.getMessage(), @@ -65,7 +66,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { RestClient client = client(); var updateException = expectThrows( Exception.class, - () -> updateRole(client, "my-old-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(client, "my-old-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( updateException.getMessage(), @@ -74,17 +75,20 @@ public void testCreatingAndUpdatingRoles() throws Exception { } case MIXED -> { try { - this.createClientsByVersion(); + this.createClientsByVersion(TransportVersions.SECURITY_ROLE_DESCRIPTION); // succeed when role description is not provided - final String initialRole = randomRoleDescriptorSerialized(false); + final String initialRole = randomRoleDescriptorSerialized(); createRole(client(), "my-valid-mixed-role", initialRole); - updateRole("my-valid-mixed-role", randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(false))); + updateRole( + "my-valid-mixed-role", + randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized) + ); // against old node, fail when description is provided either in update or create request { Exception e = expectThrows( Exception.class, - () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -94,7 +98,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -106,7 +110,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -120,7 +124,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -138,11 +142,129 @@ public void testCreatingAndUpdatingRoles() throws Exception { case UPGRADED -> { // on upgraded cluster which supports new description field // create/update requests should succeed either way (with or without description) - final String initialRole = randomRoleDescriptorSerialized(randomBoolean()); + final String initialRole = randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithDescriptionSerialized()); createRole(client(), "my-valid-upgraded-role", initialRole); updateRole( "my-valid-upgraded-role", - randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(randomBoolean())) + randomValueOtherThan( + initialRole, + () -> randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithDescriptionSerialized()) + ) + ); + } + } + } + + public void testRolesWithManageRoles() throws Exception { + assumeTrue( + "The 
manage roles privilege is supported after transport version: " + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE, + minimumTransportVersion().before(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE) + ); + switch (CLUSTER_TYPE) { + case OLD -> { + // Creating role in "old" cluster should succeed when manage roles is not provided + final String initialRole = randomRoleDescriptorSerialized(); + createRole(client(), "my-old-role", initialRole); + updateRole("my-old-role", randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized)); + + // and fail if we include manage roles + var createException = expectThrows( + Exception.class, + () -> createRole(client(), "my-invalid-old-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + createException.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + + RestClient client = client(); + var updateException = expectThrows( + Exception.class, + () -> updateRole(client, "my-old-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + updateException.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + case MIXED -> { + try { + this.createClientsByVersion(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE); + // succeed when role manage roles is not provided + final String initialRole = randomRoleDescriptorSerialized(); + createRole(client(), "my-valid-mixed-role", initialRole); + updateRole( + "my-valid-mixed-role", + randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized) + ); + + // against old node, fail when manage roles is provided either in update or create request + { + Exception e = expectThrows( + Exception.class, + () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + { + Exception e = expectThrows( + Exception.class, + () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + + // and against new node in a mixed cluster we should fail + { + Exception e = expectThrows( + Exception.class, + () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + + assertThat( + e.getMessage(), + containsString( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ) + ); + } + { + Exception e = expectThrows( + Exception.class, + () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + containsString( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ) + ); + } + } finally { + this.closeClientsByVersion(); + } + } + case UPGRADED -> { + // on upgraded cluster which supports new description field + // create/update requests should succeed either way (with or without description) + final String initialRole = randomFrom(randomRoleDescriptorSerialized(), 
randomRoleDescriptorWithManageRolesSerialized()); + createRole(client(), "my-valid-upgraded-role", initialRole); + updateRole( + "my-valid-upgraded-role", + randomValueOtherThan( + initialRole, + () -> randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithManageRolesSerialized()) + ) ); } } @@ -166,10 +288,22 @@ private void updateRole(RestClient client, String roleName, String payload) thro assertThat(created, equalTo(false)); } - private static String randomRoleDescriptorSerialized(boolean includeDescription) { + private static String randomRoleDescriptorSerialized() { + return randomRoleDescriptorSerialized(false, false); + } + + private static String randomRoleDescriptorWithDescriptionSerialized() { + return randomRoleDescriptorSerialized(true, false); + } + + private static String randomRoleDescriptorWithManageRolesSerialized() { + return randomRoleDescriptorSerialized(false, true); + } + + private static String randomRoleDescriptorSerialized(boolean includeDescription, boolean includeManageRoles) { try { return XContentTestUtils.convertToXContent( - XContentTestUtils.convertToMap(randomRoleDescriptor(includeDescription)), + XContentTestUtils.convertToMap(randomRoleDescriptor(includeDescription, includeManageRoles)), XContentType.JSON ).utf8ToString(); } catch (IOException e) { @@ -177,26 +311,26 @@ private static String randomRoleDescriptorSerialized(boolean includeDescription) } } - private boolean nodeSupportRoleDescription(Map nodeDetails) { + private boolean nodeSupportTransportVersion(Map nodeDetails, TransportVersion transportVersion) { String nodeVersionString = (String) nodeDetails.get("version"); - TransportVersion transportVersion = getTransportVersionWithFallback( + TransportVersion nodeTransportVersion = getTransportVersionWithFallback( nodeVersionString, nodeDetails.get("transport_version"), () -> TransportVersions.ZERO ); - if (transportVersion.equals(TransportVersions.ZERO)) { + if (nodeTransportVersion.equals(TransportVersions.ZERO)) { // In cases where we were not able to find a TransportVersion, a pre-8.8.0 node answered about a newer (upgraded) node. // In that case, the node will be current (upgraded), and remote indices are supported for sure. 
var nodeIsCurrent = nodeVersionString.equals(Build.current().version()); assertTrue(nodeIsCurrent); return true; } - return transportVersion.onOrAfter(TransportVersions.SECURITY_ROLE_DESCRIPTION); + return nodeTransportVersion.onOrAfter(transportVersion); } - private void createClientsByVersion() throws IOException { - var clientsByCapability = getRestClientByCapability(); + private void createClientsByVersion(TransportVersion transportVersion) throws IOException { + var clientsByCapability = getRestClientByCapability(transportVersion); if (clientsByCapability.size() == 2) { for (Map.Entry client : clientsByCapability.entrySet()) { if (client.getKey() == false) { @@ -224,7 +358,7 @@ private void closeClientsByVersion() throws IOException { } @SuppressWarnings("unchecked") - private Map getRestClientByCapability() throws IOException { + private Map getRestClientByCapability(TransportVersion transportVersion) throws IOException { Response response = client().performRequest(new Request("GET", "_nodes")); assertOK(response); ObjectPath objectPath = ObjectPath.createFromResponse(response); @@ -232,7 +366,7 @@ private Map getRestClientByCapability() throws IOException Map> hostsByCapability = new HashMap<>(); for (Map.Entry entry : nodesAsMap.entrySet()) { Map nodeDetails = (Map) entry.getValue(); - var capabilitySupported = nodeSupportRoleDescription(nodeDetails); + var capabilitySupported = nodeSupportTransportVersion(nodeDetails, transportVersion); Map httpInfo = (Map) nodeDetails.get("http"); hostsByCapability.computeIfAbsent(capabilitySupported, k -> new ArrayList<>()) .add(HttpHost.create((String) httpInfo.get("publish_address"))); @@ -244,7 +378,7 @@ private Map getRestClientByCapability() throws IOException return clientsByCapability; } - private static RoleDescriptor randomRoleDescriptor(boolean includeDescription) { + private static RoleDescriptor randomRoleDescriptor(boolean includeDescription, boolean includeManageRoles) { final Set excludedPrivileges = Set.of( "cross_cluster_replication", "cross_cluster_replication_internal", @@ -255,7 +389,7 @@ private static RoleDescriptor randomRoleDescriptor(boolean includeDescription) { randomSubsetOf(Set.of("all", "monitor", "none")).toArray(String[]::new), randomIndicesPrivileges(0, 3, excludedPrivileges), randomApplicationPrivileges(), - null, + includeManageRoles ? randomManageRolesPrivileges() : null, generateRandomStringArray(5, randomIntBetween(2, 8), false, true), randomRoleDescriptorMetadata(false), Map.of(), From f150e2c11df0fe3bef298c55bd867437e50f5f73 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 14:34:02 +0100 Subject: [PATCH 095/352] Add telemetry for repository usage (#112133) Adds to the `GET _cluster/stats` endpoint information about the snapshot repositories in use, including their types, whether they are read-only or read-write, and for Azure repositories the kind of credentials in use. 
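To illustrate the shape of the new output (this example is not part of the patch: the repository types and counts shown are invented, and whether zero-valued counters are emitted is an implementation detail), the `repositories` object in a `GET _cluster/stats` response might look like:

    "repositories": {
      "s3": {
        "count": 3,
        "read_write": 2,
        "read_only": 1
      },
      "azure": {
        "count": 1,
        "read_write": 1,
        "uses_key_credentials": 1
      }
    }

Each per-type entry is a flat map from feature name to a count of the repositories using that feature (keyed by repository type and then feature name, per `RepositoryUsageStats` below), so repository implementations can contribute extra counters, such as the Azure credential kinds here, without changing the wire format.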
--- docs/changelog/112133.yaml | 5 ++ docs/reference/cluster/stats.asciidoc | 31 +++++++++- .../repositories/azure/AzureRepository.java | 6 ++ .../azure/AzureStorageService.java | 12 ++++ .../azure/AzureStorageSettings.java | 12 ++++ .../test/repository_azure/20_repository.yml | 13 ++++ .../test/repository_gcs/20_repository.yml | 13 ++++ .../20_repository_permanent_credentials.yml | 13 ++++ .../30_repository_temporary_credentials.yml | 13 ++++ .../40_repository_ec2_credentials.yml | 13 ++++ .../50_repository_ecs_credentials.yml | 13 ++++ .../60_repository_sts_credentials.yml | 13 ++++ server/src/main/java/module-info.java | 1 + .../org/elasticsearch/TransportVersions.java | 2 + .../stats/ClusterStatsNodeResponse.java | 36 ++++++----- .../cluster/stats/ClusterStatsResponse.java | 12 ++++ .../cluster/stats/RepositoryUsageStats.java | 59 +++++++++++++++++++ .../stats/TransportClusterStatsAction.java | 19 ++++-- .../cluster/health/ClusterHealthStatus.java | 2 +- .../repositories/RepositoriesFeatures.java | 23 ++++++++ .../repositories/RepositoriesService.java | 27 +++++++-- .../repositories/Repository.java | 8 +++ .../blobstore/BlobStoreRepository.java | 25 ++++++++ ...lasticsearch.features.FeatureSpecification | 1 + .../cluster/stats/VersionStatsTests.java | 3 +- .../ClusterStatsMonitoringDocTests.java | 25 ++++---- .../AzureRepositoryAnalysisRestIT.java | 37 ++++++++++++ 27 files changed, 400 insertions(+), 37 deletions(-) create mode 100644 docs/changelog/112133.yaml create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java create mode 100644 server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java diff --git a/docs/changelog/112133.yaml b/docs/changelog/112133.yaml new file mode 100644 index 0000000000000..11109402b7373 --- /dev/null +++ b/docs/changelog/112133.yaml @@ -0,0 +1,5 @@ +pr: 112133 +summary: Add telemetry for repository usage +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 3b429ef427071..c39bc0dcd2878 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1282,6 +1282,31 @@ They are included here for expert users, but should otherwise be ignored. ===== +==== + +`repositories`:: +(object) Contains statistics about the <> repositories defined in the cluster, broken down +by repository type. ++ +.Properties of `repositories` +[%collapsible%open] +===== + +`count`::: +(integer) The number of repositories of this type in the cluster. + +`read_only`::: +(integer) The number of repositories of this type in the cluster which are registered read-only. + +`read_write`::: +(integer) The number of repositories of this type in the cluster which are not registered as read-only. + +Each repository type may also include other statistics about the repositories of that type here. + +===== + +==== + [[cluster-stats-api-example]] ==== {api-examples-title} @@ -1579,6 +1604,9 @@ The API returns the following response: }, "snapshots": { ... + }, + "repositories": { + ... 
} } -------------------------------------------------- @@ -1589,6 +1617,7 @@ The API returns the following response: // TESTRESPONSE[s/"count": \{[^\}]*\}/"count": $body.$_path/] // TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/] // TESTRESPONSE[s/"snapshots": \{[^\}]*\}/"snapshots": $body.$_path/] +// TESTRESPONSE[s/"repositories": \{[^\}]*\}/"repositories": $body.$_path/] // TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/] // TESTRESPONSE[s/"runtime_field_types": \[[^\]]*\]/"runtime_field_types": $body.$_path/] // TESTRESPONSE[s/"search": \{[^\}]*\}/"search": $body.$_path/] @@ -1600,7 +1629,7 @@ The API returns the following response: // the plugins that will be in it. And because we figure folks don't need to // see an exhaustive list anyway. // 2. Similarly, ignore the contents of `network_types`, `discovery_types`, -// `packaging_types` and `snapshots`. +// `packaging_types`, `snapshots` and `repositories`. // 3. Ignore the contents of the (nodes) count object, as what's shown here // depends on the license. Voting-only nodes are e.g. only shown when this // test runs with a basic license. diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 388474acc75ea..c8c0b15db5ebe 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -26,6 +26,7 @@ import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.core.Strings.format; @@ -175,4 +176,9 @@ protected ByteSizeValue chunkSize() { public boolean isReadOnly() { return readonly; } + + @Override + protected Set getExtraUsageFeatures() { + return storageService.getExtraUsageFeatures(Repository.CLIENT_NAME.get(getMetadata().settings())); + } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 0d6cd7bf3d246..09088004759a8 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -24,6 +24,7 @@ import java.net.Proxy; import java.net.URL; import java.util.Map; +import java.util.Set; import java.util.function.BiConsumer; import static java.util.Collections.emptyMap; @@ -165,4 +166,15 @@ public void refreshSettings(Map clientsSettings) { this.storageSettings = Map.copyOf(clientsSettings); // clients are built lazily by {@link client(String, LocationMode)} } + + /** + * For Azure repositories, we report the different kinds of credentials in use in the telemetry. 
+ */ + public Set getExtraUsageFeatures(String clientName) { + try { + return getClientSettings(clientName).credentialsUsageFeatures(); + } catch (Exception e) { + return Set.of(); + } + } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index b3e8dd8898bea..2333a1fdb9e93 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -29,6 +29,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Set; final class AzureStorageSettings { @@ -130,6 +131,7 @@ final class AzureStorageSettings { private final int maxRetries; private final Proxy proxy; private final boolean hasCredentials; + private final Set credentialsUsageFeatures; private AzureStorageSettings( String account, @@ -150,6 +152,12 @@ private AzureStorageSettings( this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; + this.credentialsUsageFeatures = Strings.hasText(key) ? Set.of("uses_key_credentials") + : Strings.hasText(sasToken) ? Set.of("uses_sas_token") + : SocketAccess.doPrivilegedException(() -> System.getenv("AZURE_FEDERATED_TOKEN_FILE")) == null + ? Set.of("uses_default_credentials", "uses_managed_identity") + : Set.of("uses_default_credentials", "uses_workload_identity"); + // Register the proxy if we have any // Validate proxy settings if (proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) { @@ -366,4 +374,8 @@ private String deriveURIFromSettings(boolean isPrimary) { throw new IllegalArgumentException(e); } } + + public Set credentialsUsageFeatures() { + return credentialsUsageFeatures; + } } diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index 299183f26d9dc..a4a7d0b22a0ed 100644 --- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -235,6 +235,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.azure.count: 1 } + - gte: { repositories.azure.read_write: 1 } + --- teardown: diff --git a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml index 68d61be4983c5..e8c34a4b6a20b 100644 --- a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml +++ b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml @@ -232,6 +232,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.gcs.count: 1 } + - gte: { repositories.gcs.read_write: 1 
} + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 77870697f93ae..e88a0861ec01c 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -345,6 +345,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index 4a62d6183470d..501af980e17e3 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml index e24ff1ad0e559..129f0ba5d7588 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml index 9c332cc7d9301..de334b4b3df96 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git 
a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml index 24c2b2b1741d6..09a8526017960 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml @@ -257,6 +257,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index c223db531e688..d412748ed4e57 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -429,6 +429,7 @@ org.elasticsearch.cluster.metadata.MetadataFeatures, org.elasticsearch.rest.RestFeatures, org.elasticsearch.indices.IndicesFeatures, + org.elasticsearch.repositories.RepositoriesFeatures, org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.index.mapper.MapperFeatures, org.elasticsearch.ingest.IngestGeoIpFeatures, diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 582c618216999..41fa34bb5a4a3 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -199,6 +199,8 @@ static TransportVersion def(int id) { public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0); + public static final TransportVersion REPOSITORIES_TELEMETRY = def(8_732_00_0); + /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index d74889b623589..b48295dc8b3eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -20,29 +20,33 @@ import org.elasticsearch.core.Nullable; import java.io.IOException; +import java.util.Objects; public class ClusterStatsNodeResponse extends BaseNodeResponse { private final NodeInfo nodeInfo; private final NodeStats nodeStats; private final ShardStats[] shardsStats; - private ClusterHealthStatus clusterStatus; + private final ClusterHealthStatus clusterStatus; private final SearchUsageStats searchUsageStats; + private final RepositoryUsageStats repositoryUsageStats; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); - clusterStatus = null; - if (in.readBoolean()) { - clusterStatus = ClusterHealthStatus.readFrom(in); - } + this.clusterStatus = in.readOptionalWriteable(ClusterHealthStatus::readFrom); this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); - shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); + this.shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats = new SearchUsageStats(in); } else { searchUsageStats = new SearchUsageStats(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + repositoryUsageStats = RepositoryUsageStats.readFrom(in); + } else { + repositoryUsageStats = RepositoryUsageStats.EMPTY; + } } public ClusterStatsNodeResponse( @@ -51,14 +55,16 @@ public ClusterStatsNodeResponse( NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats, - SearchUsageStats searchUsageStats + SearchUsageStats searchUsageStats, + RepositoryUsageStats repositoryUsageStats ) { super(node); this.nodeInfo = nodeInfo; this.nodeStats = nodeStats; this.shardsStats = shardsStats; this.clusterStatus = clusterStatus; - this.searchUsageStats = searchUsageStats; + this.searchUsageStats = Objects.requireNonNull(searchUsageStats); + this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats); } public NodeInfo nodeInfo() { @@ -85,20 +91,22 @@ public SearchUsageStats searchUsageStats() { return searchUsageStats; } + public RepositoryUsageStats repositoryUsageStats() { + return repositoryUsageStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (clusterStatus == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeByte(clusterStatus.value()); - } + out.writeOptionalWriteable(clusterStatus); nodeInfo.writeTo(out); nodeStats.writeTo(out); out.writeArray(shardsStats); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats.writeTo(out); } + if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + repositoryUsageStats.writeTo(out); + } // else just drop these stats, ok for bwc } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index 
36e7b247befac..b6dd40e8c8b79 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java
@@ -30,6 +30,7 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp final ClusterHealthStatus status; final ClusterSnapshotStats clusterSnapshotStats; + final RepositoryUsageStats repositoryUsageStats; final long timestamp; final String clusterUUID;
@@ -64,6 +65,14 @@ public ClusterStatsResponse( this.clusterSnapshotStats = clusterSnapshotStats; + + this.repositoryUsageStats = nodes.stream() + .map(ClusterStatsNodeResponse::repositoryUsageStats) + // only populated on nodes that know about repositories, so skip the empty responses + .filter(r -> r.isEmpty() == false) + // stats should be the same on every node so just pick one of them + .findAny() + .orElse(RepositoryUsageStats.EMPTY); } public String getClusterUUID() {
@@ -113,6 +122,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("snapshots"); clusterSnapshotStats.toXContent(builder, params); + builder.field("repositories"); + repositoryUsageStats.toXContent(builder, params); + return builder; }
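For orientation, a sketch of the section this wiring adds to the cluster stats response body. The numbers are illustrative; the per-type keys come from RepositoryUsageStats.toXContent below plus the feature names each repository implementation reports:

    "repositories": {
      "s3": { "count": 3, "read_write": 2, "read_only": 1 },
      "azure": { "count": 1, "read_write": 1, "uses_key_credentials": 1 }
    }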
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java new file mode 100644 index 0000000000000..771aa0fbef842 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java
@@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +/** + * Stats on repository feature usage exposed in cluster stats for telemetry. + * + * @param statsByType a count of the repositories using various named features, keyed by repository type and then by feature name. + */ +public record RepositoryUsageStats(Map<String, Map<String, Long>> statsByType) implements Writeable, ToXContentObject { + + public static final RepositoryUsageStats EMPTY = new RepositoryUsageStats(Map.of()); + + public static RepositoryUsageStats readFrom(StreamInput in) throws IOException { + final var statsByType = in.readMap(i -> i.readMap(StreamInput::readVLong)); + if (statsByType.isEmpty()) { + return EMPTY; + } else { + return new RepositoryUsageStats(statsByType); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(statsByType, (o, m) -> o.writeMap(m, StreamOutput::writeVLong)); + } + + public boolean isEmpty() { + return statsByType.isEmpty(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (Map.Entry<String, Map<String, Long>> typeAndStats : statsByType.entrySet()) { + builder.startObject(typeAndStats.getKey()); + for (Map.Entry<String, Long> statAndValue : typeAndStats.getValue().entrySet()) { + builder.field(statAndValue.getKey(), statAndValue.getValue()); + } + builder.endObject(); + } + return builder.endObject(); + } +}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index bcf49bca421f6..1912de3cfa4d2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -41,6 +41,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeService; +import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId;
@@ -78,6 +79,7 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final NodeService nodeService; private final IndicesService indicesService; + private final RepositoriesService repositoriesService; private final SearchUsageHolder searchUsageHolder; private final MetadataStatsCache<MappingStats> mappingStatsCache;
@@ -90,6 +92,7 @@ public TransportClusterStatsAction( TransportService transportService, NodeService nodeService, IndicesService indicesService, + RepositoriesService repositoriesService, UsageService usageService, ActionFilters actionFilters ) {
@@ -103,6 +106,7 @@ public TransportClusterStatsAction( ); this.nodeService = nodeService; this.indicesService = indicesService; + this.repositoriesService = repositoriesService; this.searchUsageHolder = usageService.getSearchUsageHolder(); this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of);
@@ -237,12 +241,14 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq } } - ClusterHealthStatus clusterStatus = null; - if (clusterService.state().nodes().isLocalNodeElectedMaster()) { - clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); - } + final ClusterState clusterState = clusterService.state(); + final ClusterHealthStatus clusterStatus = clusterState.nodes().isLocalNodeElectedMaster() + ?
new ClusterStateHealth(clusterState).getStatus() + : null; + + final SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); - SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); + final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats(); return new ClusterStatsNodeResponse( nodeInfo.getNode(), @@ -250,7 +256,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]), - searchUsageStats + searchUsageStats, + repositoryUsageStats ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java index d025ddab26af6..c53395b5d76c1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java @@ -19,7 +19,7 @@ public enum ClusterHealthStatus implements Writeable { YELLOW((byte) 1), RED((byte) 2); - private byte value; + private final byte value; ClusterHealthStatus(byte value) { this.value = value; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java new file mode 100644 index 0000000000000..141dac0c5c430 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Set; + +public class RepositoriesFeatures implements FeatureSpecification { + public static final NodeFeature SUPPORTS_REPOSITORIES_USAGE_STATS = new NodeFeature("repositories.supports_usage_stats"); + + @Override + public Set<NodeFeature> getFeatures() { + return Set.of(SUPPORTS_REPOSITORIES_USAGE_STATS); + } +}
diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index de4ae1051ba62..732a18dffe233 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
@@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.stats.RepositoryUsageStats; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.node.NodeClient;
@@ -944,15 +945,33 @@ public List<BiConsumer<Snapshot, IndexVersion>> getPreRestoreVersionChecks() { return preRestoreChecks; } - @Override - protected void doStart() { + public static final String COUNT_USAGE_STATS_NAME = "count"; + public RepositoryUsageStats getUsageStats() { + if (repositories.isEmpty()) { + return RepositoryUsageStats.EMPTY; + } + final var statsByType = new HashMap<String, Map<String, Long>>(); + for (final var repository : repositories.values()) { + final var repositoryType = repository.getMetadata().type(); + final var typeStats = statsByType.computeIfAbsent(repositoryType, ignored -> new HashMap<>()); + typeStats.compute(COUNT_USAGE_STATS_NAME, (k, count) -> (count == null ? 0L : count) + 1); + final var repositoryUsageTags = repository.getUsageFeatures(); + assert repositoryUsageTags.contains(COUNT_USAGE_STATS_NAME) == false : repositoryUsageTags; + for (final var repositoryUsageTag : repositoryUsageTags) { + typeStats.compute(repositoryUsageTag, (k, count) -> (count == null ? 0L : count) + 1); + } + } + return new RepositoryUsageStats( + statsByType.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> Map.copyOf(e.getValue()))) + ); } @Override - protected void doStop() { + protected void doStart() {} - } + @Override + protected void doStop() {} @Override protected void doClose() throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index fd52c21cad3f8..09f4782b6e5fa 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java
@@ -312,6 +312,14 @@ void cloneShardSnapshot( */ void awaitIdle(); + /** + * @return a set of the names of the features that this repository instance uses, for reporting in the cluster stats for telemetry + * collection.
+ */ + default Set<String> getUsageFeatures() { + return Set.of(); + } + static boolean assertSnapshotMetaThread() { return ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT_META); }
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e8af752bec179..cc56e940530e8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -3943,4 +3943,29 @@ public String getAnalysisFailureExtraDetail() { ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS ); } + + public static final String READ_ONLY_USAGE_STATS_NAME = "read_only"; + public static final String READ_WRITE_USAGE_STATS_NAME = "read_write"; + + @Override + public final Set<String> getUsageFeatures() { + final var extraUsageFeatures = getExtraUsageFeatures(); + assert extraUsageFeatures.contains(READ_ONLY_USAGE_STATS_NAME) == false : extraUsageFeatures; + assert extraUsageFeatures.contains(READ_WRITE_USAGE_STATS_NAME) == false : extraUsageFeatures; + return Set.copyOf( + Stream.concat(Stream.of(isReadOnly() ? READ_ONLY_USAGE_STATS_NAME : READ_WRITE_USAGE_STATS_NAME), extraUsageFeatures.stream()) + .toList() + ); + } + + /** + * All blob-store repositories include the counts of read-only and read-write repositories in their telemetry. This method returns other + * features of the repositories in use. + * + * @return a set of the names of the extra features that this repository instance uses, for reporting in the cluster stats for telemetry + * collection. + */ + protected Set<String> getExtraUsageFeatures() { + return Set.of(); + } }
diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index baf7e53345944..90a1c29972ff3 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
@@ -13,6 +13,7 @@ org.elasticsearch.cluster.service.TransportFeatures org.elasticsearch.cluster.metadata.MetadataFeatures org.elasticsearch.rest.RestFeatures org.elasticsearch.indices.IndicesFeatures +org.elasticsearch.repositories.RepositoriesFeatures org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures org.elasticsearch.index.mapper.MapperFeatures org.elasticsearch.ingest.IngestGeoIpFeatures
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index 49528c204b042..20eae9833e4b0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java
@@ -127,7 +127,8 @@ public void testCreation() { null, null, new ShardStats[] { shardStats }, - null + new SearchUsageStats(), + RepositoryUsageStats.EMPTY ); stats = VersionStats.of(metadata, Collections.singletonList(nodeResponse));
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index
c89638045a5a8..4a695f7c51e4c 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsNodeResponse; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.cluster.stats.MappingStats; +import org.elasticsearch.action.admin.cluster.stats.RepositoryUsageStats; import org.elasticsearch.action.admin.cluster.stats.SearchUsageStats; import org.elasticsearch.action.admin.cluster.stats.VersionStats; import org.elasticsearch.action.admin.indices.stats.CommonStats; @@ -420,6 +421,7 @@ public void testToXContent() throws IOException { when(mockNodeResponse.nodeStats()).thenReturn(mockNodeStats); when(mockNodeResponse.shardsStats()).thenReturn(new ShardStats[] { mockShardStats }); when(mockNodeResponse.searchUsageStats()).thenReturn(new SearchUsageStats()); + when(mockNodeResponse.repositoryUsageStats()).thenReturn(RepositoryUsageStats.EMPTY); final Metadata metadata = testClusterState.metadata(); final ClusterStatsResponse clusterStatsResponse = new ClusterStatsResponse( @@ -533,7 +535,9 @@ public void testToXContent() throws IOException { "fielddata": { "memory_size_in_bytes": 1, "evictions": 0, - "global_ordinals":{"build_time_in_millis":1} + "global_ordinals": { + "build_time_in_millis": 1 + } }, "query_cache": { "memory_size_in_bytes": 0, @@ -563,9 +567,9 @@ public void testToXContent() throws IOException { "file_sizes": {} }, "mappings": { - "total_field_count" : 0, - "total_deduplicated_field_count" : 0, - "total_deduplicated_mapping_size_in_bytes" : 0, + "total_field_count": 0, + "total_deduplicated_field_count": 0, + "total_deduplicated_mapping_size_in_bytes": 0, "field_types": [], "runtime_field_types": [] }, @@ -581,11 +585,11 @@ public void testToXContent() throws IOException { "synonyms": {} }, "versions": [], - "search" : { - "total" : 0, - "queries" : {}, - "rescorers" : {}, - "sections" : {} + "search": { + "total": 0, + "queries": {}, + "rescorers": {}, + "sections": {} }, "dense_vector": { "value_count": 0 @@ -749,7 +753,8 @@ public void testToXContent() throws IOException { "cleanups": 0 }, "repositories": {} - } + }, + "repositories": {} }, "cluster_state": { "nodes_hash": 1314980060, diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java index ecc8401e1d79a..a9b8fe51c01cc 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java @@ -8,6 +8,8 @@ import fixture.azure.AzureHttpFixture; +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Request; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Booleans; @@ -15,15 +17,20 @@ import org.elasticsearch.test.TestTrustStore; import 
org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ObjectPath; +import org.hamcrest.Matcher; import org.junit.ClassRule; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import java.io.IOException; import java.util.Map; import java.util.function.Predicate; import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; public class AzureRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true"));
@@ -119,4 +126,34 @@ protected Settings repositorySettings() { return Settings.builder().put("client", "repository_test_kit").put("container", container).put("base_path", basePath).build(); } + + public void testClusterStats() throws IOException { + registerRepository(randomIdentifier(), repositoryType(), true, repositorySettings()); + + final var request = new Request(HttpGet.METHOD_NAME, "/_cluster/stats"); + final var response = client().performRequest(request); + assertOK(response); + + final var objectPath = ObjectPath.createFromResponse(response); + assertThat(objectPath.evaluate("repositories.azure.count"), isSetIff(true)); + assertThat(objectPath.evaluate("repositories.azure.read_write"), isSetIff(true)); + + assertThat(objectPath.evaluate("repositories.azure.uses_key_credentials"), isSetIff(Strings.hasText(AZURE_TEST_KEY))); + assertThat(objectPath.evaluate("repositories.azure.uses_sas_token"), isSetIff(Strings.hasText(AZURE_TEST_SASTOKEN))); + assertThat( + objectPath.evaluate("repositories.azure.uses_default_credentials"), + isSetIff((Strings.hasText(AZURE_TEST_SASTOKEN) || Strings.hasText(AZURE_TEST_KEY)) == false) + ); + assertThat( + objectPath.evaluate("repositories.azure.uses_managed_identity"), + isSetIff( + (Strings.hasText(AZURE_TEST_SASTOKEN) || Strings.hasText(AZURE_TEST_KEY) || Strings.hasText(AZURE_TEST_CLIENT_ID)) == false + ) + ); + assertThat(objectPath.evaluate("repositories.azure.uses_workload_identity"), isSetIff(Strings.hasText(AZURE_TEST_CLIENT_ID))); + } + + private static Matcher<Integer> isSetIff(boolean predicate) { + return predicate ? equalTo(1) : nullValue(Integer.class); + } }
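The azure-specific keys asserted above come from the Azure repository overriding the hook that BlobStoreRepository exposes in this patch. A minimal sketch of how any repository implementation could surface extra telemetry tags; the class name and tag are hypothetical, and the constructor plumbing is omitted:

    // Hypothetical repository reporting one extra usage feature; each tag is
    // counted per repository type by RepositoriesService.getUsageStats().
    public class MyCloudRepository extends BlobStoreRepository {
        // constructors and blob-store wiring omitted for brevity

        @Override
        protected Set<String> getExtraUsageFeatures() {
            // must not collide with the reserved "count", "read_only" and "read_write" names
            return Set.of("uses_client_side_encryption");
        }
    }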
From b7e1d5593b42f03aecc387160af6f452c4d25351 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:45:53 +0200 Subject: [PATCH 096/352] Fix connection timeout for OpenIdConnectAuthenticator get Userinfo (#112230) * Fix connection timeout for OpenIdConnectAuthenticator get Userinfo * Update docs/changelog/112230.yaml --- docs/changelog/112230.yaml | 5 +++++ .../security/authc/oidc/OpenIdConnectAuthenticator.java | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112230.yaml
diff --git a/docs/changelog/112230.yaml b/docs/changelog/112230.yaml new file mode 100644 index 0000000000000..ef12dc3f78267 --- /dev/null +++ b/docs/changelog/112230.yaml
@@ -0,0 +1,5 @@ +pr: 112230 +summary: Fix connection timeout for `OpenIdConnectAuthenticator` get Userinfo +area: Security +type: bug +issues: []
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index 0f34850b861b7..c2e0caf7234cb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java
@@ -718,7 +718,7 @@ private CloseableHttpAsyncClient createHttpClient() { connectionManager.setMaxTotal(realmConfig.getSetting(HTTP_MAX_CONNECTIONS)); final RequestConfig requestConfig = RequestConfig.custom() .setConnectTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECT_TIMEOUT).getMillis())) - .setConnectionRequestTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECTION_READ_TIMEOUT).getSeconds())) + .setConnectionRequestTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECTION_READ_TIMEOUT).getMillis())) .setSocketTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_SOCKET_TIMEOUT).getMillis())) .build();
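The one-word diff above is easy to misread, so here is the failure mode spelled out in a standalone sketch (illustrative values; Apache HttpClient's RequestConfig expects milliseconds for all three timeouts):

    // Both conversions compile, but only one matches the unit the HTTP client expects.
    TimeValue readTimeout = TimeValue.timeValueSeconds(5);
    int before = Math.toIntExact(readTimeout.getSeconds()); // 5, treated as a 5ms budget
    int after = Math.toIntExact(readTimeout.getMillis());   // 5000, the intended 5s budget

With the old conversion, waiting for a pooled connection timed out after a handful of milliseconds, which surfaced as spurious connection timeouts when fetching the UserInfo endpoint.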
From b14bada16f3c66598e18393d8d30271a81096ec3 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 27 Aug 2024 10:44:29 -0400 Subject: [PATCH 097/352] [ML] Update inference interfaces for streaming (#112234) Using InferenceServiceResults and InferenceAction to stream ChunkedToXContent through to the Rest handler. --- .../inference/InferenceServiceResults.java | 24 ++++++++++++++++--- .../inference/action/InferenceAction.java | 20 ++++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java index f8330404c1538..0000e0ddc9af9 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java
@@ -13,17 +13,18 @@ import java.util.List; import java.util.Map; +import java.util.concurrent.Flow; public interface InferenceServiceResults extends NamedWriteable, ChunkedToXContent { /** - * Transform the result to match the format required for the TransportCoordinatedInferenceAction. + * <p>Transform the result to match the format required for the TransportCoordinatedInferenceAction.
* For the inference plugin TextEmbeddingResults, the {@link #transformToLegacyFormat()} transforms the * results into an intermediate format only used by the plugin's return value. It doesn't align with what the * TransportCoordinatedInferenceAction expects. TransportCoordinatedInferenceAction expects an ml plugin - * TextEmbeddingResults. + * TextEmbeddingResults.</p> * - * For other results like SparseEmbeddingResults, this method can be a pass through to the transformToLegacyFormat. + * <p>For other results like SparseEmbeddingResults, this method can be a pass through to the transformToLegacyFormat.</p> */ List<? extends InferenceResults> transformToCoordinationFormat();
@@ -37,4 +38,21 @@ public interface InferenceServiceResults extends NamedWriteable, ChunkedToXConte * Convert the result to a map to aid with test assertions */ Map<String, Object> asMap(); + + /** + * Returns {@code true} if these results are streamed as chunks, or {@code false} if these results contain the entire payload. + * Defaults to {@code false}. + */ + default boolean isStreaming() { + return false; + } + + /** + * When {@link #isStreaming()} is {@code true}, the InferenceAction.Results will subscribe to this publisher. + * Implementations should follow the {@link java.util.concurrent.Flow.Publisher} spec to stream the chunks. + */ + default Flow.Publisher<ChunkedToXContent> publisher() { + assert isStreaming() == false : "This must be implemented when isStreaming() == true"; + throw new UnsupportedOperationException("This must be implemented when isStreaming() == true"); + } }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java index 7ecb5aef4ce8d..c38f508db1b6a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java
@@ -17,6 +17,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.TimeValue;
@@ -40,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.Flow; import static org.elasticsearch.core.Strings.format;
@@ -391,6 +393,24 @@ public InferenceServiceResults getResults() { return results; } + /** + * Returns {@code true} if these results are streamed as chunks, or {@code false} if these results contain the entire payload. + * Currently set to false while it is being implemented. + */ + public boolean isStreaming() { + return false; + } + + /** + * When {@link #isStreaming()} is {@code true}, the RestHandler will subscribe to this publisher. + * When the RestResponse is finished with the current chunk, it will request the next chunk using the subscription. + * If the RestResponse is closed, it will cancel the subscription. + */ + public Flow.Publisher<ChunkedToXContent> publisher() { + assert isStreaming() == false : "This must be implemented when isStreaming() == true"; + throw new UnsupportedOperationException("This must be implemented when isStreaming() == true"); + } + @Override public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) {
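The publisher/subscriber contract described in these javadocs is the standard java.util.concurrent.Flow handshake. A hedged sketch of the consuming side, roughly what a REST layer would do with the new publisher; the rendering and error-handling bodies are placeholders, not the actual Elasticsearch handler code:

    // Illustrative consumer of the streaming results; assumes `results` is an
    // InferenceServiceResults whose isStreaming() returned true.
    results.publisher().subscribe(new Flow.Subscriber<ChunkedToXContent>() {
        private Flow.Subscription subscription;

        @Override
        public void onSubscribe(Flow.Subscription s) {
            subscription = s;
            s.request(1); // pull one chunk at a time, as the javadoc above describes
        }

        @Override
        public void onNext(ChunkedToXContent chunk) {
            // render the chunk, then ask for the next one
            subscription.request(1);
        }

        @Override
        public void onError(Throwable t) {
            // placeholder: surface the failure to the client
        }

        @Override
        public void onComplete() {
            // placeholder: close the response
        }
    });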
From b43470feeb82d602f549b6dfee9243d9afa6ce25 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 27 Aug 2024 07:55:50 -0700 Subject: [PATCH 098/352] Fix nested field generation in StandardVersusLogsIndexModeRandomDataChallengeRestIT (#112223) --- .../logsdb/datageneration/fields/Context.java | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java index 647d5bff152d1..62130967508f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java
@@ -13,6 +13,7 @@ import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse; import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; class Context { private final DataGeneratorSpecification specification;
@@ -21,13 +22,14 @@ class Context { private final DataSourceResponse.FieldTypeGenerator fieldTypeGenerator; private final DataSourceResponse.ObjectArrayGenerator objectArrayGenerator; private final int objectDepth; - private final int nestedFieldsCount; + // We don't need atomicity, but we need to pass counter by reference to accumulate total value from sub-objects. + private final AtomicInteger nestedFieldsCount; Context(DataGeneratorSpecification specification) { - this(specification, 0, 0); + this(specification, 0, new AtomicInteger(0)); } - private Context(DataGeneratorSpecification specification, int objectDepth, int nestedFieldsCount) { + private Context(DataGeneratorSpecification specification, int objectDepth, AtomicInteger nestedFieldsCount) { this.specification = specification; this.childFieldGenerator = specification.dataSource().get(new DataSourceRequest.ChildFieldGenerator(specification)); this.fieldTypeGenerator = specification.dataSource().get(new DataSourceRequest.FieldTypeGenerator());
@@ -53,7 +55,8 @@ public Context subObject() { } public Context nestedObject() { - return new Context(specification, objectDepth + 1, nestedFieldsCount + 1); + nestedFieldsCount.incrementAndGet(); + return new Context(specification, objectDepth + 1, nestedFieldsCount); } public boolean shouldAddObjectField() {
@@ -63,7 +66,7 @@ public boolean shouldAddNestedField() { return childFieldGenerator.generateNestedSubObject() && objectDepth < specification.maxObjectDepth() - && nestedFieldsCount < specification.nestedFieldsLimit(); + && nestedFieldsCount.get() < specification.nestedFieldsLimit(); } public Optional<Integer> generateObjectArray() {
From ed515138160da2b2431fd93462d3f3b7178e2e1b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 27 Aug 2024 10:57:17 -0400 Subject: [PATCH 099/352] ESQL: Remove `LogicalPlan` from old serialization (#112237) This removes `LogicalPlan` subclasses from `PlanNamedTypes` because they are no longer used.
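For context, an assumption about the surrounding code rather than part of this patch: each plan node now carries its own NamedWriteableRegistry entry and serializes itself, which is what made the central PlanNamedTypes table dead code. Roughly:

    // Assumed shape of the per-node registration that supersedes PlanNamedTypes;
    // illustrative, not the exact ESQL source.
    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
        LogicalPlan.class,
        "Aggregate",
        Aggregate::new
    );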
--- .../xpack/esql/io/stream/PlanNamedTypes.java | 35 +------------- .../esql/io/stream/PlanNamedTypesTests.java | 52 ------------------- 2 files changed, 1 insertion(+), 86 deletions(-)
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 180ba8c028e6a..77d982453203c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java
@@ -23,24 +23,9 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.index.EsIndex; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.InlineStats; -import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Lookup; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.TopN; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec;
@@ -132,25 +117,7 @@ public static List<PlanNameRegistry.Entry> namedTypeEntries() { of(PhysicalPlan.class, ProjectExec.class, PlanNamedTypes::writeProjectExec, PlanNamedTypes::readProjectExec), of(PhysicalPlan.class, RowExec.class, PlanNamedTypes::writeRowExec, PlanNamedTypes::readRowExec), of(PhysicalPlan.class, ShowExec.class, PlanNamedTypes::writeShowExec, PlanNamedTypes::readShowExec), - of(PhysicalPlan.class, TopNExec.class, PlanNamedTypes::writeTopNExec, PlanNamedTypes::readTopNExec), - // Logical Plan Nodes - a subset of plans that end up being actually serialized - of(LogicalPlan.class, Aggregate.ENTRY), - of(LogicalPlan.class, Dissect.ENTRY), - of(LogicalPlan.class, EsRelation.ENTRY), - of(LogicalPlan.class, Eval.ENTRY), - of(LogicalPlan.class, Enrich.ENTRY), - of(LogicalPlan.class, EsqlProject.ENTRY), - of(LogicalPlan.class, Filter.ENTRY), - of(LogicalPlan.class, Grok.ENTRY), - of(LogicalPlan.class, InlineStats.ENTRY), - of(LogicalPlan.class, Join.ENTRY), - of(LogicalPlan.class, Limit.ENTRY), - of(LogicalPlan.class, LocalRelation.ENTRY), - of(LogicalPlan.class, Lookup.ENTRY), - of(LogicalPlan.class, MvExpand.ENTRY), - of(LogicalPlan.class, OrderBy.ENTRY), - of(LogicalPlan.class, Project.ENTRY), - of(LogicalPlan.class, TopN.ENTRY) + of(PhysicalPlan.class, TopNExec.class, PlanNamedTypes::writeTopNExec, PlanNamedTypes::readTopNExec) ); return declared; }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index e5f195b053349..56ab1bd41693e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java
@@ -38,24 +38,6 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Dissect; -import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; -import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.InlineStats; -import org.elasticsearch.xpack.esql.plan.logical.Limit; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Lookup; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.TopN; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec;
@@ -130,40 +112,6 @@ public void testPhysicalPlanEntries() { assertMap(actual, matchesList(expected)); } - // List of known serializable logical plan nodes - this should be kept up to date or retrieved - // programmatically. - public static final List<Class<? extends LogicalPlan>> LOGICAL_PLAN_NODE_CLS = List.of( - Aggregate.class, - Dissect.class, - Enrich.class, - EsRelation.class, - EsqlProject.class, - Eval.class, - Filter.class, - Grok.class, - InlineStats.class, - Join.class, - Limit.class, - LocalRelation.class, - Lookup.class, - MvExpand.class, - OrderBy.class, - Project.class, - TopN.class - ); - - // Tests that all logical plan nodes have a suitably named serialization entry. - public void testLogicalPlanEntries() { - var expected = LOGICAL_PLAN_NODE_CLS.stream().map(Class::getSimpleName).toList(); - var actual = PlanNamedTypes.namedTypeEntries() - .stream() - .filter(e -> e.categoryClass().isAssignableFrom(LogicalPlan.class)) - .map(PlanNameRegistry.Entry::name) - .sorted() - .toList(); - assertMap(actual, matchesList(expected)); - } - // Tests that all names are unique - there should be a good reason if this is not the case. public void testUniqueNames() { var actual = PlanNamedTypes.namedTypeEntries().stream().map(PlanNameRegistry.Entry::name).distinct().toList();
From bd2d6aa55fdf839ca42ebf04a6493732b6c94b24 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 27 Aug 2024 09:14:49 -0600 Subject: [PATCH 100/352] Fix template alias parsing livelock (#112217) * Fix template alias parsing livelock This commit fixes an issue with templates parsing alias definitions that can cause the ES thread to hang indefinitely. Due to a malformed alias definition, the parsing gets into a loop that never exits. This commit adds a null check to both the component template and alias parsing code, which prevents the looping.
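The failure mode is easiest to see in a stripped-down sketch of the parsing loop; this is illustrative, not the exact AliasMetadata code. A pull parser returns null once the token stream is exhausted, so a loop that only terminates on END_OBJECT spins forever on truncated or malformed input unless the null token is handled explicitly:

    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == null) { // the new guard added in the diff below
            throw new IllegalArgumentException("unexpected null token while parsing alias");
        }
        // ... existing field handling ...
    }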
--- docs/changelog/112217.yaml | 5 +++++ .../cluster/metadata/AliasMetadata.java | 2 ++ .../cluster/metadata/Template.java | 6 +++++- .../metadata/ComponentTemplateTests.java | 19 +++++++++++++++++++ 4 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112217.yaml
diff --git a/docs/changelog/112217.yaml b/docs/changelog/112217.yaml new file mode 100644 index 0000000000000..bb367d6128001 --- /dev/null +++ b/docs/changelog/112217.yaml
@@ -0,0 +1,5 @@ +pr: 112217 +summary: Fix template alias parsing livelock +area: Indices APIs +type: bug +issues: []
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java index a0f4a929dafdb..ff412d629b3b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java
@@ -396,6 +396,8 @@ public static AliasMetadata fromXContent(XContentParser parser) throws IOExcepti } else if ("is_hidden".equals(currentFieldName)) { builder.isHidden(parser.booleanValue()); } + } else if (token == null) { + throw new IllegalArgumentException("unexpected null token while parsing alias"); } } return builder.build();
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index 70440adc4ebbe..b044ef6042428 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
@@ -70,7 +70,11 @@ public class Template implements SimpleDiffable